-rw-r--r--  .gitignore | 4
-rw-r--r--  Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt (renamed from Documentation/devicetree/bindings/misc/arm-charlcd.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/power/wakeup-source.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/thermal/imx-thermal.txt | 25
-rw-r--r--  Documentation/gpu/tve200.rst | 2
-rw-r--r--  Documentation/i2c/busses/i2c-i801 | 2
-rw-r--r--  Documentation/virtual/kvm/cpuid.txt | 4
-rw-r--r--  Documentation/virtual/kvm/msr.txt | 3
-rw-r--r--  Documentation/x86/topology.txt | 2
-rw-r--r--  MAINTAINERS | 21
-rw-r--r--  Makefile | 7
-rw-r--r--  arch/alpha/include/asm/cmpxchg.h | 6
-rw-r--r--  arch/alpha/include/asm/xchg.h | 38
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/boot/dts/axs101.dts | 2
-rw-r--r--  arch/arc/boot/dts/axs10x_mb.dtsi | 4
-rw-r--r--  arch/arc/boot/dts/haps_hs_idu.dts | 2
-rw-r--r--  arch/arc/boot/dts/nsim_700.dts | 2
-rw-r--r--  arch/arc/boot/dts/nsim_hs.dts | 2
-rw-r--r--  arch/arc/boot/dts/nsim_hs_idu.dts | 2
-rw-r--r--  arch/arc/boot/dts/nsimosci.dts | 2
-rw-r--r--  arch/arc/boot/dts/nsimosci_hs.dts | 2
-rw-r--r--  arch/arc/boot/dts/nsimosci_hs_idu.dts | 2
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h | 2
-rw-r--r--  arch/arc/kernel/mcip.c | 74
-rw-r--r--  arch/arc/kernel/setup.c | 4
-rw-r--r--  arch/arc/kernel/smp.c | 50
-rw-r--r--  arch/arc/kernel/unwind.c | 2
-rw-r--r--  arch/arc/mm/cache.c | 5
-rw-r--r--  arch/arm/boot/dts/bcm11351.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/bcm21664.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/bcm2835.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/bcm2836.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/bcm2837.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/bcm283x.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/bcm958625hr.dts | 2
-rw-r--r--  arch/arm/boot/dts/gemini-dlink-dns-313.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6dl-icore-rqs.dts | 2
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-som.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/omap5-uevm.dts | 2
-rw-r--r--  arch/arm/boot/dts/rk3036.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/rk322x.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/rk3288-phycore-som.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/zx296702.dtsi | 20
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 2
-rw-r--r--  arch/arm/kernel/time.c | 2
-rw-r--r--  arch/arm/kvm/hyp/Makefile | 5
-rw-r--r--  arch/arm/kvm/hyp/banked-sr.c | 4
-rw-r--r--  arch/arm/mach-clps711x/board-dt.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-dm355-evm.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-dm355-leopard.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-dm365-evm.c | 2
-rw-r--r--  arch/arm/mach-mvebu/Kconfig | 4
-rw-r--r--  arch/arm/mach-omap1/clock.c | 6
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 6
-rw-r--r--  arch/arm/mach-omap2/pm.c | 4
-rw-r--r--  arch/arm/mach-omap2/timer.c | 19
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 35
-rw-r--r--  arch/arm/plat-orion/common.c | 23
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-axg.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gx.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8996.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | 7
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 4
-rw-r--r--  arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 2
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 12
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 4
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 6
-rw-r--r--  arch/arm64/kernel/perf_event.c | 4
-rw-r--r--  arch/arm64/kernel/process.c | 11
-rw-r--r--  arch/arm64/kernel/ptrace.c | 2
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 5
-rw-r--r--  arch/arm64/kernel/sys_compat.c | 2
-rw-r--r--  arch/arm64/kernel/time.c | 2
-rw-r--r--  arch/arm64/kernel/traps.c | 10
-rw-r--r--  arch/arm64/mm/mmu.c | 10
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 5
-rw-r--r--  arch/mips/include/asm/compat.h | 1
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 2
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 3
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c | 2
-rw-r--r--  arch/powerpc/mm/drmem.c | 6
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 3
-rw-r--r--  arch/riscv/include/asm/barrier.h | 6
-rw-r--r--  arch/s390/kvm/intercept.c | 51
-rw-r--r--  arch/s390/kvm/interrupt.c | 123
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 79
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 7
-rw-r--r--  arch/s390/kvm/priv.c | 192
-rw-r--r--  arch/s390/kvm/vsie.c | 20
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/Makefile | 7
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 4
-rw-r--r--  arch/x86/entry/calling.h | 34
-rw-r--r--  arch/x86/entry/entry_32.S | 3
-rw-r--r--  arch/x86/entry/entry_64.S | 153
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 71
-rw-r--r--  arch/x86/include/asm/apm.h | 6
-rw-r--r--  arch/x86/include/asm/asm-prototypes.h | 3
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 1
-rw-r--r--  arch/x86/include/asm/efi.h | 17
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 3
-rw-r--r--  arch/x86/include/asm/microcode.h | 9
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 1
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 175
-rw-r--r--  arch/x86/include/asm/paravirt.h | 17
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 5
-rw-r--r--  arch/x86/include/asm/pgtable.h | 8
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 10
-rw-r--r--  arch/x86/include/asm/processor.h | 1
-rw-r--r--  arch/x86/include/asm/refcount.h | 4
-rw-r--r--  arch/x86/include/asm/rmwcc.h | 16
-rw-r--r--  arch/x86/include/uapi/asm/hyperv.h | 18
-rw-r--r--  arch/x86/include/uapi/asm/kvm_para.h | 1
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/apic/vector.c | 25
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 12
-rw-r--r--  arch/x86/kernel/cpu/common.c | 30
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 1
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 10
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 39
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 10
-rw-r--r--  arch/x86/kernel/head_64.S | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 20
-rw-r--r--  arch/x86/kernel/smpboot.c | 1
-rw-r--r--  arch/x86/kernel/unwind_orc.c | 3
-rw-r--r--  arch/x86/kvm/cpuid.c | 3
-rw-r--r--  arch/x86/kvm/lapic.c | 1
-rw-r--r--  arch/x86/kvm/mmu.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 46
-rw-r--r--  arch/x86/kvm/vmx.c | 19
-rw-r--r--  arch/x86/kvm/x86.c | 7
-rw-r--r--  arch/x86/lib/Makefile | 1
-rw-r--r--  arch/x86/lib/retpoline.S | 56
-rw-r--r--  arch/x86/mm/fault.c | 4
-rw-r--r--  arch/x86/mm/mem_encrypt_boot.S | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 9
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 2
-rw-r--r--  arch/x86/realmode/rm/trampoline_64.S | 2
-rw-r--r--  arch/xtensa/kernel/pci-dma.c | 40
-rw-r--r--  arch/xtensa/mm/init.c | 70
-rw-r--r--  block/sed-opal.c | 2
-rw-r--r--  crypto/asymmetric_keys/pkcs7_trust.c | 1
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c | 12
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 4
-rw-r--r--  crypto/asymmetric_keys/restrict.c | 21
-rw-r--r--  drivers/bus/ti-sysc.c | 2
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.c | 4
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 4
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 4
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 5
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c | 8
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 5
-rw-r--r--  drivers/clocksource/mips-gic-timer.c | 4
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 6
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 16
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 8
-rw-r--r--  drivers/edac/sb_edac.c | 2
-rw-r--r--  drivers/gpio/gpiolib-of.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 11
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 40
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 15
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 21
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 21
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.h | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/regs-fimc.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 6
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 7
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 17
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 4
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-csi.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-pre.c | 3
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prg.c | 3
-rw-r--r--  drivers/i2c/busses/Kconfig | 2
-rw-r--r--  drivers/i2c/busses/i2c-bcm2835.c | 21
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sirf.c | 4
-rw-r--r--  drivers/iommu/intel-svm.c | 2
-rw-r--r--  drivers/md/md-multipath.c | 2
-rw-r--r--  drivers/md/md.c | 53
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/raid1.c | 13
-rw-r--r--  drivers/md/raid1.h | 12
-rw-r--r--  drivers/md/raid10.c | 18
-rw-r--r--  drivers/md/raid10.h | 13
-rw-r--r--  drivers/md/raid5-log.h | 3
-rw-r--r--  drivers/md/raid5-ppl.c | 10
-rw-r--r--  drivers/md/raid5.c | 19
-rw-r--r--  drivers/md/raid5.h | 12
-rw-r--r--  drivers/memory/brcmstb_dpfe.c | 74
-rw-r--r--  drivers/message/fusion/mptctl.c | 2
-rw-r--r--  drivers/misc/ocxl/file.c | 6
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 4
-rw-r--r--  drivers/mmc/host/dw_mmc-exynos.c | 1
-rw-r--r--  drivers/mmc/host/dw_mmc-k3.c | 4
-rw-r--r--  drivers/mmc/host/dw_mmc-rockchip.c | 1
-rw-r--r--  drivers/mmc/host/dw_mmc-zx.c | 1
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 84
-rw-r--r--  drivers/mmc/host/dw_mmc.h | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 35
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 23
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 2
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/usb/smsc75xx.c | 7
-rw-r--r--  drivers/net/virtio_net.c | 58
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/perf/arm_pmu.c | 138
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 61
-rw-r--r--  drivers/perf/arm_pmu_platform.c | 37
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-axg.c | 4
-rw-r--r--  drivers/platform/x86/intel-hid.c | 1
-rw-r--r--  drivers/platform/x86/intel-vbtn.c | 47
-rw-r--r--  drivers/platform/x86/wmi.c | 6
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aacraid/linit.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aiclib.c | 34
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 1
-rw-r--r--  drivers/scsi/csiostor/csio_lnode.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 5
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 5
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 55
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 46
-rw-r--r--  drivers/scsi/storvsc_drv.c | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 2
-rw-r--r--  drivers/soc/imx/gpc.c | 12
-rw-r--r--  fs/nfs/callback_proc.c | 14
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/nfs4client.c | 6
-rw-r--r--  fs/xfs/scrub/agheader.c | 3
-rw-r--r--  fs/xfs/xfs_refcount_item.c | 9
-rw-r--r--  fs/xfs/xfs_rmap_item.c | 4
-rw-r--r--  fs/xfs/xfs_super.c | 2
-rw-r--r--  include/drm/drm_atomic.h | 9
-rw-r--r--  include/drm/drm_crtc_helper.h | 1
-rw-r--r--  include/drm/drm_drv.h | 1
-rw-r--r--  include/linux/compiler-clang.h | 5
-rw-r--r--  include/linux/compiler-gcc.h | 4
-rw-r--r--  include/linux/init.h | 8
-rw-r--r--  include/linux/jump_label.h | 3
-rw-r--r--  include/linux/kernel.h | 1
-rw-r--r--  include/linux/kvm_host.h | 6
-rw-r--r--  include/linux/mutex.h | 5
-rw-r--r--  include/linux/nospec.h | 26
-rw-r--r--  include/linux/perf/arm_pmu.h | 26
-rw-r--r--  include/linux/workqueue.h | 1
-rw-r--r--  include/net/mac80211.h | 2
-rw-r--r--  include/net/regulatory.h | 2
-rw-r--r--  include/soc/arc/mcip.h | 5
-rw-r--r--  include/uapi/drm/virtgpu_drm.h | 1
-rw-r--r--  include/uapi/linux/psp-sev.h | 2
-rw-r--r--  include/uapi/linux/ptrace.h | 4
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/bpf/arraymap.c | 33
-rw-r--r--  kernel/bpf/core.c | 2
-rw-r--r--  kernel/bpf/cpumap.c | 2
-rw-r--r--  kernel/bpf/lpm_trie.c | 14
-rw-r--r--  kernel/bpf/sockmap.c | 3
-rw-r--r--  kernel/extable.c | 2
-rw-r--r--  kernel/irq/matrix.c | 23
-rw-r--r--  kernel/jump_label.c | 27
-rw-r--r--  kernel/printk/printk.c | 3
-rw-r--r--  kernel/seccomp.c | 6
-rw-r--r--  kernel/trace/bpf_trace.c | 2
-rw-r--r--  kernel/workqueue.c | 16
-rw-r--r--  lib/dma-debug.c | 10
-rw-r--r--  lib/idr.c | 13
-rw-r--r--  lib/vsprintf.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_among.c | 10
-rw-r--r--  net/bridge/netfilter/ebt_limit.c | 4
-rw-r--r--  net/core/filter.c | 6
-rw-r--r--  net/core/gen_estimator.c | 1
-rw-r--r--  net/ipv4/ip_sockglue.c | 7
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 7
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 20
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 4
-rw-r--r--  net/ipv4/netfilter/ipt_rpfilter.c | 6
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 9
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6t_rpfilter.c | 6
-rw-r--r--  net/ipv6/netfilter/ip6t_srh.c | 6
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/mac80211/agg-rx.c | 4
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/mesh.c | 17
-rw-r--r--  net/mac80211/spectmgmt.c | 7
-rw-r--r--  net/mac80211/sta_info.c | 3
-rw-r--r--  net/netfilter/nf_nat_proto_common.c | 7
-rw-r--r--  net/netfilter/x_tables.c | 74
-rw-r--r--  net/netfilter/xt_AUDIT.c | 4
-rw-r--r--  net/netfilter/xt_CHECKSUM.c | 8
-rw-r--r--  net/netfilter/xt_CONNSECMARK.c | 10
-rw-r--r--  net/netfilter/xt_CT.c | 25
-rw-r--r--  net/netfilter/xt_DSCP.c | 4
-rw-r--r--  net/netfilter/xt_HL.c | 13
-rw-r--r--  net/netfilter/xt_HMARK.c | 27
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 9
-rw-r--r--  net/netfilter/xt_LED.c | 16
-rw-r--r--  net/netfilter/xt_NFQUEUE.c | 8
-rw-r--r--  net/netfilter/xt_SECMARK.c | 18
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 10
-rw-r--r--  net/netfilter/xt_TPROXY.c | 6
-rw-r--r--  net/netfilter/xt_addrtype.c | 33
-rw-r--r--  net/netfilter/xt_bpf.c | 4
-rw-r--r--  net/netfilter/xt_cgroup.c | 8
-rw-r--r--  net/netfilter/xt_cluster.c | 8
-rw-r--r--  net/netfilter/xt_connbytes.c | 4
-rw-r--r--  net/netfilter/xt_connlabel.c | 7
-rw-r--r--  net/netfilter/xt_connmark.c | 8
-rw-r--r--  net/netfilter/xt_conntrack.c | 4
-rw-r--r--  net/netfilter/xt_dscp.c | 4
-rw-r--r--  net/netfilter/xt_ecn.c | 4
-rw-r--r--  net/netfilter/xt_hashlimit.c | 26
-rw-r--r--  net/netfilter/xt_helper.c | 4
-rw-r--r--  net/netfilter/xt_ipcomp.c | 2
-rw-r--r--  net/netfilter/xt_ipvs.c | 3
-rw-r--r--  net/netfilter/xt_l2tp.c | 22
-rw-r--r--  net/netfilter/xt_limit.c | 4
-rw-r--r--  net/netfilter/xt_nat.c | 5
-rw-r--r--  net/netfilter/xt_nfacct.c | 6
-rw-r--r--  net/netfilter/xt_physdev.c | 4
-rw-r--r--  net/netfilter/xt_policy.c | 23
-rw-r--r--  net/netfilter/xt_recent.c | 14
-rw-r--r--  net/netfilter/xt_set.c | 50
-rw-r--r--  net/netfilter/xt_socket.c | 10
-rw-r--r--  net/netfilter/xt_state.c | 4
-rw-r--r--  net/netfilter/xt_time.c | 6
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/rxrpc/output.c | 2
-rw-r--r--  net/sched/cls_api.c | 7
-rw-r--r--  net/wireless/mesh.c | 25
-rw-r--r--  net/wireless/sme.c | 2
-rw-r--r--  samples/seccomp/Makefile | 10
-rw-r--r--  scripts/Makefile.build | 8
-rw-r--r--  security/integrity/digsig.c | 1
-rw-r--r--  security/keys/big_key.c | 110
-rw-r--r--  sound/core/control.c | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 38
-rw-r--r--  sound/pci/hda/patch_realtek.c | 3
-rw-r--r--  sound/usb/quirks-table.h | 47
-rw-r--r--  sound/x86/intel_hdmi_audio.c | 37
-rw-r--r--  tools/bpf/bpftool/main.c | 2
-rw-r--r--  tools/bpf/bpftool/prog.c | 3
-rwxr-xr-x  tools/kvm/kvm_stat/kvm_stat | 503
-rw-r--r--  tools/kvm/kvm_stat/kvm_stat.txt | 4
-rw-r--r--  tools/lib/bpf/libbpf.c | 5
-rw-r--r--  tools/objtool/builtin-check.c | 6
-rw-r--r--  tools/objtool/builtin-orc.c | 6
-rw-r--r--  tools/objtool/builtin.h | 5
-rw-r--r--  tools/objtool/check.c | 100
-rw-r--r--  tools/objtool/check.h | 3
-rw-r--r--  tools/testing/radix-tree/idr-test.c | 52
-rw-r--r--  tools/testing/radix-tree/linux.c | 11
-rw-r--r--  tools/testing/radix-tree/linux/compiler_types.h | 0
-rw-r--r--  tools/testing/radix-tree/linux/gfp.h | 1
-rw-r--r--  tools/testing/radix-tree/linux/slab.h | 6
-rw-r--r--  tools/testing/selftests/android/Makefile | 8
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_tcpbpf_kern.c | 1
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 26
-rw-r--r--  tools/testing/selftests/futex/Makefile | 6
-rw-r--r--  tools/testing/selftests/memfd/config | 1
-rw-r--r--  tools/testing/selftests/memory-hotplug/Makefile | 2
-rw-r--r--  tools/testing/selftests/pstore/config | 1
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c | 61
-rw-r--r--  tools/testing/selftests/sync/Makefile | 2
-rw-r--r--  tools/testing/selftests/vDSO/Makefile | 14
-rw-r--r--  tools/testing/selftests/vm/.gitignore | 1
-rw-r--r--  virt/kvm/arm/arch_timer.c | 116
-rw-r--r--  virt/kvm/kvm_main.c | 3
447 files changed, 3902 insertions(+), 2220 deletions(-)
diff --git a/.gitignore b/.gitignore
index 705e09913dc2..1be78fd8163b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,3 +127,7 @@ all.config
# Kdevelop4
*.kdev4
+
+#Automatically generated by ASN.1 compiler
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h
diff --git a/Documentation/devicetree/bindings/misc/arm-charlcd.txt b/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
index e28e2aac47f1..e28e2aac47f1 100644
--- a/Documentation/devicetree/bindings/misc/arm-charlcd.txt
+++ b/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt
index 3c81f78b5c27..5d254ab13ebf 100644
--- a/Documentation/devicetree/bindings/power/wakeup-source.txt
+++ b/Documentation/devicetree/bindings/power/wakeup-source.txt
@@ -60,7 +60,7 @@ Examples
#size-cells = <0>;
button@1 {
- debounce_interval = <50>;
+ debounce-interval = <50>;
wakeup-source;
linux,code = <116>;
label = "POWER";
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
index 28be51afdb6a..379eb763073e 100644
--- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
@@ -22,7 +22,32 @@ Optional properties:
- clocks : thermal sensor's clock source.
Example:
+ocotp: ocotp@21bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,imx6sx-ocotp", "syscon";
+ reg = <0x021bc000 0x4000>;
+ clocks = <&clks IMX6SX_CLK_OCOTP>;
+ tempmon_calib: calib@38 {
+ reg = <0x38 4>;
+ };
+
+ tempmon_temp_grade: temp-grade@20 {
+ reg = <0x20 4>;
+ };
+};
+
+tempmon: tempmon {
+ compatible = "fsl,imx6sx-tempmon", "fsl,imx6q-tempmon";
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,tempmon = <&anatop>;
+ nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+ nvmem-cell-names = "calib", "temp_grade";
+ clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
+};
+
+Legacy method (Deprecated):
tempmon {
compatible = "fsl,imx6q-tempmon";
fsl,tempmon = <&anatop>;
diff --git a/Documentation/gpu/tve200.rst b/Documentation/gpu/tve200.rst
index 69b17b324e12..152ea9398f7e 100644
--- a/Documentation/gpu/tve200.rst
+++ b/Documentation/gpu/tve200.rst
@@ -3,4 +3,4 @@
==================================
.. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
- :doc: Faraday TV Encoder 200
+ :doc: Faraday TV Encoder TVE200 DRM Driver
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d47702456926..65514c251318 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -28,8 +28,10 @@ Supported adapters:
* Intel Wildcat Point (PCH)
* Intel Wildcat Point-LP (PCH)
* Intel BayTrail (SOC)
+ * Intel Braswell (SOC)
* Intel Sunrise Point-H (PCH)
* Intel Sunrise Point-LP (PCH)
+ * Intel Kaby Lake-H (PCH)
* Intel DNV (SOC)
* Intel Broxton (SOC)
* Intel Lewisburg (PCH)
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index dcab6dc11e3b..87a7506f31c2 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -58,6 +58,10 @@ KVM_FEATURE_PV_TLB_FLUSH || 9 || guest checks this feature bit
|| || before enabling paravirtualized
|| || tlb flush.
------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF_VMEXIT || 10 || paravirtualized async PF VM exit
+ || || can be enabled by setting bit 2
+ || || when writing to msr 0x4b564d02
+------------------------------------------------------------------------------
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
|| || per-cpu warps are expected in
|| || kvmclock.
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 1ebecc115dc6..f3f0d57ced8e 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
when asynchronous page faults are enabled on the vcpu 0 when
disabled. Bit 1 is 1 if asynchronous page faults can be injected
when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
- are delivered to L1 as #PF vmexits.
+ are delivered to L1 as #PF vmexits. Bit 2 can be set only if
+ KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
First 4 byte of 64 byte memory location will be written to by
the hypervisor at the time of asynchronous page fault (APF)
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
index f3e9d7e9ed6c..2953e3ec9a02 100644
--- a/Documentation/x86/topology.txt
+++ b/Documentation/x86/topology.txt
@@ -108,7 +108,7 @@ The topology of a system is described in the units of:
The number of online threads is also printed in /proc/cpuinfo "siblings."
- - topology_sibling_mask():
+ - topology_sibling_cpumask():
The cpumask contains all online threads in the core to which a thread
belongs.
diff --git a/MAINTAINERS b/MAINTAINERS
index 93a12af4f180..1c95c6036098 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1238,7 +1238,7 @@ F: drivers/clk/at91
ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
M: Nicolas Ferre <nicolas.ferre@microchip.com>
-M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+M: Alexandre Belloni <alexandre.belloni@bootlin.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.linux4sam.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
@@ -1590,7 +1590,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
-M: Gregory Clement <gregory.clement@free-electrons.com>
+M: Gregory Clement <gregory.clement@bootlin.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/soc/dove/
@@ -1604,7 +1604,7 @@ F: arch/arm/boot/dts/orion5x*
ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
-M: Gregory Clement <gregory.clement@free-electrons.com>
+M: Gregory Clement <gregory.clement@bootlin.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -1999,8 +1999,10 @@ M: Maxime Coquelin <mcoquelin.stm32@gmail.com>
M: Alexandre Torgue <alexandre.torgue@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next
N: stm32
+F: arch/arm/boot/dts/stm32*
+F: arch/arm/mach-stm32/
F: drivers/clocksource/armv7m_systick.c
ARM/TANGO ARCHITECTURE
@@ -10926,6 +10928,17 @@ L: linux-gpio@vger.kernel.org
S: Supported
F: drivers/pinctrl/pinctrl-at91-pio4.*
+PIN CONTROLLER - FREESCALE
+M: Dong Aisheng <aisheng.dong@nxp.com>
+M: Fabio Estevam <festevam@gmail.com>
+M: Shawn Guo <shawnguo@kernel.org>
+M: Stefan Agner <stefan@agner.ch>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/pinctrl/freescale/
+F: Documentation/devicetree/bindings/pinctrl/fsl,*
+
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
diff --git a/Makefile b/Makefile
index d9cf3a40eda9..fb94072fd80d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -489,6 +489,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
endif
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+export RETPOLINE_CFLAGS
+
ifeq ($(config-targets),1)
# ===========================================================================
# *config targets only - make sure prerequisites are updated, and descend
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
* Atomic exchange routines.
*/
-#define __ASM__MB
#define ____xchg(type, args...) __xchg ## type ## _local(args)
#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
#include <asm/xchg.h>
@@ -33,10 +32,6 @@
cmpxchg_local((ptr), (o), (n)); \
})
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB "\tmb\n"
-#endif
#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...) __xchg ##type(args)
@@ -64,7 +59,6 @@
cmpxchg((ptr), (o), (n)); \
})
-#undef __ASM__MB
#undef ____cmpxchg
#endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 68dfb3cb7145..e2b59fac5257 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
* Atomic exchange.
* Since it can be used to implement critical sections
* it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
*/
static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
{
unsigned long ret, tmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" insbl %1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();
return ret;
}
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
{
unsigned long ret, tmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" inswl %1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();
return ret;
}
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
{
unsigned long dummy;
+ smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%4\n"
" bis $31,%3,%1\n"
" stl_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();
return val;
}
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
{
unsigned long dummy;
+ smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%4\n"
" bis $31,%3,%1\n"
" stq_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();
return val;
}
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
*/
static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
{
unsigned long prev, tmp, cmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %5,7,%4\n"
" insbl %1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
" or %1,%2,%2\n"
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();
return prev;
}
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
{
unsigned long prev, tmp, cmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %5,7,%4\n"
" inswl %1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
" or %1,%2,%2\n"
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();
return prev;
}
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
{
unsigned long prev, cmp;
+ smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%5\n"
" cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();
return prev;
}
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
{
unsigned long prev, cmp;
+ smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%5\n"
" cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
" mov %4,%1\n"
" stq_c %1,%2\n"
" beq %1,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();
return prev;
}
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index f3a80cf164cc..d76bf4a83740 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -484,7 +484,6 @@ config ARC_CURR_IN_REG
config ARC_EMUL_UNALIGNED
bool "Emulate unaligned memory access (userspace only)"
- default N
select SYSCTL_ARCH_UNALIGN_NO_WARN
select SYSCTL_ARCH_UNALIGN_ALLOW
depends on ISA_ARCOMPACT
diff --git a/arch/arc/boot/dts/axs101.dts b/arch/arc/boot/dts/axs101.dts
index 70aec7d6ca60..626b694c7be7 100644
--- a/arch/arc/boot/dts/axs101.dts
+++ b/arch/arc/boot/dts/axs101.dts
@@ -17,6 +17,6 @@
compatible = "snps,axs101", "snps,arc-sdp";
chosen {
- bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60";
+ bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60 print-fatal-signals=1";
};
};
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 74d070cd3c13..47b74fbc403c 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -214,13 +214,13 @@
};
eeprom@0x54{
- compatible = "24c01";
+ compatible = "atmel,24c01";
reg = <0x54>;
pagesize = <0x8>;
};
eeprom@0x57{
- compatible = "24c04";
+ compatible = "atmel,24c04";
reg = <0x57>;
pagesize = <0x8>;
};
diff --git a/arch/arc/boot/dts/haps_hs_idu.dts b/arch/arc/boot/dts/haps_hs_idu.dts
index 215cddd0b63b..0c603308aeb3 100644
--- a/arch/arc/boot/dts/haps_hs_idu.dts
+++ b/arch/arc/boot/dts/haps_hs_idu.dts
@@ -22,7 +22,7 @@
};
chosen {
- bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
};
aliases {
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 5ee96b067c08..ff2f2c70c545 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -17,7 +17,7 @@
interrupt-parent = <&core_intc>;
chosen {
- bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+ bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
};
aliases {
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index 8d787b251f73..8e2489b16b0a 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -24,7 +24,7 @@
};
chosen {
- bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+ bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
};
aliases {
diff --git a/arch/arc/boot/dts/nsim_hs_idu.dts b/arch/arc/boot/dts/nsim_hs_idu.dts
index 4f98ebf71fd8..ed12f494721d 100644
--- a/arch/arc/boot/dts/nsim_hs_idu.dts
+++ b/arch/arc/boot/dts/nsim_hs_idu.dts
@@ -15,7 +15,7 @@
interrupt-parent = <&core_intc>;
chosen {
- bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+ bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
};
aliases {
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index 3c391ba565ed..7842e5eb4ab5 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -20,7 +20,7 @@
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
};
aliases {
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 14a727cbf4c9..b8838cf2b4ec 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -20,7 +20,7 @@
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
};
aliases {
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index 5052917d4a99..72a2c723f1f7 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -18,7 +18,7 @@
chosen {
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24 print-fatal-signals=1";
};
aliases {
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 257a68f3c2fe..309f4e6721b3 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -184,7 +184,7 @@
.macro FAKE_RET_FROM_EXCPN
lr r9, [status32]
bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
- or r9, r9, (STATUS_L_MASK|STATUS_IE_MASK)
+ or r9, r9, STATUS_IE_MASK
kflag r9
.endm
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index f61a52b01625..5fe84e481654 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
static char smp_cpuinfo_buf[128];
+/*
+ * Set mask to halt GFRC if any online core in SMP cluster is halted.
+ * Only works for ARC HS v3.0+, on earlier versions has no effect.
+ */
+static void mcip_update_gfrc_halt_mask(int cpu)
+{
+ struct bcr_generic gfrc;
+ unsigned long flags;
+ u32 gfrc_halt_mask;
+
+ READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
+
+ /*
+ * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
+ * GFRC 0x3 version.
+ */
+ if (gfrc.ver < 0x3)
+ return;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ __mcip_cmd(CMD_GFRC_READ_CORE, 0);
+ gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+ gfrc_halt_mask |= BIT(cpu);
+ __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_update_debug_halt_mask(int cpu)
+{
+ u32 mcip_mask = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ /*
+ * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
+ * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
+ * and CMD_DEBUG_READ_SELECT.
+ */
+ __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
+ mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+ mcip_mask |= BIT(cpu);
+
+ __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
+ /*
+ * Parameter specified halt cause:
+ * STATUS32[H]/actionpoint/breakpoint/self-halt
+ * We choose all of them (0xF).
+ */
+ __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
static void mcip_setup_per_cpu(int cpu)
{
+ struct mcip_bcr mp;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+
smp_ipi_irq_setup(cpu, IPI_IRQ);
smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+
+ /* Update GFRC halt mask as new CPU came online */
+ if (mp.gfrc)
+ mcip_update_gfrc_halt_mask(cpu);
+
+ /* Update MCIP debug mask as new CPU came online */
+ if (mp.dbg)
+ mcip_update_debug_halt_mask(cpu);
}
static void mcip_ipi_send(int cpu)
@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-
- if (mp.dbg) {
- __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
- __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
- }
}
struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 9d27331fe69a..b2cae79a25d7 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -51,7 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
- { 0x54, "R4.0" },
+ { 0x54, "R3.10a" },
#endif
{ 0x00, NULL }
};
@@ -373,7 +373,7 @@ static void arc_chk_core_config(void)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
int saved = 0, present = 0;
- char *opt_nm = NULL;;
+ char *opt_nm = NULL;
if (!cpu->extn.timer0)
panic("Timer0 is not present!\n");
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index efe8b4200a67..21d86c36692b 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -24,6 +24,7 @@
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
+#include <linux/of_fdt.h>
#include <asm/processor.h>
#include <asm/setup.h>
@@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void)
{
}
+static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
+{
+ unsigned long dt_root = of_get_flat_dt_root();
+ const char *buf;
+
+ buf = of_get_flat_dt_prop(dt_root, name, NULL);
+ if (!buf)
+ return -EINVAL;
+
+ if (cpulist_parse(buf, cpumask))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Read from DeviceTree and setup cpu possible mask. If there is no
+ * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
+ */
+static void __init arc_init_cpu_possible(void)
+{
+ struct cpumask cpumask;
+
+ if (arc_get_cpu_map("possible-cpus", &cpumask)) {
+ pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
+ NR_CPUS);
+
+ cpumask_setall(&cpumask);
+ }
+
+ if (!cpumask_test_cpu(0, &cpumask))
+ panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
+
+ init_cpu_possible(&cpumask);
+}
+
/*
* Called from setup_arch() before calling setup_processor()
*
@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void)
*/
void __init smp_init_cpus(void)
{
- unsigned int i;
-
- for (i = 0; i < NR_CPUS; i++)
- set_cpu_possible(i, true);
+ arc_init_cpu_possible();
if (plat_smp_ops.init_early_smp)
plat_smp_ops.init_early_smp();
@@ -70,16 +104,12 @@ void __init smp_init_cpus(void)
/* called from init ( ) => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
/*
* if platform didn't set the present map already, do it now
* boot cpu is set to present already by init/main.c
*/
- if (num_present_cpus() <= 1) {
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
- }
+ if (num_present_cpus() <= 1)
+ init_cpu_present(cpu_possible_mask);
}
void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 333daab7def0..183391d4d33a 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -366,7 +366,7 @@ static void init_unwind_hdr(struct unwind_table *table,
return;
ret_err:
- panic("Attention !!! Dwarf FDE parsing errors\n");;
+ panic("Attention !!! Dwarf FDE parsing errors\n");
}
#ifdef CONFIG_MODULES
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index eee924dfffa6..2072f3451e9c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -780,7 +780,10 @@ noinline static void slc_entire_op(const int op)
write_aux_reg(r, ctrl);
- write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+ if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
+ else
+ write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
read_aux_reg(r);
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index 18045c38bcf1..db7cded1b7ad 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -55,7 +55,7 @@
<0x3ff00100 0x100>;
};
- smc@0x3404c000 {
+ smc@3404c000 {
compatible = "brcm,bcm11351-smc", "brcm,kona-smc";
reg = <0x3404c000 0x400>; /* 1 KiB in SRAM */
};
diff --git a/arch/arm/boot/dts/bcm21664.dtsi b/arch/arm/boot/dts/bcm21664.dtsi
index 6dde95f21cef..266f2611dc22 100644
--- a/arch/arm/boot/dts/bcm21664.dtsi
+++ b/arch/arm/boot/dts/bcm21664.dtsi
@@ -55,7 +55,7 @@
<0x3ff00100 0x100>;
};
- smc@0x3404e000 {
+ smc@3404e000 {
compatible = "brcm,bcm21664-smc", "brcm,kona-smc";
reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */
};
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 0e3d2a5ff208..a5c3824c8056 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -18,10 +18,10 @@
soc {
ranges = <0x7e000000 0x20000000 0x02000000>;
dma-ranges = <0x40000000 0x00000000 0x20000000>;
+ };
- arm-pmu {
- compatible = "arm,arm1176-pmu";
- };
+ arm-pmu {
+ compatible = "arm,arm1176-pmu";
};
};
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
index 1dfd76442777..c933e8413884 100644
--- a/arch/arm/boot/dts/bcm2836.dtsi
+++ b/arch/arm/boot/dts/bcm2836.dtsi
@@ -9,19 +9,19 @@
<0x40000000 0x40000000 0x00001000>;
dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
- local_intc: local_intc {
+ local_intc: local_intc@40000000 {
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
#interrupt-cells = <2>;
interrupt-parent = <&local_intc>;
};
+ };
- arm-pmu {
- compatible = "arm,cortex-a7-pmu";
- interrupt-parent = <&local_intc>;
- interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
- };
+ arm-pmu {
+ compatible = "arm,cortex-a7-pmu";
+ interrupt-parent = <&local_intc>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
};
timer {
diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi
index efa7d3387ab2..7704bb029605 100644
--- a/arch/arm/boot/dts/bcm2837.dtsi
+++ b/arch/arm/boot/dts/bcm2837.dtsi
@@ -8,7 +8,7 @@
<0x40000000 0x40000000 0x00001000>;
dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
- local_intc: local_intc {
+ local_intc: local_intc@40000000 {
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 18db25a5a66e..9d293decf8d3 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -465,7 +465,7 @@
status = "disabled";
};
- aux: aux@0x7e215000 {
+ aux: aux@7e215000 {
compatible = "brcm,bcm2835-aux";
#clock-cells = <1>;
reg = <0x7e215000 0x8>;
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index 6a44b8021702..f0e2008f7490 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -49,7 +49,7 @@
memory {
device_type = "memory";
- reg = <0x60000000 0x80000000>;
+ reg = <0x60000000 0x20000000>;
};
gpio-restart {
diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
index 08568ce24d06..da8bb9d60f99 100644
--- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
@@ -269,7 +269,7 @@
sata: sata@46000000 {
/* The ROM uses this muxmode */
- cortina,gemini-ata-muxmode = <3>;
+ cortina,gemini-ata-muxmode = <0>;
cortina,gemini-enable-sata-bridge;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6dl-icore-rqs.dts b/arch/arm/boot/dts/imx6dl-icore-rqs.dts
index cf42c2f5cdc7..1281bc39b7ab 100644
--- a/arch/arm/boot/dts/imx6dl-icore-rqs.dts
+++ b/arch/arm/boot/dts/imx6dl-icore-rqs.dts
@@ -42,7 +42,7 @@
/dts-v1/;
-#include "imx6q.dtsi"
+#include "imx6dl.dtsi"
#include "imx6qdl-icore-rqs.dtsi"
/ {
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index c1aa7a4518fb..a30ee9fcb3ae 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -71,6 +71,8 @@
};
&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;
twl: twl@48 {
@@ -189,7 +191,12 @@
>;
};
-
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};
&omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index b50b796e15c7..47915447a826 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -66,6 +66,8 @@
};
&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;
twl: twl@48 {
@@ -136,6 +138,12 @@
OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
>;
};
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};
&uart2 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index ec2c8baef62a..592e17fd4eeb 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -47,7 +47,7 @@
gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 */
wakeup-source;
autorepeat;
- debounce_interval = <50>;
+ debounce-interval = <50>;
};
};
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index 3b704cfed69a..a97458112ff6 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -280,7 +280,7 @@
max-frequency = <37500000>;
clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
<&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
resets = <&cru SRST_SDIO>;
@@ -298,7 +298,7 @@
max-frequency = <37500000>;
clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
<&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
default-sample-phase = <158>;
disable-wp;
dmas = <&pdma 12>;
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 780ec3a99b21..341deaf62ff6 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -621,7 +621,7 @@
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
<&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
@@ -634,7 +634,7 @@
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
<&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
pinctrl-names = "default";
pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>;
@@ -649,7 +649,7 @@
max-frequency = <37500000>;
clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
<&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
bus-width = <8>;
default-sample-phase = <158>;
fifo-depth = <0x100>;
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
index 99cfae875e12..5eae4776ffde 100644
--- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
@@ -110,26 +110,6 @@
};
};
-&cpu0 {
- cpu0-supply = <&vdd_cpu>;
- operating-points = <
- /* KHz uV */
- 1800000 1400000
- 1608000 1350000
- 1512000 1300000
- 1416000 1200000
- 1200000 1100000
- 1008000 1050000
- 816000 1000000
- 696000 950000
- 600000 900000
- 408000 900000
- 312000 900000
- 216000 900000
- 126000 900000
- >;
-};
-
&emmc {
status = "okay";
bus-width = <8>;
diff --git a/arch/arm/boot/dts/zx296702.dtsi b/arch/arm/boot/dts/zx296702.dtsi
index 8a74efdb6360..240e7a23d81f 100644
--- a/arch/arm/boot/dts/zx296702.dtsi
+++ b/arch/arm/boot/dts/zx296702.dtsi
@@ -56,7 +56,7 @@
clocks = <&topclk ZX296702_A9_PERIPHCLK>;
};
- l2cc: l2-cache-controller@0x00c00000 {
+ l2cc: l2-cache-controller@c00000 {
compatible = "arm,pl310-cache";
reg = <0x00c00000 0x1000>;
cache-unified;
@@ -67,30 +67,30 @@
arm,double-linefill-incr = <0>;
};
- pcu: pcu@0xa0008000 {
+ pcu: pcu@a0008000 {
compatible = "zte,zx296702-pcu";
reg = <0xa0008000 0x1000>;
};
- topclk: topclk@0x09800000 {
+ topclk: topclk@9800000 {
compatible = "zte,zx296702-topcrm-clk";
reg = <0x09800000 0x1000>;
#clock-cells = <1>;
};
- lsp1clk: lsp1clk@0x09400000 {
+ lsp1clk: lsp1clk@9400000 {
compatible = "zte,zx296702-lsp1crpm-clk";
reg = <0x09400000 0x1000>;
#clock-cells = <1>;
};
- lsp0clk: lsp0clk@0x0b000000 {
+ lsp0clk: lsp0clk@b000000 {
compatible = "zte,zx296702-lsp0crpm-clk";
reg = <0x0b000000 0x1000>;
#clock-cells = <1>;
};
- uart0: serial@0x09405000 {
+ uart0: serial@9405000 {
compatible = "zte,zx296702-uart";
reg = <0x09405000 0x1000>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
@@ -98,7 +98,7 @@
status = "disabled";
};
- uart1: serial@0x09406000 {
+ uart1: serial@9406000 {
compatible = "zte,zx296702-uart";
reg = <0x09406000 0x1000>;
interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
@@ -106,7 +106,7 @@
status = "disabled";
};
- mmc0: mmc@0x09408000 {
+ mmc0: mmc@9408000 {
compatible = "snps,dw-mshc";
#address-cells = <1>;
#size-cells = <0>;
@@ -119,7 +119,7 @@
status = "disabled";
};
- mmc1: mmc@0x0b003000 {
+ mmc1: mmc@b003000 {
compatible = "snps,dw-mshc";
#address-cells = <1>;
#size-cells = <0>;
@@ -132,7 +132,7 @@
status = "disabled";
};
- sysctrl: sysctrl@0xa0007000 {
+ sysctrl: sysctrl@a0007000 {
compatible = "zte,sysctrl", "syscon";
reg = <0xa0007000 0x1000>;
};
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 2f145c4af93a..92674f247a12 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -319,7 +319,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_RC_CORE=m
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_LIRC=m
+CONFIG_LIRC=y
CONFIG_RC_DEVICES=y
CONFIG_IR_RX51=m
CONFIG_V4L_PLATFORM_DRIVERS=y
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 629f8e9981f1..cf2701cb0de8 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -83,7 +83,7 @@ static void dummy_clock_access(struct timespec64 *ts)
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;;
+static clock_access_fn __read_boot_clock = dummy_clock_access;
void read_persistent_clock64(struct timespec64 *ts)
{
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 5638ce0c9524..63d6b404d88e 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
index 111bda8cdebd..be4b8b0a40ad 100644
--- a/arch/arm/kvm/hyp/banked-sr.c
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -20,6 +20,10 @@
#include <asm/kvm_hyp.h>
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
__asm__(".arch_extension virt");
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
diff --git a/arch/arm/mach-clps711x/board-dt.c b/arch/arm/mach-clps711x/board-dt.c
index ee1f83b1a332..4c89a8e9a2e3 100644
--- a/arch/arm/mach-clps711x/board-dt.c
+++ b/arch/arm/mach-clps711x/board-dt.c
@@ -69,7 +69,7 @@ static void clps711x_restart(enum reboot_mode mode, const char *cmd)
soft_restart(0);
}
-static const char *clps711x_compat[] __initconst = {
+static const char *const clps711x_compat[] __initconst = {
"cirrus,ep7209",
NULL
};
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index e457f299cd44..d6b11907380c 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -368,7 +368,7 @@ static struct spi_eeprom at25640a = {
.flags = EE_ADDR2,
};
-static struct spi_board_info dm355_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm355_evm_spi_info[] __initconst = {
{
.modalias = "at25",
.platform_data = &at25640a,
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index be997243447b..fad9a5611a5d 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -217,7 +217,7 @@ static struct spi_eeprom at25640a = {
.flags = EE_ADDR2,
};
-static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
+static const struct spi_board_info dm355_leopard_spi_info[] __initconst = {
{
.modalias = "at25",
.platform_data = &at25640a,
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index e75741fb2c1d..e3780986d2a3 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -726,7 +726,7 @@ static struct spi_eeprom at25640 = {
.flags = EE_ADDR2,
};
-static struct spi_board_info dm365_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm365_evm_spi_info[] __initconst = {
{
.modalias = "at25",
.platform_data = &at25640,
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 6b32dc527edc..2c20599cc350 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -41,7 +41,7 @@ config MACH_ARMADA_375
depends on ARCH_MULTI_V7
select ARMADA_370_XP_IRQ
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_375_CLK
select HAVE_ARM_SCU
@@ -57,7 +57,7 @@ config MACH_ARMADA_38X
bool "Marvell Armada 380/385 boards"
depends on ARCH_MULTI_V7
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARM_GLOBAL_TIMER
select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 43e3e188f521..fa512413a471 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c)
return -ENOMEM;
c->dent = d;
- d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
+ d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
if (!d) {
err = -ENOMEM;
goto err_out;
}
- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+ d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
if (!d) {
err = -ENOMEM;
goto err_out;
}
- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+ d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
if (!d) {
err = -ENOMEM;
goto err_out;
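
The debugfs change above is about matching the helper to the field's width: usecount is a u8 and rate an unsigned long, so exporting them through u32-sized accessors read bytes belonging to neighbouring members (and, on big-endian, put the real value at the wrong end of the window). A self-contained sketch of that failure mode, using a made-up struct and memcpy in place of the original pointer cast:

    /* Standalone illustration, not kernel code: a 4-byte read of a u8
     * field picks up the neighbouring members, which is what
     * debugfs_create_u32() on a u8 field effectively did.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_clk {
            uint8_t  usecount;      /* the field we actually want */
            uint8_t  flags;         /* neighbours that leak into a 4-byte read */
            uint16_t id;
    };

    int main(void)
    {
            struct fake_clk c = { .usecount = 3, .flags = 0x80, .id = 0x1234 };
            uint32_t wide;

            memcpy(&wide, &c.usecount, sizeof(wide));       /* 4-byte read, like the old cast */
            printf("4-byte read: 0x%08x\n", (unsigned)wide);        /* flags/id bytes included */
            printf("1-byte read: 0x%02x\n", (unsigned)c.usecount);  /* what debugfs_create_u8() reports */
            return 0;
    }
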
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 4bb6751864a5..fc5fb776a710 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -299,8 +299,6 @@ static void irq_save_context(void)
if (soc_is_dra7xx())
return;
- if (!sar_base)
- sar_base = omap4_get_sar_ram_base();
if (wakeupgen_ops && wakeupgen_ops->save_context)
wakeupgen_ops->save_context();
}
@@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node,
irq_hotplug_init();
irq_pm_init();
+ sar_base = omap4_get_sar_ram_base();
+
return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 124f9af34a15..34156eca8e23 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -977,6 +977,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name);
+ if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+ _enable_optional_clocks(oh);
+
if (oh->_clk)
clk_enable(oh->_clk);
@@ -985,9 +988,6 @@ static int _enable_clocks(struct omap_hwmod *oh)
clk_enable(os->_clk);
}
- if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
- _enable_optional_clocks(oh);
-
/* The opt clocks are controlled by the device driver. */
return 0;
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 366158a54fcd..6f68576e5695 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -186,7 +186,7 @@ static void omap_pm_end(void)
cpu_idle_poll_ctrl(false);
}
-static void omap_pm_finish(void)
+static void omap_pm_wake(void)
{
if (soc_is_omap34xx())
omap_prcm_irq_complete();
@@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
.begin = omap_pm_begin,
.end = omap_pm_end,
.enter = omap_pm_enter,
- .finish = omap_pm_finish,
+ .wake = omap_pm_wake,
.valid = suspend_valid_only_mem,
};
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index ece09c9461f7..d61fbd7a2840 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = {
.tick_resume = omap2_gp_timer_shutdown,
};
-static struct property device_disabled = {
- .name = "status",
- .length = sizeof("disabled"),
- .value = "disabled",
-};
-
static const struct of_device_id omap_timer_match[] __initconst = {
{ .compatible = "ti,omap2420-timer", },
{ .compatible = "ti,omap3430-timer", },
@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
of_get_property(np, "ti,timer-secure", NULL)))
continue;
- if (!of_device_is_compatible(np, "ti,omap-counter32k"))
- of_add_property(np, &device_disabled);
+ if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
+ struct property *prop;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return NULL;
+ prop->name = "status";
+ prop->value = "disabled";
+ prop->length = strlen(prop->value);
+ of_add_property(np, prop);
+ }
return np;
}
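
The timer hunk above replaces a single static struct property with one allocated per node. As I read of_add_property(), it links the property into the node's own list through the property's next pointer, so one shared object can only ever sit on one list; a minimal userspace sketch of what goes wrong when the same element is pushed onto two lists (names invented):

    /* Standalone sketch, not kernel code. */
    #include <stdio.h>

    struct prop {
            const char *name;
            struct prop *next;
    };

    struct node {
            const char *name;
            struct prop *props;
    };

    static void add_prop(struct node *n, struct prop *p)
    {
            p->next = n->props;             /* same chaining idea as of_add_property() */
            n->props = p;
    }

    int main(void)
    {
            struct prop a_compat = { .name = "compatible" };
            static struct prop status = { .name = "status" };  /* one shared object, like the old code */
            struct node a = { .name = "timer@a" }, b = { .name = "timer@b" };

            add_prop(&a, &a_compat);
            add_prop(&a, &status);          /* node a: status -> compatible */
            add_prop(&b, &status);          /* rewrites status.next: node a loses "compatible" */

            printf("node a now has:");
            for (struct prop *p = a.props; p; p = p->next)
                    printf(" %s", p->name);
            printf("\n");                   /* prints only "status" */
            return 0;
    }

Allocating a fresh property per node, as the patch does, keeps each node's list self-contained.
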
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 57058ac46f49..7e5d7a083707 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -23,7 +23,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/perf/arm_pmu.h>
#include <linux/regulator/machine.h>
#include <asm/outercache.h>
@@ -112,37 +111,6 @@ static void ux500_restart(enum reboot_mode mode, const char *cmd)
prcmu_system_reset(0);
}
-/*
- * The PMU IRQ lines of two cores are wired together into a single interrupt.
- * Bounce the interrupt to the other core if it's not ours.
- */
-static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
-{
- irqreturn_t ret = handler(irq, dev);
- int other = !smp_processor_id();
-
- if (ret == IRQ_NONE && cpu_online(other))
- irq_set_affinity(irq, cpumask_of(other));
-
- /*
- * We should be able to get away with the amount of IRQ_NONEs we give,
- * while still having the spurious IRQ detection code kick in if the
- * interrupt really starts hitting spuriously.
- */
- return ret;
-}
-
-static struct arm_pmu_platdata db8500_pmu_platdata = {
- .handle_irq = db8500_pmu_handler,
- .irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
-};
-
-static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
- /* Requires call-back bindings. */
- OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
- {},
-};
-
static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
{},
@@ -165,9 +133,6 @@ static void __init u8500_init_machine(void)
if (of_machine_is_compatible("st-ericsson,u8540"))
of_platform_populate(NULL, u8500_local_bus_nodes,
u8540_auxdata_lookup, NULL);
- else
- of_platform_populate(NULL, u8500_local_bus_nodes,
- u8500_auxdata_lookup, NULL);
}
static const char * stericsson_dt_platform_compat[] = {
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index aff6994950ba..a2399fd66e97 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
/*****************************************************************************
* Ethernet switch
****************************************************************************/
-static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
-static __initdata struct mdio_board_info
- orion_ge00_switch_board_info;
+static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
+ .bus_id = "orion-mii",
+ .modalias = "mv88e6085",
+};
void __init orion_ge00_switch_init(struct dsa_chip_data *d)
{
- struct mdio_board_info *bd;
unsigned int i;
if (!IS_BUILTIN(CONFIG_PHYLIB))
return;
- for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
- if (!strcmp(d->port_names[i], "cpu"))
+ for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
+ if (!strcmp(d->port_names[i], "cpu")) {
+ d->netdev[i] = &orion_ge00.dev;
break;
+ }
+ }
- bd = &orion_ge00_switch_board_info;
- bd->bus_id = orion_ge00_mvmdio_bus_name;
- bd->mdio_addr = d->sw_addr;
- d->netdev[i] = &orion_ge00.dev;
- strcpy(bd->modalias, "mv88e6085");
- bd->platform_data = d;
+ orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
+ orion_ge00_switch_board_info.platform_data = d;
mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
}
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index a80632641b39..70c776ef7aa7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -165,14 +165,14 @@
uart_A: serial@24000 {
compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
- reg = <0x0 0x24000 0x0 0x14>;
+ reg = <0x0 0x24000 0x0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
uart_B: serial@23000 {
compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
- reg = <0x0 0x23000 0x0 0x14>;
+ reg = <0x0 0x23000 0x0 0x18>;
interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 6cb3c2a52baf..4ee2e7951482 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -235,14 +235,14 @@
uart_A: serial@84c0 {
compatible = "amlogic,meson-gx-uart";
- reg = <0x0 0x84c0 0x0 0x14>;
+ reg = <0x0 0x84c0 0x0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
uart_B: serial@84dc {
compatible = "amlogic,meson-gx-uart";
- reg = <0x0 0x84dc 0x0 0x14>;
+ reg = <0x0 0x84dc 0x0 0x18>;
interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
@@ -287,7 +287,7 @@
uart_C: serial@8700 {
compatible = "amlogic,meson-gx-uart";
- reg = <0x0 0x8700 0x0 0x14>;
+ reg = <0x0 0x8700 0x0 0x18>;
interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
@@ -404,14 +404,14 @@
uart_AO: serial@4c0 {
compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
- reg = <0x0 0x004c0 0x0 0x14>;
+ reg = <0x0 0x004c0 0x0 0x18>;
interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
uart_AO_B: serial@4e0 {
compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
- reg = <0x0 0x004e0 0x0 0x14>;
+ reg = <0x0 0x004e0 0x0 0x18>;
interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 4f355f17eed6..c8514110b9da 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -631,6 +631,7 @@
internal_phy: ethernet-phy@8 {
compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22";
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
reg = <8>;
max-speed = <100>;
};
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
index 4220fbdcb24a..ff5c4c47b22b 100644
--- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
@@ -98,7 +98,7 @@
clock-output-names = "clk125mhz";
};
- pci {
+ pcie@30000000 {
compatible = "pci-host-ecam-generic";
device_type = "pci";
#interrupt-cells = <1>;
@@ -118,6 +118,7 @@
ranges =
<0x02000000 0 0x40000000 0 0x40000000 0 0x20000000
0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
+ bus-range = <0 0xff>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map =
/* addr pin ic icaddr icintr */
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index e94fa1a53192..047641fe294c 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -51,7 +51,7 @@
#size-cells = <2>;
ranges;
- ramoops@0x21f00000 {
+ ramoops@21f00000 {
compatible = "ramoops";
reg = <0x0 0x21f00000 0x0 0x00100000>;
record-size = <0x00020000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 9fbe4705ee88..94597e33c806 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -341,7 +341,7 @@
reg = <0 0x10005000 0 0x1000>;
};
- pio: pinctrl@0x10005000 {
+ pio: pinctrl@10005000 {
compatible = "mediatek,mt8173-pinctrl";
reg = <0 0x1000b000 0 0x1000>;
mediatek,pctl-regmap = <&syscfg_pctl_a>;
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index 492a011f14f6..1c8f1b86472d 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -140,16 +140,16 @@
};
agnoc@0 {
- qcom,pcie@00600000 {
+ qcom,pcie@600000 {
perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
};
- qcom,pcie@00608000 {
+ qcom,pcie@608000 {
status = "okay";
perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
};
- qcom,pcie@00610000 {
+ qcom,pcie@610000 {
status = "okay";
perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 4b2afcc4fdf4..0a6f7952bbb1 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -840,7 +840,7 @@
#size-cells = <1>;
ranges;
- pcie0: qcom,pcie@00600000 {
+ pcie0: qcom,pcie@600000 {
compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
status = "disabled";
power-domains = <&gcc PCIE0_GDSC>;
@@ -893,7 +893,7 @@
};
- pcie1: qcom,pcie@00608000 {
+ pcie1: qcom,pcie@608000 {
compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
power-domains = <&gcc PCIE1_GDSC>;
bus-range = <0x00 0xff>;
@@ -946,7 +946,7 @@
"bus_slave";
};
- pcie2: qcom,pcie@00610000 {
+ pcie2: qcom,pcie@610000 {
compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
power-domains = <&gcc PCIE2_GDSC>;
bus-range = <0x00 0xff>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 3890468678ce..28257724a56e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -132,17 +132,16 @@
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
clock_in_out = "input";
- /* shows instability at 1GBit right now */
- max-speed = <100>;
phy-supply = <&vcc_io>;
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&rgmiim1_pins>;
+ snps,force_thresh_dma_mode;
snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
- tx_delay = <0x26>;
- rx_delay = <0x11>;
+ tx_delay = <0x24>;
+ rx_delay = <0x18>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index a037ee56fead..cae341554486 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -730,7 +730,7 @@
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
<&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
status = "disabled";
};
@@ -741,7 +741,7 @@
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
<&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
status = "disabled";
};
@@ -752,7 +752,7 @@
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
<&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index aa4d07046a7b..03458ac44201 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -257,7 +257,7 @@
max-frequency = <150000000>;
clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
<&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
resets = <&cru SRST_SDIO0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 0f873c897d0d..ce592a4c0c4c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -457,7 +457,7 @@
assigned-clocks = <&cru SCLK_PCIEPHY_REF>;
assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>;
assigned-clock-rates = <100000000>;
- ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>;
+ ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 7aa2144e0d47..2605118d4b4c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1739,8 +1739,8 @@
compatible = "rockchip,rk3399-edp";
reg = <0x0 0xff970000 0x0 0x8000>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>;
- clock-names = "dp", "pclk";
+ clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>;
+ clock-names = "dp", "pclk", "grf";
pinctrl-names = "default";
pinctrl-0 = <&edp_hpd>;
power-domains = <&power RK3399_PD_EDP>;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index eda8c5f629fc..350c76a1d15b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -20,7 +20,7 @@
#define MPIDR_UP_BITMASK (0x1 << 30)
#define MPIDR_MT_BITMASK (0x1 << 24)
-#define MPIDR_HWID_BITMASK 0xff00ffffffUL
+#define MPIDR_HWID_BITMASK UL(0xff00ffffff)
#define MPIDR_LEVEL_BITS_SHIFT 3
#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 472ef944e932..902f9edacbea 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -28,7 +28,7 @@ struct stackframe {
unsigned long fp;
unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- unsigned int graph;
+ int graph;
#endif
};
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 543e11f0f657..e66b0fca99c2 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs)
* This is equivalent to the following test:
* (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/
-static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
- unsigned long limit = current_thread_info()->addr_limit;
+ unsigned long ret, limit = current_thread_info()->addr_limit;
__chk_user_ptr(addr);
asm volatile(
// A + B <= C + 1 for all A,B,C, in four easy steps:
// 1: X = A + B; X' = X % 2^64
- " adds %0, %0, %2\n"
+ " adds %0, %3, %2\n"
// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
" csel %1, xzr, %1, hi\n"
// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
// testing X' - C == 0, subject to the previous adjustments.
" sbcs xzr, %0, %1\n"
" cset %0, ls\n"
- : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+ : "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
- return addr;
+ return ret;
}
/*
@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
-#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
+#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
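
The __range_ok() rework above keeps the check the comment describes - (u65)addr + (u65)size <= (u65)addr_limit + 1 - while taking the user pointer directly instead of having every caller cast it. A portable restatement of the same overflow-proof test; this sketch assumes an exclusive limit rather than the kernel's inclusive addr_limit, and uses the gcc/clang __builtin_add_overflow() builtin in place of the hand-written adds/sbcs sequence:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* limit is the first address past the accessible range in this sketch */
    static bool range_ok(uint64_t addr, uint64_t size, uint64_t limit)
    {
            uint64_t end;

            if (__builtin_add_overflow(addr, size, &end))
                    return false;           /* addr + size wrapped past 2^64 */
            return end <= limit;
    }

    int main(void)
    {
            const uint64_t limit = UINT64_C(0x0001000000000000);    /* made-up user limit */

            printf("%d\n", range_ok(0x1000, 0x100, limit));                  /* 1 */
            printf("%d\n", range_ok(limit - 0x10, 0x10, limit));             /* 1: ends exactly at the limit */
            printf("%d\n", range_ok(limit - 0x10, 0x11, limit));             /* 0: one byte too far */
            printf("%d\n", range_ok(UINT64_C(0xfffffffffffffff0), 0x20, limit)); /* 0: 64-bit wrap */
            return 0;
    }
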
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c33b5e4010ab..68450e954d47 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
static int swp_handler(struct pt_regs *regs, u32 instr)
{
u32 destreg, data, type, address = 0;
+ const void __user *user_ptr;
int rn, rt2, res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
- if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+ user_ptr = (const void __user *)(unsigned long)(address & ~3);
+ if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
address);
goto fault;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 29b1f873e337..2985a067fc13 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -199,9 +199,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
};
static const struct arm64_ftr_bits ftr_ctr[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
/*
* Linux can handle differing I-cache policies. Userspace JITs will
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 75b220ba73a3..85a251b6dfa8 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -908,9 +908,9 @@ static void __armv8pmu_probe_pmu(void *info)
int pmuver;
dfr0 = read_sysreg(id_aa64dfr0_el1);
- pmuver = cpuid_feature_extract_signed_field(dfr0,
+ pmuver = cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_PMUVER_SHIFT);
- if (pmuver < 1)
+ if (pmuver == 0xf || pmuver == 0)
return;
probe->present = true;
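
The PMUVer fix above hinges on how the 4-bit field is extracted: read as a signed field, 0xf (the ARM ARM's "implementation defined" marker for this field) becomes -1, and the field does not follow the usual signed ID-register scheme anyway, so the probe now extracts it unsigned and accepts anything other than 0x0 and 0xf. A standalone sketch of the two extraction styles (the shift matches ID_AA64DFR0_EL1.PMUVer at bits [11:8]; everything else is made up):

    #include <stdint.h>
    #include <stdio.h>

    static int64_t extract_signed(uint64_t reg, unsigned shift)
    {
            /* mirrors the kernel helper: shift the field to the top, then
             * arithmetic-shift back down to sign-extend the 4 bits */
            return (int64_t)(reg << (64 - shift - 4)) >> 60;
    }

    static uint64_t extract_unsigned(uint64_t reg, unsigned shift)
    {
            return (reg >> shift) & 0xf;
    }

    int main(void)
    {
            const unsigned PMUVER_SHIFT = 8;
            uint64_t dfr0 = UINT64_C(0xf) << PMUVER_SHIFT;  /* pretend PMUVer == 0xf */

            printf("signed  : %lld\n", (long long)extract_signed(dfr0, PMUVER_SHIFT));            /* -1 */
            printf("unsigned: %llu\n", (unsigned long long)extract_unsigned(dfr0, PMUVER_SHIFT)); /* 15 */
            return 0;
    }
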
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ad8aeb098b31..c0da6efe5465 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -220,8 +220,15 @@ void __show_regs(struct pt_regs *regs)
show_regs_print_info(KERN_DEFAULT);
print_pstate(regs);
- printk("pc : %pS\n", (void *)regs->pc);
- printk("lr : %pS\n", (void *)lr);
+
+ if (!user_mode(regs)) {
+ printk("pc : %pS\n", (void *)regs->pc);
+ printk("lr : %pS\n", (void *)lr);
+ } else {
+ printk("pc : %016llx\n", regs->pc);
+ printk("lr : %016llx\n", lr);
+ }
+
printk("sp : %016llx\n", sp);
i = top_reg;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6618036ae6d4..9ae31f7e2243 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1419,7 +1419,7 @@ static int compat_ptrace_hbp_get(unsigned int note_type,
u64 addr = 0;
u32 ctrl = 0;
- int err, idx = compat_ptrace_hbp_num_to_idx(num);;
+ int err, idx = compat_ptrace_hbp_num_to_idx(num);
if (num & 1) {
err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 76809ccd309c..d5718a060672 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
+ if (WARN_ON_ONCE(frame->graph == -1))
+ return -EINVAL;
+ if (frame->graph < -1)
+ frame->graph += FTRACE_NOTRACE_DEPTH;
+
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 8b8bbd3eaa52..a382b2a1b84e 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
if (end < start || flags)
return -EINVAL;
- if (!access_ok(VERIFY_READ, start, end - start))
+ if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
return -EFAULT;
return __do_compat_cache_op(start, end);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index a4391280fba9..f258636273c9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = -1; /* no task info */
+ frame.graph = current->curr_ret_stack;
#endif
do {
int ret = unwind_frame(NULL, &frame);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index bbb0fde2780e..eb2d15147e8d 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -57,7 +57,7 @@ static const char *handler[]= {
"Error"
};
-int show_unhandled_signals = 1;
+int show_unhandled_signals = 0;
static void dump_backtrace_entry(unsigned long where)
{
@@ -526,14 +526,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
}
#endif
- if (show_unhandled_signals_ratelimited()) {
- pr_info("%s[%d]: syscall %d\n", current->comm,
- task_pid_nr(current), regs->syscallno);
- dump_instr("", regs);
- if (user_mode(regs))
- __show_regs(regs);
- }
-
return sys_ni_syscall();
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3161b853f29e..84a019f55022 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -933,6 +933,11 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
+
+ /* ioremap_page_range doesn't honour BBM */
+ if (pud_present(READ_ONCE(*pudp)))
+ return 0;
+
BUG_ON(phys & ~PUD_MASK);
set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
return 1;
@@ -942,6 +947,11 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
+
+ /* ioremap_page_range doesn't honour BBM */
+ if (pmd_present(READ_ONCE(*pmdp)))
+ return 0;
+
BUG_ON(phys & ~PMD_MASK);
set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
return 1;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 1d4f1da7c58f..a93350451e8e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
off = offsetof(struct bpf_array, map.max_entries);
emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR32(tmp, r2, tmp), ctx);
+ emit(A64_MOV(0, r3, r3), ctx);
emit(A64_CMP(0, r3, tmp), ctx);
- emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+ emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
@@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
*/
emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
emit(A64_CMP(1, tcc, tmp), ctx);
- emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+ emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
/* prog = array->ptrs[index];
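
The two condition-code changes above (GE to CS, GT to HI) plus the 32-bit mov make the tail-call bounds check an unsigned comparison on a zero-extended index. A small userspace illustration of why a signed compare is not enough here:

    #include <stdint.h>
    #include <stdio.h>

    static int lookup_signed(int32_t index, uint32_t max_entries)
    {
            return index < (int32_t)max_entries;    /* buggy: a "negative" index passes */
    }

    static int lookup_unsigned(uint32_t index, uint32_t max_entries)
    {
            return index < max_entries;             /* 0xffffffff is rejected */
    }

    int main(void)
    {
            uint32_t max_entries = 4;
            uint32_t evil = 0xffffffffu;            /* index with the top bit set */

            /* the cast wraps to -1 on the usual two's-complement targets */
            printf("signed check lets it through: %d\n", lookup_signed((int32_t)evil, max_entries));
            printf("unsigned check rejects it   : %d\n", lookup_unsigned(evil, max_entries));
            return 0;
    }
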
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 946681db8dc3..9a0fa66b81ac 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -86,7 +86,6 @@ struct compat_flock {
compat_off_t l_len;
s32 l_sysid;
compat_pid_t l_pid;
- short __unused;
s32 pad[4];
};
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 511acfd7ab0d..535add3f7791 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -52,7 +52,7 @@
#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
#define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000)
-#define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000400000000)
+#define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000)
#ifndef __ASSEMBLY__
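
The firmware.h fix above is a duplicated constant: FW_FEATURE_DRC_INFO had been given the same bit as FW_FEATURE_DRMEM_V2, so testing one flag silently tested the other. A sketch of the failure plus a compile-time guard that would have caught it (illustrative names, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define FEAT_DRMEM_V2   UINT64_C(0x0000000400000000)
    #define FEAT_DRC_INFO   UINT64_C(0x0000000800000000)    /* the corrected value */

    _Static_assert(FEAT_DRMEM_V2 != FEAT_DRC_INFO, "feature bits must be distinct");
    _Static_assert((FEAT_DRMEM_V2 & FEAT_DRC_INFO) == 0, "feature bits must not overlap");

    int main(void)
    {
            uint64_t fw_features = FEAT_DRMEM_V2;   /* firmware advertised only DRMEM_V2 */

            /* with the old duplicated value this printed 1 */
            printf("DRC_INFO enabled? %d\n", (fw_features & FEAT_DRC_INFO) != 0);
            return 0;
    }
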
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index beea2182d754..0c0b66fc5bfb 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -384,7 +384,8 @@ static void *eeh_report_resume(void *data, void *userdata)
eeh_pcid_put(dev);
pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
- eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+ if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
+ eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
#endif
return NULL;
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index adf044daafd7..d22c41c26bb3 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -874,7 +874,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
.mmu = 0,
.hash_ext = 0,
.radix_ext = 0,
- .byte22 = OV5_FEAT(OV5_DRC_INFO),
+ .byte22 = 0,
},
/* option vector 6: IBM PAPR hints */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f0f5cd4d2fe7..f9818d7d3381 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -188,7 +188,7 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
if (!qpage) {
pr_err("Failed to allocate queue %d for VCPU %d\n",
prio, xc->server_num);
- return -ENOMEM;;
+ return -ENOMEM;
}
memset(qpage, 0, 1 << xive->q_order);
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 916844f99c64..3f1803672c9b 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -98,7 +98,7 @@ static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
- dr_cell->flags = cpu_to_be32(lmb->flags);
+ dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}
static int drmem_update_dt_v2(struct device_node *memory,
@@ -121,7 +121,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
}
if (prev_lmb->aa_index != lmb->aa_index ||
- prev_lmb->flags != lmb->flags)
+ drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
lmb_sets++;
prev_lmb = lmb;
@@ -150,7 +150,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
}
if (prev_lmb->aa_index != lmb->aa_index ||
- prev_lmb->flags != lmb->flags) {
+ drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
/* end of one set, start of another */
dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
dr_cell++;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 872d1f6dd11e..a9636d8cba15 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -327,6 +327,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
break;
+ case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+ PPC_LWZ_OFFS(r_A, r_skb, K);
+ break;
case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
break;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 496e47696ed0..a6c92c78c9b2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1854,7 +1854,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
s64 rc;
if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
- return -ENODEV;;
+ return -ENODEV;
pe = &phb->ioda.pe_array[pdn->pe_number];
if (pe->tce_bypass_enabled) {
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 4fb21e17504a..092715b9674b 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -80,6 +80,10 @@ static void pnv_setup_rfi_flush(void)
if (np && of_property_read_bool(np, "disabled"))
enable--;
+ np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+ if (np && of_property_read_bool(np, "disabled"))
+ enable = 0;
+
of_node_put(np);
of_node_put(fw_features);
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 372d7ada1a0c..1a527625acf7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -482,7 +482,8 @@ static void pseries_setup_rfi_flush(void)
if (types == L1D_FLUSH_NONE)
types = L1D_FLUSH_FALLBACK;
- if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+ if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+ (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
enable = false;
} else {
/* Default to fallback if case hcall is not available */
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index c0319cbf1eec..5510366d169a 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -34,9 +34,9 @@
#define wmb() RISCV_FENCE(ow,ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
-#define smp_mb() RISCV_FENCE(rw,rw)
-#define smp_rmb() RISCV_FENCE(r,r)
-#define smp_wmb() RISCV_FENCE(w,w)
+#define __smp_mb() RISCV_FENCE(rw,rw)
+#define __smp_rmb() RISCV_FENCE(r,r)
+#define __smp_wmb() RISCV_FENCE(w,w)
/*
* This is a very specific barrier: it's currently only used in two places in
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 9c7d70715862..07c6e81163bf 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -22,22 +22,6 @@
#include "trace.h"
#include "trace-s390.h"
-
-static const intercept_handler_t instruction_handlers[256] = {
- [0x01] = kvm_s390_handle_01,
- [0x82] = kvm_s390_handle_lpsw,
- [0x83] = kvm_s390_handle_diag,
- [0xaa] = kvm_s390_handle_aa,
- [0xae] = kvm_s390_handle_sigp,
- [0xb2] = kvm_s390_handle_b2,
- [0xb6] = kvm_s390_handle_stctl,
- [0xb7] = kvm_s390_handle_lctl,
- [0xb9] = kvm_s390_handle_b9,
- [0xe3] = kvm_s390_handle_e3,
- [0xe5] = kvm_s390_handle_e5,
- [0xeb] = kvm_s390_handle_eb,
-};
-
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
@@ -129,16 +113,39 @@ static int handle_validity(struct kvm_vcpu *vcpu)
static int handle_instruction(struct kvm_vcpu *vcpu)
{
- intercept_handler_t handler;
-
vcpu->stat.exit_instruction++;
trace_kvm_s390_intercept_instruction(vcpu,
vcpu->arch.sie_block->ipa,
vcpu->arch.sie_block->ipb);
- handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
- if (handler)
- return handler(vcpu);
- return -EOPNOTSUPP;
+
+ switch (vcpu->arch.sie_block->ipa >> 8) {
+ case 0x01:
+ return kvm_s390_handle_01(vcpu);
+ case 0x82:
+ return kvm_s390_handle_lpsw(vcpu);
+ case 0x83:
+ return kvm_s390_handle_diag(vcpu);
+ case 0xaa:
+ return kvm_s390_handle_aa(vcpu);
+ case 0xae:
+ return kvm_s390_handle_sigp(vcpu);
+ case 0xb2:
+ return kvm_s390_handle_b2(vcpu);
+ case 0xb6:
+ return kvm_s390_handle_stctl(vcpu);
+ case 0xb7:
+ return kvm_s390_handle_lctl(vcpu);
+ case 0xb9:
+ return kvm_s390_handle_b9(vcpu);
+ case 0xe3:
+ return kvm_s390_handle_e3(vcpu);
+ case 0xe5:
+ return kvm_s390_handle_e5(vcpu);
+ case 0xeb:
+ return kvm_s390_handle_eb(vcpu);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
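
The intercept.c rewrite above swaps a table of function pointers for a switch, trading one indirect call per intercept for direct calls - presumably attractive on s390 where indirect branches now go through expoline thunks, though the hunk itself only shows the mechanical conversion. A minimal sketch of the two dispatch styles with made-up opcodes:

    #include <stdio.h>

    static int handle_a(void) { return 1; }
    static int handle_b(void) { return 2; }

    typedef int (*handler_t)(void);

    static const handler_t table[256] = {
            [0x01] = handle_a,
            [0x83] = handle_b,
    };

    static int dispatch_table(unsigned char op)
    {
            return table[op] ? table[op]() : -1;    /* one indirect call */
    }

    static int dispatch_switch(unsigned char op)
    {
            switch (op) {                           /* direct calls only */
            case 0x01: return handle_a();
            case 0x83: return handle_b();
            default:   return -1;
            }
    }

    int main(void)
    {
            printf("%d %d\n", dispatch_table(0x83), dispatch_switch(0x83));
            return 0;
    }
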
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index aabf46f5f883..b04616b57a94 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+ const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+ const u64 ckc = vcpu->arch.sie_block->ckc;
+
+ if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+ if ((s64)ckc >= (s64)now)
+ return 0;
+ } else if (ckc >= now) {
return 0;
+ }
return ckc_interrupts_enabled(vcpu);
}
@@ -187,12 +194,6 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
return kvm_s390_get_cpu_timer(vcpu) >> 63;
}
-static inline int is_ioirq(unsigned long irq_type)
-{
- return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
- (irq_type <= IRQ_PEND_IO_ISC_0));
-}
-
static uint64_t isc_to_isc_bits(int isc)
{
return (0x80 >> isc) << 24;
@@ -236,10 +237,15 @@ static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gis
return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
-static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.float_int.pending_irqs |
- vcpu->arch.local_int.pending_irqs |
+ vcpu->arch.local_int.pending_irqs;
+}
+
+static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+{
+ return pending_irqs_no_gisa(vcpu) |
kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}
@@ -337,7 +343,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
- if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
+ if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
return;
else if (psw_ioint_disabled(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
@@ -1011,24 +1017,6 @@ out:
return rc;
}
-typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
-
-static const deliver_irq_t deliver_irq_funcs[] = {
- [IRQ_PEND_MCHK_EX] = __deliver_machine_check,
- [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
- [IRQ_PEND_PROG] = __deliver_prog,
- [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
- [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
- [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
- [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
- [IRQ_PEND_RESTART] = __deliver_restart,
- [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
- [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
- [IRQ_PEND_EXT_SERVICE] = __deliver_service,
- [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
- [IRQ_PEND_VIRTIO] = __deliver_virtio,
-};
-
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
@@ -1066,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
- u64 now, cputm, sltime = 0;
+ const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+ const u64 ckc = vcpu->arch.sie_block->ckc;
+ u64 cputm, sltime = 0;
if (ckc_interrupts_enabled(vcpu)) {
- now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
- sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
- /* already expired or overflow? */
- if (!sltime || vcpu->arch.sie_block->ckc <= now)
+ if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+ if ((s64)now < (s64)ckc)
+ sltime = tod_to_ns((s64)ckc - (s64)now);
+ } else if (now < ckc) {
+ sltime = tod_to_ns(ckc - now);
+ }
+ /* already expired */
+ if (!sltime)
return 0;
if (cpu_timer_interrupts_enabled(vcpu)) {
cputm = kvm_s390_get_cpu_timer(vcpu);
@@ -1192,7 +1186,6 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- deliver_irq_t func;
int rc = 0;
unsigned long irq_type;
unsigned long irqs;
@@ -1212,16 +1205,57 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
while ((irqs = deliverable_irqs(vcpu)) && !rc) {
/* bits are in the reverse order of interrupt priority */
irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
- if (is_ioirq(irq_type)) {
+ switch (irq_type) {
+ case IRQ_PEND_IO_ISC_0:
+ case IRQ_PEND_IO_ISC_1:
+ case IRQ_PEND_IO_ISC_2:
+ case IRQ_PEND_IO_ISC_3:
+ case IRQ_PEND_IO_ISC_4:
+ case IRQ_PEND_IO_ISC_5:
+ case IRQ_PEND_IO_ISC_6:
+ case IRQ_PEND_IO_ISC_7:
rc = __deliver_io(vcpu, irq_type);
- } else {
- func = deliver_irq_funcs[irq_type];
- if (!func) {
- WARN_ON_ONCE(func == NULL);
- clear_bit(irq_type, &li->pending_irqs);
- continue;
- }
- rc = func(vcpu);
+ break;
+ case IRQ_PEND_MCHK_EX:
+ case IRQ_PEND_MCHK_REP:
+ rc = __deliver_machine_check(vcpu);
+ break;
+ case IRQ_PEND_PROG:
+ rc = __deliver_prog(vcpu);
+ break;
+ case IRQ_PEND_EXT_EMERGENCY:
+ rc = __deliver_emergency_signal(vcpu);
+ break;
+ case IRQ_PEND_EXT_EXTERNAL:
+ rc = __deliver_external_call(vcpu);
+ break;
+ case IRQ_PEND_EXT_CLOCK_COMP:
+ rc = __deliver_ckc(vcpu);
+ break;
+ case IRQ_PEND_EXT_CPU_TIMER:
+ rc = __deliver_cpu_timer(vcpu);
+ break;
+ case IRQ_PEND_RESTART:
+ rc = __deliver_restart(vcpu);
+ break;
+ case IRQ_PEND_SET_PREFIX:
+ rc = __deliver_set_prefix(vcpu);
+ break;
+ case IRQ_PEND_PFAULT_INIT:
+ rc = __deliver_pfault_init(vcpu);
+ break;
+ case IRQ_PEND_EXT_SERVICE:
+ rc = __deliver_service(vcpu);
+ break;
+ case IRQ_PEND_PFAULT_DONE:
+ rc = __deliver_pfault_done(vcpu);
+ break;
+ case IRQ_PEND_VIRTIO:
+ rc = __deliver_virtio(vcpu);
+ break;
+ default:
+ WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
+ clear_bit(irq_type, &li->pending_irqs);
}
}
@@ -1701,7 +1735,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
+ if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
+ kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
break;
default:
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
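
Several hunks above (ckc_irq_pending() and __calculate_sltime()) make the clock-comparator test depend on a control-register bit: with it set the comparison is signed, otherwise unsigned. A standalone sketch of how the same pair of 64-bit TOD values answers differently in the two modes; the gcr[0] bit from the patch is modelled as a plain bool here:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool ckc_pending(uint64_t now, uint64_t ckc, bool signed_mode)
    {
            if (signed_mode)
                    return (int64_t)ckc < (int64_t)now;     /* signed comparison */
            return ckc < now;                               /* unsigned comparison */
    }

    int main(void)
    {
            uint64_t now = UINT64_C(0x8000000000000010);    /* just past the 2^63 boundary */
            uint64_t ckc = UINT64_C(0x7ffffffffffffff0);    /* programmed slightly below it */

            printf("unsigned mode: pending=%d\n", ckc_pending(now, ckc, false)); /* 1 */
            printf("signed mode  : pending=%d\n", ckc_pending(now, ckc, true));  /* 0 */
            return 0;
    }
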
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba4c7092335a..77d7818130db 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void)
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end);
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+ u8 delta_idx = 0;
+
+ /*
+ * The TOD jumps by delta, we have to compensate this by adding
+ * -delta to the epoch.
+ */
+ delta = -delta;
+
+ /* sign-extension - we're adding to signed values below */
+ if ((s64)delta < 0)
+ delta_idx = -1;
+
+ scb->epoch += delta;
+ if (scb->ecd & ECD_MEF) {
+ scb->epdx += delta_idx;
+ if (scb->epoch < delta)
+ scb->epdx += 1;
+ }
+}
+
/*
* This callback is executed during stop_machine(). All CPUs are therefore
* temporarily stopped. In order not to change guest behavior, we have to
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
unsigned long long *delta = v;
list_for_each_entry(kvm, &vm_list, vm_list) {
- kvm->arch.epoch -= *delta;
kvm_for_each_vcpu(i, vcpu, kvm) {
- vcpu->arch.sie_block->epoch -= *delta;
+ kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+ if (i == 0) {
+ kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+ kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+ }
if (vcpu->arch.cputm_enabled)
vcpu->arch.cputm_start += *delta;
if (vcpu->arch.vsie_block)
- vcpu->arch.vsie_block->epoch -= *delta;
+ kvm_clock_sync_scb(vcpu->arch.vsie_block,
+ *delta);
}
}
return NOTIFY_OK;
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
return -EFAULT;
- if (test_kvm_facility(kvm, 139))
- kvm_s390_set_tod_clock_ext(kvm, &gtod);
- else if (gtod.epoch_idx == 0)
- kvm_s390_set_tod_clock(kvm, gtod.tod);
- else
+ if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
return -EINVAL;
+ kvm_s390_set_tod_clock(kvm, &gtod);
VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
gtod.epoch_idx, gtod.tod);
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
- u64 gtod;
+ struct kvm_s390_vm_tod_clock gtod = { 0 };
- if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+ if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+ sizeof(gtod.tod)))
return -EFAULT;
- kvm_s390_set_tod_clock(kvm, gtod);
- VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+ kvm_s390_set_tod_clock(kvm, &gtod);
+ VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
return 0;
}
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
mutex_lock(&vcpu->kvm->lock);
preempt_disable();
vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+ vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
preempt_enable();
mutex_unlock(&vcpu->kvm->lock);
if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3046,8 @@ retry:
return 0;
}
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
- const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+ const struct kvm_s390_vm_tod_clock *gtod)
{
struct kvm_vcpu *vcpu;
struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
get_tod_clock_ext((char *)&htod);
kvm->arch.epoch = gtod->tod - htod.tod;
- kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
- if (kvm->arch.epoch > gtod->tod)
- kvm->arch.epdx -= 1;
+ kvm->arch.epdx = 0;
+ if (test_kvm_facility(kvm, 139)) {
+ kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+ if (kvm->arch.epoch > gtod->tod)
+ kvm->arch.epdx -= 1;
+ }
kvm_s390_vcpu_block_all(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
mutex_unlock(&kvm->lock);
}
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
- struct kvm_vcpu *vcpu;
- int i;
-
- mutex_lock(&kvm->lock);
- preempt_disable();
- kvm->arch.epoch = tod - get_tod_clock();
- kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(i, vcpu, kvm)
- vcpu->arch.sie_block->epoch = kvm->arch.epoch;
- kvm_s390_vcpu_unblock_all(kvm);
- preempt_enable();
- mutex_unlock(&kvm->lock);
-}
-
/**
* kvm_arch_fault_in_page - fault-in guest page if necessary
* @vcpu: The corresponding virtual cpu
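
kvm_clock_sync_scb() above is, in effect, a 128-bit addition: the signed 64-bit delta is sign-extended into the epoch index and the carry out of the low-word addition is propagated by hand. A self-contained sketch of that arithmetic, checked against __int128 (a gcc/clang extension on 64-bit hosts); the struct and names are invented:

    #include <stdint.h>
    #include <stdio.h>

    struct epoch128 {
            uint64_t lo;    /* like scb->epoch */
            uint64_t hi;    /* like scb->epdx  */
    };

    static void add_sdelta(struct epoch128 *e, int64_t delta)
    {
            uint64_t d = (uint64_t)delta;
            uint64_t hi_ext = delta < 0 ? UINT64_MAX : 0;   /* sign extension, the "delta_idx" */

            e->lo += d;
            e->hi += hi_ext;
            if (e->lo < d)          /* unsigned wrap of the low word => carry */
                    e->hi += 1;
    }

    int main(void)
    {
            struct epoch128 e = { .lo = 0x10, .hi = 0 };
            int64_t delta = -0x20;

            unsigned __int128 ref = ((unsigned __int128)e.hi << 64) | e.lo;
            ref += (unsigned __int128)(__int128)delta;      /* sign-extended reference add */

            add_sdelta(&e, delta);
            printf("manual   : hi=%016llx lo=%016llx\n",
                   (unsigned long long)e.hi, (unsigned long long)e.lo);
            printf("reference: hi=%016llx lo=%016llx\n",
                   (unsigned long long)(uint64_t)(ref >> 64),
                   (unsigned long long)(uint64_t)ref);
            return 0;
    }
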
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index bd31b37b0e6f..f55ac0ef99ea 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -19,8 +19,6 @@
#include <asm/processor.h>
#include <asm/sclp.h>
-typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
-
/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1 1
@@ -283,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
- const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+ const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c4c4e157c036..f0b4185158af 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_vm_tod_clock gtod = { 0 };
int rc;
u8 ar;
- u64 op2, val;
+ u64 op2;
vcpu->stat.instruction_sck++;
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+ rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
- VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
- kvm_s390_set_tod_clock(vcpu->kvm, val);
+ VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+ kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
kvm_s390_set_psw_cc(vcpu, 0);
return 0;
@@ -795,55 +796,60 @@ out:
return rc;
}
-static const intercept_handler_t b2_handlers[256] = {
- [0x02] = handle_stidp,
- [0x04] = handle_set_clock,
- [0x10] = handle_set_prefix,
- [0x11] = handle_store_prefix,
- [0x12] = handle_store_cpu_address,
- [0x14] = kvm_s390_handle_vsie,
- [0x21] = handle_ipte_interlock,
- [0x29] = handle_iske,
- [0x2a] = handle_rrbe,
- [0x2b] = handle_sske,
- [0x2c] = handle_test_block,
- [0x30] = handle_io_inst,
- [0x31] = handle_io_inst,
- [0x32] = handle_io_inst,
- [0x33] = handle_io_inst,
- [0x34] = handle_io_inst,
- [0x35] = handle_io_inst,
- [0x36] = handle_io_inst,
- [0x37] = handle_io_inst,
- [0x38] = handle_io_inst,
- [0x39] = handle_io_inst,
- [0x3a] = handle_io_inst,
- [0x3b] = handle_io_inst,
- [0x3c] = handle_io_inst,
- [0x50] = handle_ipte_interlock,
- [0x56] = handle_sthyi,
- [0x5f] = handle_io_inst,
- [0x74] = handle_io_inst,
- [0x76] = handle_io_inst,
- [0x7d] = handle_stsi,
- [0xb1] = handle_stfl,
- [0xb2] = handle_lpswe,
-};
-
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
- intercept_handler_t handler;
-
- /*
- * A lot of B2 instructions are priviledged. Here we check for
- * the privileged ones, that we can handle in the kernel.
- * Anything else goes to userspace.
- */
- handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
- if (handler)
- return handler(vcpu);
-
- return -EOPNOTSUPP;
+ switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+ case 0x02:
+ return handle_stidp(vcpu);
+ case 0x04:
+ return handle_set_clock(vcpu);
+ case 0x10:
+ return handle_set_prefix(vcpu);
+ case 0x11:
+ return handle_store_prefix(vcpu);
+ case 0x12:
+ return handle_store_cpu_address(vcpu);
+ case 0x14:
+ return kvm_s390_handle_vsie(vcpu);
+ case 0x21:
+ case 0x50:
+ return handle_ipte_interlock(vcpu);
+ case 0x29:
+ return handle_iske(vcpu);
+ case 0x2a:
+ return handle_rrbe(vcpu);
+ case 0x2b:
+ return handle_sske(vcpu);
+ case 0x2c:
+ return handle_test_block(vcpu);
+ case 0x30:
+ case 0x31:
+ case 0x32:
+ case 0x33:
+ case 0x34:
+ case 0x35:
+ case 0x36:
+ case 0x37:
+ case 0x38:
+ case 0x39:
+ case 0x3a:
+ case 0x3b:
+ case 0x3c:
+ case 0x5f:
+ case 0x74:
+ case 0x76:
+ return handle_io_inst(vcpu);
+ case 0x56:
+ return handle_sthyi(vcpu);
+ case 0x7d:
+ return handle_stsi(vcpu);
+ case 0xb1:
+ return handle_stfl(vcpu);
+ case 0xb2:
+ return handle_lpswe(vcpu);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int handle_epsw(struct kvm_vcpu *vcpu)
@@ -1105,25 +1111,22 @@ static int handle_essa(struct kvm_vcpu *vcpu)
return 0;
}
-static const intercept_handler_t b9_handlers[256] = {
- [0x8a] = handle_ipte_interlock,
- [0x8d] = handle_epsw,
- [0x8e] = handle_ipte_interlock,
- [0x8f] = handle_ipte_interlock,
- [0xab] = handle_essa,
- [0xaf] = handle_pfmf,
-};
-
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
- intercept_handler_t handler;
-
- /* This is handled just as for the B2 instructions. */
- handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
- if (handler)
- return handler(vcpu);
-
- return -EOPNOTSUPP;
+ switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+ case 0x8a:
+ case 0x8e:
+ case 0x8f:
+ return handle_ipte_interlock(vcpu);
+ case 0x8d:
+ return handle_epsw(vcpu);
+ case 0xab:
+ return handle_essa(vcpu);
+ case 0xaf:
+ return handle_pfmf(vcpu);
+ default:
+ return -EOPNOTSUPP;
+ }
}
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
@@ -1271,22 +1274,20 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
-static const intercept_handler_t eb_handlers[256] = {
- [0x2f] = handle_lctlg,
- [0x25] = handle_stctg,
- [0x60] = handle_ri,
- [0x61] = handle_ri,
- [0x62] = handle_ri,
-};
-
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
- intercept_handler_t handler;
-
- handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
- if (handler)
- return handler(vcpu);
- return -EOPNOTSUPP;
+ switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
+ case 0x25:
+ return handle_stctg(vcpu);
+ case 0x2f:
+ return handle_lctlg(vcpu);
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ return handle_ri(vcpu);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int handle_tprot(struct kvm_vcpu *vcpu)
@@ -1346,10 +1347,12 @@ out_unlock:
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
- /* For e5xx... instructions we only handle TPROT */
- if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
+ switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+ case 0x01:
return handle_tprot(vcpu);
- return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int handle_sckpf(struct kvm_vcpu *vcpu)
@@ -1380,17 +1383,14 @@ static int handle_ptff(struct kvm_vcpu *vcpu)
return 0;
}
-static const intercept_handler_t x01_handlers[256] = {
- [0x04] = handle_ptff,
- [0x07] = handle_sckpf,
-};
-
int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
- intercept_handler_t handler;
-
- handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
- if (handler)
- return handler(vcpu);
- return -EOPNOTSUPP;
+ switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+ case 0x04:
+ return handle_ptff(vcpu);
+ case 0x07:
+ return handle_sckpf(vcpu);
+ default:
+ return -EOPNOTSUPP;
+ }
}
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index ec772700ff96..8961e3970901 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -821,6 +821,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ int guest_bp_isolation;
int rc;
handle_last_fault(vcpu, vsie_page);
@@ -831,6 +832,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
s390_handle_mcck();
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+ /* save current guest state of bp isolation override */
+ guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+ /*
+ * The guest is running with BPBC, so we have to force it on for our
+ * nested guest. This is done by enabling BPBC globally, so the BPBC
+ * control in the SCB (which the nested guest can modify) is simply
+ * ignored.
+ */
+ if (test_kvm_facility(vcpu->kvm, 82) &&
+ vcpu->arch.sie_block->fpf & FPF_BPBC)
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
local_irq_disable();
guest_enter_irqoff();
local_irq_enable();
@@ -840,6 +855,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
local_irq_disable();
guest_exit_irqoff();
local_irq_enable();
+
+ /* restore guest state for bp isolation override */
+ if (!guest_bp_isolation)
+ clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
if (rc == -EINTR) {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c1236b187824..eb7f43f23521 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -430,6 +430,7 @@ config GOLDFISH
config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
default y
+ select STACK_VALIDATION if HAVE_STACK_VALIDATION
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index fad55160dcb9..498c1b812300 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_RETPOLINE
- RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
- ifneq ($(RETPOLINE_CFLAGS),)
- KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
- endif
+ifneq ($(RETPOLINE_CFLAGS),)
+ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+endif
endif
archscripts: scripts_basic
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 353e20c3f114..886a9115af62 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -439,7 +439,7 @@ setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
struct efi_uga_draw_protocol *uga = NULL, *first_uga;
efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
unsigned long nr_ugas;
- u32 *handles = (u32 *)uga_handle;;
+ u32 *handles = (u32 *)uga_handle;
efi_status_t status = EFI_INVALID_PARAMETER;
int i;
@@ -484,7 +484,7 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
struct efi_uga_draw_protocol *uga = NULL, *first_uga;
efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
unsigned long nr_ugas;
- u64 *handles = (u64 *)uga_handle;;
+ u64 *handles = (u64 *)uga_handle;
efi_status_t status = EFI_INVALID_PARAMETER;
int i;
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index dce7092ab24a..be63330c5511 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
#define SIZEOF_PTREGS 21*8
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
/*
* Push registers and sanitize registers of values that a
* speculation attack might otherwise want to exploit. The
@@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with
* could be put to use in a speculative execution gadget.
* Interleave XOR with PUSH for better uop scheduling:
*/
+ .if \save_ret
+ pushq %rsi /* pt_regs->si */
+ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
+ movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
+ .else
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
+ .endif
pushq \rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq \rax /* pt_regs->ax */
pushq %r8 /* pt_regs->r8 */
- xorq %r8, %r8 /* nospec r8 */
+ xorl %r8d, %r8d /* nospec r8 */
pushq %r9 /* pt_regs->r9 */
- xorq %r9, %r9 /* nospec r9 */
+ xorl %r9d, %r9d /* nospec r9 */
pushq %r10 /* pt_regs->r10 */
- xorq %r10, %r10 /* nospec r10 */
+ xorl %r10d, %r10d /* nospec r10 */
pushq %r11 /* pt_regs->r11 */
- xorq %r11, %r11 /* nospec r11*/
+ xorl %r11d, %r11d /* nospec r11*/
pushq %rbx /* pt_regs->rbx */
xorl %ebx, %ebx /* nospec rbx*/
pushq %rbp /* pt_regs->rbp */
xorl %ebp, %ebp /* nospec rbp*/
pushq %r12 /* pt_regs->r12 */
- xorq %r12, %r12 /* nospec r12*/
+ xorl %r12d, %r12d /* nospec r12*/
pushq %r13 /* pt_regs->r13 */
- xorq %r13, %r13 /* nospec r13*/
+ xorl %r13d, %r13d /* nospec r13*/
pushq %r14 /* pt_regs->r14 */
- xorq %r14, %r14 /* nospec r14*/
+ xorl %r14d, %r14d /* nospec r14*/
pushq %r15 /* pt_regs->r15 */
- xorq %r15, %r15 /* nospec r15*/
+ xorl %r15d, %r15d /* nospec r15*/
UNWIND_HINT_REGS
+ .if \save_ret
+ pushq %rsi /* return address on top of stack */
+ .endif
.endm
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
@@ -172,12 +181,7 @@ For 32-bit we have the following conventions - kernel is built with
*/
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
- .if \ptregs_offset
- leaq \ptregs_offset(%rsp), %rbp
- .else
- mov %rsp, %rbp
- .endif
- orq $0x1, %rbp
+ leaq 1+\ptregs_offset(%rsp), %rbp
#endif
.endm
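Two details in the calling.h hunk above are worth spelling out. First, writing a 32-bit register (xorl %r8d,%r8d) zero-extends into the full 64-bit register, so the xorl forms clear the registers exactly as the old xorq forms did while never producing a longer encoding. Second, ENCODE_FRAME_POINTER's new leaq 1+\ptregs_offset(%rsp), %rbp matches the old mov/lea followed by orq $0x1 because the stack pointer and the pt_regs offset are 8-byte aligned there, so the low bit is already clear and adding 1 is the same as OR-ing it in. A small standalone check of that equivalence, under the stated alignment assumption:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /*
         * Model: in the entry code %rsp is 8-byte aligned and
         * \ptregs_offset is a multiple of 8, so the low bit of
         * (rsp + offset) is clear and "lea 1+off(rsp)" produces the
         * same value as "(rsp + off) | 1".
         */
        for (uint64_t rsp = 0xffffc90000000000ull;
             rsp < 0xffffc90000000000ull + 128; rsp += 8)
                for (uint64_t off = 0; off <= 16 * 8; off += 8)
                        assert(((rsp + off) | 1) == rsp + off + 1);

        puts("lea/or equivalence holds for 8-byte-aligned inputs");
        return 0;
}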
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 16c2c022540d..6ad064c8cf35 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
- /* Clobbers %ebx */
- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8971bd64d515..d5c7f18f79ac 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -364,8 +364,7 @@ ENTRY(__switch_to_asm)
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
- /* Clobbers %rbx */
- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
@@ -449,9 +448,19 @@ END(irq_entries_start)
*
* The invariant is that, if irq_count != -1, then the IRQ stack is in use.
*/
-.macro ENTER_IRQ_STACK regs=1 old_rsp
+.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
DEBUG_ENTRY_ASSERT_IRQS_OFF
+
+ .if \save_ret
+ /*
+ * If save_ret is set, the original stack contains one additional
+ * entry -- the return address. Therefore, move the address one
+ * entry below %rsp to \old_rsp.
+ */
+ leaq 8(%rsp), \old_rsp
+ .else
movq %rsp, \old_rsp
+ .endif
.if \regs
UNWIND_HINT_REGS base=\old_rsp
@@ -497,6 +506,15 @@ END(irq_entries_start)
.if \regs
UNWIND_HINT_REGS indirect=1
.endif
+
+ .if \save_ret
+ /*
+ * Push the return address to the stack. This return address can
+ * be found at the "real" original RSP, which was offset by 8 at
+ * the beginning of this macro.
+ */
+ pushq -8(\old_rsp)
+ .endif
.endm
/*
@@ -520,27 +538,65 @@ END(irq_entries_start)
.endm
/*
- * Interrupt entry/exit.
- *
- * Interrupt entry points save only callee clobbered registers in fast path.
+ * Interrupt entry helper function.
*
- * Entry runs with interrupts off.
+ * Entry runs with interrupts off. Stack layout at entry:
+ * +----------------------------------------------------+
+ * | regs->ss |
+ * | regs->rsp |
+ * | regs->eflags |
+ * | regs->cs |
+ * | regs->ip |
+ * +----------------------------------------------------+
+ * | regs->orig_ax = ~(interrupt number) |
+ * +----------------------------------------------------+
+ * | return address |
+ * +----------------------------------------------------+
*/
-
-/* 0(%rsp): ~(interrupt number) */
- .macro interrupt func
+ENTRY(interrupt_entry)
+ UNWIND_HINT_FUNC
+ ASM_CLAC
cld
- testb $3, CS-ORIG_RAX(%rsp)
+ testb $3, CS-ORIG_RAX+8(%rsp)
jz 1f
SWAPGS
- call switch_to_thread_stack
+
+ /*
+ * Switch to the thread stack. The IRET frame and orig_ax are
+ * on the stack, as well as the return address. RDI..R12 are
+ * not (yet) on the stack and space has not (yet) been
+ * allocated for them.
+ */
+ pushq %rdi
+
+ /* Need to switch before accessing the thread stack. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+ movq %rsp, %rdi
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+ /*
+ * We have RDI, return address, and orig_ax on the stack on
+ * top of the IRET frame. That means offset=24
+ */
+ UNWIND_HINT_IRET_REGS base=%rdi offset=24
+
+ pushq 7*8(%rdi) /* regs->ss */
+ pushq 6*8(%rdi) /* regs->rsp */
+ pushq 5*8(%rdi) /* regs->eflags */
+ pushq 4*8(%rdi) /* regs->cs */
+ pushq 3*8(%rdi) /* regs->ip */
+ pushq 2*8(%rdi) /* regs->orig_ax */
+ pushq 8(%rdi) /* return address */
+ UNWIND_HINT_FUNC
+
+ movq (%rdi), %rdi
1:
- PUSH_AND_CLEAR_REGS
- ENCODE_FRAME_POINTER
+ PUSH_AND_CLEAR_REGS save_ret=1
+ ENCODE_FRAME_POINTER 8
- testb $3, CS(%rsp)
+ testb $3, CS+8(%rsp)
jz 1f
/*
@@ -548,7 +604,7 @@ END(irq_entries_start)
*
* We need to tell lockdep that IRQs are off. We can't do this until
* we fix gsbase, and we should do it before enter_from_user_mode
- * (which can take locks). Since TRACE_IRQS_OFF idempotent,
+ * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
* the simplest way to handle it is to just call it twice if
* we enter from user mode. There's no reason to optimize this since
* TRACE_IRQS_OFF is a no-op if lockdep is off.
@@ -558,12 +614,15 @@ END(irq_entries_start)
CALL_enter_from_user_mode
1:
- ENTER_IRQ_STACK old_rsp=%rdi
+ ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
- call \func /* rdi points to pt_regs */
- .endm
+ ret
+END(interrupt_entry)
+
+
+/* Interrupt entry/exit. */
/*
* The interrupt stubs push (~vector+0x80) onto the stack and
@@ -571,9 +630,10 @@ END(irq_entries_start)
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
- ASM_CLAC
addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
- interrupt do_IRQ
+ call interrupt_entry
+ UNWIND_HINT_REGS indirect=1
+ call do_IRQ /* rdi points to pt_regs */
/* 0(%rsp): old RSP */
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -766,10 +826,11 @@ END(common_interrupt)
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
UNWIND_HINT_IRET_REGS
- ASM_CLAC
pushq $~(\num)
.Lcommon_\sym:
- interrupt \do_sym
+ call interrupt_entry
+ UNWIND_HINT_REGS indirect=1
+ call \do_sym /* rdi points to pt_regs */
jmp ret_from_intr
END(\sym)
.endm
@@ -832,34 +893,6 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
*/
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
-/*
- * Switch to the thread stack. This is called with the IRET frame and
- * orig_ax on the stack. (That is, RDI..R12 are not on the stack and
- * space has not been allocated for them.)
- */
-ENTRY(switch_to_thread_stack)
- UNWIND_HINT_FUNC
-
- pushq %rdi
- /* Need to switch before accessing the thread stack. */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
- movq %rsp, %rdi
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
-
- pushq 7*8(%rdi) /* regs->ss */
- pushq 6*8(%rdi) /* regs->rsp */
- pushq 5*8(%rdi) /* regs->eflags */
- pushq 4*8(%rdi) /* regs->cs */
- pushq 3*8(%rdi) /* regs->ip */
- pushq 2*8(%rdi) /* regs->orig_ax */
- pushq 8(%rdi) /* return address */
- UNWIND_HINT_FUNC
-
- movq (%rdi), %rdi
- ret
-END(switch_to_thread_stack)
-
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -875,12 +908,8 @@ ENTRY(\sym)
pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif
- /* Save all registers in pt_regs */
- PUSH_AND_CLEAR_REGS
- ENCODE_FRAME_POINTER
-
.if \paranoid < 2
- testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
+ testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */
jnz .Lfrom_usermode_switch_stack_\@
.endif
@@ -1130,13 +1159,15 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
#endif
/*
- * Switch gs if needed.
+ * Save all registers in pt_regs, and switch gs if needed.
* Use slow, but surefire "are we in kernel?" check.
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
ENTRY(paranoid_entry)
UNWIND_HINT_FUNC
cld
+ PUSH_AND_CLEAR_REGS save_ret=1
+ ENCODE_FRAME_POINTER 8
movl $1, %ebx
movl $MSR_GS_BASE, %ecx
rdmsr
@@ -1181,12 +1212,14 @@ ENTRY(paranoid_exit)
END(paranoid_exit)
/*
- * Switch gs if needed.
+ * Save all registers in pt_regs, and switch GS if needed.
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
- UNWIND_HINT_REGS offset=8
+ UNWIND_HINT_FUNC
cld
+ PUSH_AND_CLEAR_REGS save_ret=1
+ ENCODE_FRAME_POINTER 8
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1577,8 +1610,6 @@ end_repeat_nmi:
* frame to point back to repeat_nmi.
*/
pushq $-1 /* ORIG_RAX: no syscall to restart */
- PUSH_AND_CLEAR_REGS
- ENCODE_FRAME_POINTER
/*
* Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
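The stack layout documented for interrupt_entry above (hardware IRET frame, then ~vector in orig_ax, then the return address pushed by the call) can be mirrored as a C struct to make the +8 offset adjustments in these hunks easier to follow. This is an illustrative model only; the kernel works with raw offsets, not such a struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative layout of the stack as interrupt_entry sees it: the return
 * address pushed by "call interrupt_entry" sits at 0(%rsp), ~vector
 * (orig_ax) at 8(%rsp), and the hardware IRET frame above that.
 */
struct irq_entry_stack {
        uint64_t return_address;        /* pushed by the call */
        uint64_t orig_ax;               /* ~(interrupt number) */
        uint64_t ip;
        uint64_t cs;
        uint64_t eflags;
        uint64_t rsp;
        uint64_t ss;
};

int main(void)
{
        /* CS-ORIG_RAX is 16 within pt_regs; with the extra return address
         * on top the check becomes "testb $3, CS-ORIG_RAX+8(%rsp)". */
        printf("cs lives at offset %zu (16 + 8)\n",
               offsetof(struct irq_entry_stack, cs));
        return 0;
}

With the return address on top, CS sits 24 bytes above %rsp, which is exactly the displacement the new testb uses.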
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index fd65e016e413..e811dd9c5e99 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat)
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
- xorq %r8, %r8 /* nospec r8 */
+ xorl %r8d, %r8d /* nospec r8 */
pushq $0 /* pt_regs->r9 = 0 */
- xorq %r9, %r9 /* nospec r9 */
+ xorl %r9d, %r9d /* nospec r9 */
pushq $0 /* pt_regs->r10 = 0 */
- xorq %r10, %r10 /* nospec r10 */
+ xorl %r10d, %r10d /* nospec r10 */
pushq $0 /* pt_regs->r11 = 0 */
- xorq %r11, %r11 /* nospec r11 */
+ xorl %r11d, %r11d /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp (will be overwritten) */
xorl %ebp, %ebp /* nospec rbp */
pushq $0 /* pt_regs->r12 = 0 */
- xorq %r12, %r12 /* nospec r12 */
+ xorl %r12d, %r12d /* nospec r12 */
pushq $0 /* pt_regs->r13 = 0 */
- xorq %r13, %r13 /* nospec r13 */
+ xorl %r13d, %r13d /* nospec r13 */
pushq $0 /* pt_regs->r14 = 0 */
- xorq %r14, %r14 /* nospec r14 */
+ xorl %r14d, %r14d /* nospec r14 */
pushq $0 /* pt_regs->r15 = 0 */
- xorq %r15, %r15 /* nospec r15 */
+ xorl %r15d, %r15d /* nospec r15 */
cld
/*
@@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
pushq %rbp /* pt_regs->cx (stashed in bp) */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
- xorq %r8, %r8 /* nospec r8 */
+ xorl %r8d, %r8d /* nospec r8 */
pushq $0 /* pt_regs->r9 = 0 */
- xorq %r9, %r9 /* nospec r9 */
+ xorl %r9d, %r9d /* nospec r9 */
pushq $0 /* pt_regs->r10 = 0 */
- xorq %r10, %r10 /* nospec r10 */
+ xorl %r10d, %r10d /* nospec r10 */
pushq $0 /* pt_regs->r11 = 0 */
- xorq %r11, %r11 /* nospec r11 */
+ xorl %r11d, %r11d /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp (will be overwritten) */
xorl %ebp, %ebp /* nospec rbp */
pushq $0 /* pt_regs->r12 = 0 */
- xorq %r12, %r12 /* nospec r12 */
+ xorl %r12d, %r12d /* nospec r12 */
pushq $0 /* pt_regs->r13 = 0 */
- xorq %r13, %r13 /* nospec r13 */
+ xorl %r13d, %r13d /* nospec r13 */
pushq $0 /* pt_regs->r14 = 0 */
- xorq %r14, %r14 /* nospec r14 */
+ xorl %r14d, %r14d /* nospec r14 */
pushq $0 /* pt_regs->r15 = 0 */
- xorq %r15, %r15 /* nospec r15 */
+ xorl %r15d, %r15d /* nospec r15 */
/*
* User mode is traced as though IRQs are on, and SYSENTER
@@ -298,9 +298,9 @@ sysret32_from_system_call:
*/
SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
- xorq %r8, %r8
- xorq %r9, %r9
- xorq %r10, %r10
+ xorl %r8d, %r8d
+ xorl %r9d, %r9d
+ xorl %r10d, %r10d
swapgs
sysretl
END(entry_SYSCALL_compat)
@@ -347,10 +347,23 @@ ENTRY(entry_INT80_compat)
*/
movl %eax, %eax
+ /* switch to thread stack expects orig_ax and rdi to be pushed */
pushq %rax /* pt_regs->orig_ax */
+ pushq %rdi /* pt_regs->di */
+
+ /* Need to switch before accessing the thread stack. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+ movq %rsp, %rdi
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+ pushq 6*8(%rdi) /* regs->ss */
+ pushq 5*8(%rdi) /* regs->rsp */
+ pushq 4*8(%rdi) /* regs->eflags */
+ pushq 3*8(%rdi) /* regs->cs */
+ pushq 2*8(%rdi) /* regs->ip */
+ pushq 1*8(%rdi) /* regs->orig_ax */
- /* switch to thread stack expects orig_ax to be pushed */
- call switch_to_thread_stack
+ movq (%rdi), %rdi /* restore %rdi */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
@@ -358,25 +371,25 @@ ENTRY(entry_INT80_compat)
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
- xorq %r8, %r8 /* nospec r8 */
+ xorl %r8d, %r8d /* nospec r8 */
pushq $0 /* pt_regs->r9 = 0 */
- xorq %r9, %r9 /* nospec r9 */
+ xorl %r9d, %r9d /* nospec r9 */
pushq $0 /* pt_regs->r10 = 0 */
- xorq %r10, %r10 /* nospec r10 */
+ xorl %r10d, %r10d /* nospec r10 */
pushq $0 /* pt_regs->r11 = 0 */
- xorq %r11, %r11 /* nospec r11 */
+ xorl %r11d, %r11d /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp */
xorl %ebp, %ebp /* nospec rbp */
pushq %r12 /* pt_regs->r12 */
- xorq %r12, %r12 /* nospec r12 */
+ xorl %r12d, %r12d /* nospec r12 */
pushq %r13 /* pt_regs->r13 */
- xorq %r13, %r13 /* nospec r13 */
+ xorl %r13d, %r13d /* nospec r13 */
pushq %r14 /* pt_regs->r14 */
- xorq %r14, %r14 /* nospec r14 */
+ xorl %r14d, %r14d /* nospec r14 */
pushq %r15 /* pt_regs->r15 */
- xorq %r15, %r15 /* nospec r15 */
+ xorl %r15d, %r15d /* nospec r15 */
cld
/*
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 4d4015ddcf26..c356098b6fb9 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -7,6 +7,8 @@
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H
+#include <asm/nospec-branch.h>
+
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
@@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
}
static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
return error;
}
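The APM wrappers above (and the EFI wrappers later in this patch) bracket every firmware call with firmware_restrict_branch_speculation_start()/_end(), which the nospec-branch.h hunk implements as an IBRS MSR write gated on X86_FEATURE_USE_IBRS_FW. A hedged, self-contained sketch of that bracketing shape, with the firmware call and the MSR writes stubbed out:

#include <stdio.h>

/*
 * Stand-ins for the real firmware_restrict_branch_speculation_start()/
 * _end() macros, which disable preemption and toggle IBRS through an
 * alternatives-patched WRMSR when X86_FEATURE_USE_IBRS_FW is set.
 */
static void firmware_restrict_branch_speculation_start(void)
{
        puts("preempt off, IBRS on");
}

static void firmware_restrict_branch_speculation_end(void)
{
        puts("IBRS off, preempt on");
}

static int apm_style_firmware_call(void)
{
        int error = 0;

        firmware_restrict_branch_speculation_start();
        /* ... the actual BIOS/EFI call would sit here ... */
        firmware_restrict_branch_speculation_end();

        return error;
}

int main(void)
{
        return apm_style_firmware_call();
}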
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 4d111616524b..1908214b9125 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -38,7 +38,4 @@ INDIRECT_THUNK(dx)
INDIRECT_THUNK(si)
INDIRECT_THUNK(di)
INDIRECT_THUNK(bp)
-asmlinkage void __fill_rsb(void);
-asmlinkage void __clear_rsb(void);
-
#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0dfe4d3f74e2..f41079da38c5 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -213,6 +213,7 @@
#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 85f6ccb80b91..a399c1ebf6f0 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -6,6 +6,7 @@
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
+#include <asm/nospec-branch.h>
/*
* We map the EFI regions needed for runtime services non-contiguously,
@@ -36,8 +37,18 @@
extern asmlinkage unsigned long efi_call_phys(void *, ...);
-#define arch_efi_call_virt_setup() kernel_fpu_begin()
-#define arch_efi_call_virt_teardown() kernel_fpu_end()
+#define arch_efi_call_virt_setup() \
+({ \
+ kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
+})
+
+#define arch_efi_call_virt_teardown() \
+({ \
+ firmware_restrict_branch_speculation_end(); \
+ kernel_fpu_end(); \
+})
+
/*
* Wrap all the virtual calls in a way that forces the parameters on the stack.
@@ -73,6 +84,7 @@ struct efi_scratch {
efi_sync_low_kernel_mappings(); \
preempt_disable(); \
__kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
\
if (efi_scratch.use_pgd) { \
efi_scratch.prev_cr3 = __read_cr3(); \
@@ -91,6 +103,7 @@ struct efi_scratch {
__flush_tlb_all(); \
} \
\
+ firmware_restrict_branch_speculation_end(); \
__kernel_fpu_end(); \
preempt_enable(); \
})
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dd6f57a54a26..0a9e330b34f0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1464,7 +1464,4 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
#define put_smstate(type, buf, offset, val) \
*(type *)((buf) + (offset) - 0x7e00) = val
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 55520cec8b27..7fb1047d61c7 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -37,7 +37,12 @@ struct cpu_signature {
struct device;
-enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+enum ucode_state {
+ UCODE_OK = 0,
+ UCODE_UPDATED,
+ UCODE_NFOUND,
+ UCODE_ERROR,
+};
struct microcode_ops {
enum ucode_state (*request_microcode_user) (int cpu,
@@ -54,7 +59,7 @@ struct microcode_ops {
* are being called.
* See also the "Synchronization" section in microcode_core.c.
*/
- int (*apply_microcode) (int cpu);
+ enum ucode_state (*apply_microcode) (int cpu);
int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};
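apply_microcode() now reports an enum ucode_state instead of 0/-1, and the enum is ordered so that everything after UCODE_NFOUND counts as a failure; the reload path later in this patch relies on a single "state > UCODE_NFOUND" comparison. A standalone check of that ordering:

#include <stdio.h>

enum ucode_state {
        UCODE_OK = 0,
        UCODE_UPDATED,
        UCODE_NFOUND,
        UCODE_ERROR,
};

static const char *name(enum ucode_state s)
{
        switch (s) {
        case UCODE_OK:      return "ok (nothing newer to apply)";
        case UCODE_UPDATED: return "updated";
        case UCODE_NFOUND:  return "no matching patch found";
        case UCODE_ERROR:   return "update failed";
        }
        return "?";
}

int main(void)
{
        for (int s = UCODE_OK; s <= UCODE_ERROR; s++)
                printf("%d  %-28s failure: %s\n", s,
                       name((enum ucode_state)s),
                       s > UCODE_NFOUND ? "yes" : "no");
        return 0;
}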
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index c931b88982a0..1de72ce514cd 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot)
return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
#else
BUG();
+ return (void *)fix_to_virt(FIX_HOLE);
#endif
}
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 76b058533e47..d0dabeae0505 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -8,6 +8,50 @@
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS 16 /* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version — two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp) \
+ mov $(nr/2), reg; \
+771: \
+ call 772f; \
+773: /* speculation trap */ \
+ pause; \
+ lfence; \
+ jmp 773b; \
+772: \
+ call 774f; \
+775: /* speculation trap */ \
+ pause; \
+ lfence; \
+ jmp 775b; \
+774: \
+ dec reg; \
+ jnz 771b; \
+ add $(BITS_PER_LONG/8) * nr, sp;
+
#ifdef __ASSEMBLY__
/*
@@ -24,6 +68,18 @@
.endm
/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+.macro ANNOTATE_RETPOLINE_SAFE
+ .Lannotate_\@:
+ .pushsection .discard.retpoline_safe
+ _ASM_PTR .Lannotate_\@
+ .popsection
+.endm
+
+/*
* These are the bare retpoline primitives for indirect jmp and call.
* Do not use these directly; they only exist to make the ALTERNATIVE
* invocation below less ugly.
@@ -59,9 +115,9 @@
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
ANNOTATE_NOSPEC_ALTERNATIVE
- ALTERNATIVE_2 __stringify(jmp *\reg), \
+ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \
__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
- __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
jmp *\reg
#endif
@@ -70,18 +126,25 @@
.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
ANNOTATE_NOSPEC_ALTERNATIVE
- ALTERNATIVE_2 __stringify(call *\reg), \
+ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
- __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
call *\reg
#endif
.endm
-/* This clobbers the BX register */
-.macro FILL_RETURN_BUFFER nr:req ftr:req
+ /*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above, manually.
+ */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
- ALTERNATIVE "", "call __clear_rsb", \ftr
+ ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE "jmp .Lskip_rsb_\@", \
+ __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
+ \ftr
+.Lskip_rsb_\@:
#endif
.endm
@@ -93,6 +156,12 @@
".long 999b - .\n\t" \
".popsection\n\t"
+#define ANNOTATE_RETPOLINE_SAFE \
+ "999:\n\t" \
+ ".pushsection .discard.retpoline_safe\n\t" \
+ _ASM_PTR " 999b\n\t" \
+ ".popsection\n\t"
+
#if defined(CONFIG_X86_64) && defined(RETPOLINE)
/*
@@ -102,6 +171,7 @@
# define CALL_NOSPEC \
ANNOTATE_NOSPEC_ALTERNATIVE \
ALTERNATIVE( \
+ ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
"call __x86_indirect_thunk_%V[thunk_target]\n", \
X86_FEATURE_RETPOLINE)
@@ -156,25 +226,90 @@ extern char __indirect_thunk_end[];
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
- alternative_input("",
- "call __fill_rsb",
- X86_FEATURE_RETPOLINE,
- ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
+ unsigned long loops;
+
+ asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE("jmp 910f",
+ __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+ X86_FEATURE_RETPOLINE)
+ "910:"
+ : "=r" (loops), ASM_CALL_CONSTRAINT
+ : : "memory" );
#endif
}
+#define alternative_msr_write(_msr, _val, _feature) \
+ asm volatile(ALTERNATIVE("", \
+ "movl %[msr], %%ecx\n\t" \
+ "movl %[val], %%eax\n\t" \
+ "movl $0, %%edx\n\t" \
+ "wrmsr", \
+ _feature) \
+ : : [msr] "i" (_msr), [val] "i" (_val) \
+ : "eax", "ecx", "edx", "memory")
+
static inline void indirect_branch_prediction_barrier(void)
{
- asm volatile(ALTERNATIVE("",
- "movl %[msr], %%ecx\n\t"
- "movl %[val], %%eax\n\t"
- "movl $0, %%edx\n\t"
- "wrmsr",
- X86_FEATURE_USE_IBPB)
- : : [msr] "i" (MSR_IA32_PRED_CMD),
- [val] "i" (PRED_CMD_IBPB)
- : "eax", "ecx", "edx", "memory");
+ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+ X86_FEATURE_USE_IBPB);
}
+/*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+ *
+ * (Implemented as CPP macros due to header hell.)
+ */
+#define firmware_restrict_branch_speculation_start() \
+do { \
+ preempt_disable(); \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
+ X86_FEATURE_USE_IBRS_FW); \
+} while (0)
+
+#define firmware_restrict_branch_speculation_end() \
+do { \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
+ X86_FEATURE_USE_IBRS_FW); \
+ preempt_enable(); \
+} while (0)
+
#endif /* __ASSEMBLY__ */
+
+/*
+ * Below is used in the eBPF JIT compiler and emits the byte sequence
+ * for the following assembly:
+ *
+ * With retpolines configured:
+ *
+ * callq do_rop
+ * spec_trap:
+ * pause
+ * lfence
+ * jmp spec_trap
+ * do_rop:
+ * mov %rax,(%rsp)
+ * retq
+ *
+ * Without retpolines configured:
+ *
+ * jmp *%rax
+ */
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE 17
+# define RETPOLINE_RAX_BPF_JIT() \
+ EMIT1_off32(0xE8, 7); /* callq do_rop */ \
+ /* spec_trap: */ \
+ EMIT2(0xF3, 0x90); /* pause */ \
+ EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
+ EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
+ /* do_rop: */ \
+ EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
+ EMIT1(0xC3); /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE 2
+# define RETPOLINE_RAX_BPF_JIT() \
+ EMIT2(0xFF, 0xE0); /* jmp *%rax */
+#endif
+
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
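RETPOLINE_RAX_BPF_JIT() above emits a fixed 17-byte sequence; transcribing the EMIT*() calls into a byte array makes it easy to confirm RETPOLINE_RAX_BPF_JIT_SIZE. The bytes below are read straight off the macro, with the relative displacements worked out (rel32 = 7 skips the 7-byte speculation trap, rel8 = -7 loops back to it):

#include <stdio.h>

/* Byte-for-byte transcription of RETPOLINE_RAX_BPF_JIT() above. */
static const unsigned char retpoline_rax[] = {
        0xE8, 0x07, 0x00, 0x00, 0x00,   /* callq do_rop (rel32 = 7)  */
        /* spec_trap: */
        0xF3, 0x90,                     /* pause                     */
        0x0F, 0xAE, 0xE8,               /* lfence                    */
        0xEB, 0xF9,                     /* jmp spec_trap (rel8 = -7) */
        /* do_rop: */
        0x48, 0x89, 0x04, 0x24,         /* mov %rax,(%rsp)           */
        0xC3,                           /* retq                      */
};

int main(void)
{
        /* Matches RETPOLINE_RAX_BPF_JIT_SIZE == 17 in the hunk above. */
        printf("sequence length: %zu bytes\n", sizeof(retpoline_rax));
        return 0;
}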
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 554841fab717..c83a2f418cea 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -7,6 +7,7 @@
#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
+#include <asm/nospec-branch.h>
#include <asm/paravirt_types.h>
@@ -879,23 +880,27 @@ extern void default_banner(void);
#define INTERRUPT_RETURN \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
+ ANNOTATE_RETPOLINE_SAFE; \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
#define DISABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+ ANNOTATE_RETPOLINE_SAFE; \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define ENABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+ ANNOTATE_RETPOLINE_SAFE; \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX \
push %ecx; push %edx; \
+ ANNOTATE_RETPOLINE_SAFE; \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
pop %edx; pop %ecx
#else /* !CONFIG_X86_32 */
@@ -917,21 +922,25 @@ extern void default_banner(void);
*/
#define SWAPGS \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
)
#define GET_CR2_INTO_RAX \
- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
#define USERGS_SYSRET64 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+ ANNOTATE_RETPOLINE_SAFE; \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+ ANNOTATE_RETPOLINE_SAFE; \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f624f1f10316..180bc0bff0fb 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -43,6 +43,7 @@
#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
+#include <asm/nospec-branch.h>
struct page;
struct thread_struct;
@@ -392,7 +393,9 @@ int paravirt_disable_iospace(void);
* offset into the paravirt_patch_template structure, and can therefore be
* freely converted back into a structure offset.
*/
-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
+#define PARAVIRT_CALL \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%c[paravirt_opptr];"
/*
* These macros are intended to wrap calls through one of the paravirt
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 63c2552b6b65..b444d83cfc95 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
pmdval_t v = native_pmd_val(pmd);
- return __pmd(v | set);
+ return native_make_pmd(v | set);
}
static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
pmdval_t v = native_pmd_val(pmd);
- return __pmd(v & ~clear);
+ return native_make_pmd(v & ~clear);
}
static inline pmd_t pmd_mkold(pmd_t pmd)
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
pudval_t v = native_pud_val(pud);
- return __pud(v | set);
+ return native_make_pud(v | set);
}
static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
pudval_t v = native_pud_val(pud);
- return __pud(v & ~clear);
+ return native_make_pud(v & ~clear);
}
static inline pud_t pud_mkold(pud_t pud)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 3696398a9475..246f15b4e64c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
#else
#include <asm-generic/pgtable-nopud.h>
+static inline pud_t native_make_pud(pudval_t val)
+{
+ return (pud_t) { .p4d.pgd = native_make_pgd(val) };
+}
+
static inline pudval_t native_pud_val(pud_t pud)
{
return native_pgd_val(pud.p4d.pgd);
@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
#else
#include <asm-generic/pgtable-nopmd.h>
+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+ return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
+}
+
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return native_pgd_val(pmd.pud.p4d.pgd);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 1bd9ed87606f..b0ccd4847a58 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -977,4 +977,5 @@ bool xen_set_default_idle(void);
void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
+void microcode_check(void);
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index 4e44250e7d0d..d65171120e90 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -67,13 +67,13 @@ static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
- r->refs.counter, "er", i, "%0", e);
+ r->refs.counter, "er", i, "%0", e, "cx");
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
- r->refs.counter, "%0", e);
+ r->refs.counter, "%0", e, "cx");
}
static __always_inline __must_check
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index f91c365e57c3..4914a3e7c803 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -2,8 +2,7 @@
#ifndef _ASM_X86_RMWcc
#define _ASM_X86_RMWcc
-#define __CLOBBERS_MEM "memory"
-#define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx"
+#define __CLOBBERS_MEM(clb...) "memory", ## clb
#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
@@ -40,18 +39,19 @@ do { \
#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
- __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM)
+ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
-#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \
+#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
__GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \
- __CLOBBERS_MEM_CC_CX)
+ __CLOBBERS_MEM(clobbers))
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \
- __CLOBBERS_MEM, vcon (val))
+ __CLOBBERS_MEM(), vcon (val))
-#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \
+#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \
+ clobbers...) \
__GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \
- __CLOBBERS_MEM_CC_CX, vcon (val))
+ __CLOBBERS_MEM(clobbers), vcon (val))
#endif /* _ASM_X86_RMWcc */
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 197c2e6c7376..099414345865 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -241,24 +241,24 @@
#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
struct hv_reenlightenment_control {
- u64 vector:8;
- u64 reserved1:8;
- u64 enabled:1;
- u64 reserved2:15;
- u64 target_vp:32;
+ __u64 vector:8;
+ __u64 reserved1:8;
+ __u64 enabled:1;
+ __u64 reserved2:15;
+ __u64 target_vp:32;
};
#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
struct hv_tsc_emulation_control {
- u64 enabled:1;
- u64 reserved:63;
+ __u64 enabled:1;
+ __u64 reserved:63;
};
struct hv_tsc_emulation_status {
- u64 inprogress:1;
- u64 reserved:63;
+ __u64 inprogress:1;
+ __u64 reserved:63;
};
#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 7a2ade4aa235..6cfa9c8cb7d6 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -26,6 +26,7 @@
#define KVM_FEATURE_PV_EOI 6
#define KVM_FEATURE_PV_UNHALT 7
#define KVM_FEATURE_PV_TLB_FLUSH 9
+#define KVM_FEATURE_ASYNC_PF_VMEXIT 10
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8ad2e410974f..7c5538769f7e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1603,7 +1603,7 @@ static void __init delay_with_tsc(void)
do {
rep_nop();
now = rdtsc();
- } while ((now - start) < 40000000000UL / HZ &&
+ } while ((now - start) < 40000000000ULL / HZ &&
time_before_eq(jiffies, end));
}
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 3cc471beb50b..bb6f7a2148d7 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -134,21 +134,40 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
{
struct apic_chip_data *apicd = apic_chip_data(irqd);
struct irq_desc *desc = irq_data_to_desc(irqd);
+ bool managed = irqd_affinity_is_managed(irqd);
lockdep_assert_held(&vector_lock);
trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
apicd->cpu);
- /* Setup the vector move, if required */
- if (apicd->vector && cpu_online(apicd->cpu)) {
+ /*
+ * If there is no vector associated or if the associated vector is
+ * the shutdown vector, which is associated to make PCI/MSI
+ * shutdown mode work, then there is nothing to release. Clear out
+ * prev_vector for this and the offlined target case.
+ */
+ apicd->prev_vector = 0;
+ if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
+ goto setnew;
+ /*
+ * If the target CPU of the previous vector is online, then mark
+ * the vector as move in progress and store it for cleanup when the
+ * first interrupt on the new vector arrives. If the target CPU is
+ * offline then the regular release mechanism via the cleanup
+ * vector is not possible and the vector can be immediately freed
+ * in the underlying matrix allocator.
+ */
+ if (cpu_online(apicd->cpu)) {
apicd->move_in_progress = true;
apicd->prev_vector = apicd->vector;
apicd->prev_cpu = apicd->cpu;
} else {
- apicd->prev_vector = 0;
+ irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
+ managed);
}
+setnew:
apicd->vector = newvec;
apicd->cpu = newcpu;
BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
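apic_update_vector() above now distinguishes three cases for the previously programmed vector: nothing (or only the managed-shutdown placeholder) to release, an online previous target that must keep the old vector until cleanup runs, and an offline target whose vector can be freed immediately. A simplified standalone model of that decision; the struct, the shutdown-vector value and the cpu_online() stub are stand-ins, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in value; the real define lives in asm/irq_vectors.h. */
#define MANAGED_IRQ_SHUTDOWN_VECTOR 1

struct apic_chip_data {
        unsigned int vector, cpu;
        unsigned int prev_vector, prev_cpu;
        bool move_in_progress;
};

/* Pretend only CPU 0 is online. */
static bool cpu_online_stub(unsigned int cpu) { return cpu == 0; }

static void update_vector(struct apic_chip_data *apicd,
                          unsigned int newvec, unsigned int newcpu)
{
        apicd->prev_vector = 0;

        if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) {
                /* Nothing real was allocated: nothing to release. */
        } else if (cpu_online_stub(apicd->cpu)) {
                /* Old target online: keep the old vector around until the
                 * first interrupt arrives on the new one, then clean up. */
                apicd->move_in_progress = true;
                apicd->prev_vector = apicd->vector;
                apicd->prev_cpu = apicd->cpu;
        } else {
                /* Old target offline: the cleanup vector cannot run there,
                 * so the old vector would be freed in the matrix allocator
                 * right away. */
        }

        apicd->vector = newvec;
        apicd->cpu = newcpu;
}

int main(void)
{
        struct apic_chip_data d = { .vector = 34, .cpu = 0 };

        update_vector(&d, 35, 1);
        printf("prev_vector=%u prev_cpu=%u move_in_progress=%d\n",
               d.prev_vector, d.prev_cpu, d.move_in_progress);
        return 0;
}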
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index d71c8b54b696..bfca937bdcc3 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -300,6 +300,15 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
}
+
+ /*
+ * Retpoline means the kernel is safe because it has no indirect
+ * branches. But firmware isn't, so use IBRS to protect that.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+ pr_info("Enabling Restricted Speculation for firmware calls\n");
+ }
}
#undef pr_fmt
@@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return sprintf(buf, "Not affected\n");
- return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
spectre_v2_module_string());
}
#endif
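When X86_FEATURE_USE_IBRS_FW is set, cpu_show_spectre_v2() above appends ", IBRS_FW" to the mitigation string, so the new state is visible from user space. A small reader for the conventional sysfs location of that report (the path is the standard vulnerabilities directory; treat it as an assumption if your kernel lays things out differently):

#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/system/cpu/vulnerabilities/spectre_v2";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* e.g. "Mitigation: ..., IBPB, IBRS_FW" */
        fclose(f);
        return 0;
}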
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 824aee0117bb..348cf4821240 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1749,3 +1749,33 @@ static int __init init_cpu_syscore(void)
return 0;
}
core_initcall(init_cpu_syscore);
+
+/*
+ * The microcode loader calls this upon late microcode load to recheck features,
+ * only when microcode has been updated. Caller holds microcode_mutex and CPU
+ * hotplug lock.
+ */
+void microcode_check(void)
+{
+ struct cpuinfo_x86 info;
+
+ perf_check_microcode();
+
+ /* Reload CPUID max function as it might've changed. */
+ info.cpuid_level = cpuid_eax(0);
+
+ /*
+ * Copy all capability leaves to pick up the synthetic ones so that
+ * memcmp() below doesn't fail on that. The ones coming from CPUID will
+ * get overwritten in get_cpu_cap().
+ */
+ memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
+
+ get_cpu_cap(&info);
+
+ if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
+ return;
+
+ pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
+ pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+}
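microcode_check() above snapshots the boot CPU's capability words (including the synthetic bits, so the comparison is apples to apples), re-runs get_cpu_cap(), and warns only if the two arrays differ. A trimmed-down model of that compare; the array size and the toggled bit are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NCAPWORDS 4     /* stand-in; the kernel array is larger */

int main(void)
{
        uint32_t boot_caps[NCAPWORDS] = { 0x178bfbff, 0x0, 0x1, 0x2 };
        uint32_t new_caps[NCAPWORDS];

        /* Start from the boot copy so synthetic bits compare equal, then
         * pretend the re-read CPUID grew one new feature bit. */
        memcpy(new_caps, boot_caps, sizeof(new_caps));
        new_caps[1] |= 1u << 26;

        if (memcmp(new_caps, boot_caps, sizeof(new_caps)))
                puts("CPU features changed after loading microcode");
        return 0;
}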
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index bdab7d2f51af..fca759d272a1 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -1804,6 +1804,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
goto out_common_fail;
}
closid = ret;
+ ret = 0;
rdtgrp->closid = closid;
list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 330b8462d426..a998e1a7d46f 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
return patch_size;
}
-static int apply_microcode_amd(int cpu)
+static enum ucode_state apply_microcode_amd(int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_amd *mc_amd;
@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu)
p = find_patch(cpu);
if (!p)
- return 0;
+ return UCODE_NFOUND;
mc_amd = p->data;
uci->mc = p->data;
@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu)
if (rev >= mc_amd->hdr.patch_id) {
c->microcode = rev;
uci->cpu_sig.rev = rev;
- return 0;
+ return UCODE_OK;
}
if (__apply_microcode_amd(mc_amd)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
- return -1;
+ return UCODE_ERROR;
}
pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
mc_amd->hdr.patch_id);
@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu)
uci->cpu_sig.rev = mc_amd->hdr.patch_id;
c->microcode = mc_amd->hdr.patch_id;
- return 0;
+ return UCODE_UPDATED;
}
static int install_equiv_cpu_table(const u8 *buf)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 319dd65f98a2..aa1b9a422f2b 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -374,7 +374,7 @@ static int collect_cpu_info(int cpu)
}
struct apply_microcode_ctx {
- int err;
+ enum ucode_state err;
};
static void apply_microcode_local(void *arg)
@@ -489,31 +489,30 @@ static void __exit microcode_dev_exit(void)
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
-static int reload_for_cpu(int cpu)
+static enum ucode_state reload_for_cpu(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
enum ucode_state ustate;
- int err = 0;
if (!uci->valid)
- return err;
+ return UCODE_OK;
ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
- if (ustate == UCODE_OK)
- apply_microcode_on_target(cpu);
- else
- if (ustate == UCODE_ERROR)
- err = -EINVAL;
- return err;
+ if (ustate != UCODE_OK)
+ return ustate;
+
+ return apply_microcode_on_target(cpu);
}
static ssize_t reload_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
+ enum ucode_state tmp_ret = UCODE_OK;
+ bool do_callback = false;
unsigned long val;
+ ssize_t ret = 0;
int cpu;
- ssize_t ret = 0, tmp_ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -526,15 +525,21 @@ static ssize_t reload_store(struct device *dev,
mutex_lock(&microcode_mutex);
for_each_online_cpu(cpu) {
tmp_ret = reload_for_cpu(cpu);
- if (tmp_ret != 0)
+ if (tmp_ret > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
- /* save retval of the first encountered reload error */
- if (!ret)
- ret = tmp_ret;
+ /* set retval for the first encountered reload error */
+ if (!ret)
+ ret = -EINVAL;
+ }
+
+ if (tmp_ret == UCODE_UPDATED)
+ do_callback = true;
}
- if (!ret)
- perf_check_microcode();
+
+ if (!ret && do_callback)
+ microcode_check();
+
mutex_unlock(&microcode_mutex);
put_online_cpus();
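reload_store() above now treats any state above UCODE_NFOUND as an error and invokes microcode_check() only when at least one CPU actually reported UCODE_UPDATED. A compact standalone model of that loop, with per-CPU results invented for illustration:

#include <stdbool.h>
#include <stdio.h>

enum ucode_state { UCODE_OK = 0, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR };

int main(void)
{
        /* Pretend per-CPU results from reload_for_cpu(). */
        enum ucode_state results[] = { UCODE_UPDATED, UCODE_OK, UCODE_NFOUND };
        bool do_callback = false;
        int ret = 0;

        for (unsigned int cpu = 0; cpu < 3; cpu++) {
                enum ucode_state tmp_ret = results[cpu];

                if (tmp_ret > UCODE_NFOUND) {
                        printf("Error reloading microcode on CPU %u\n", cpu);
                        if (!ret)
                                ret = -22;      /* -EINVAL */
                }
                if (tmp_ret == UCODE_UPDATED)
                        do_callback = true;
        }

        if (!ret && do_callback)
                printf("microcode_check() would run here\n");
        return 0;
}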
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index a15db2b4e0d6..923054a6b760 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -772,7 +772,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
return 0;
}
-static int apply_microcode_intel(int cpu)
+static enum ucode_state apply_microcode_intel(int cpu)
{
struct microcode_intel *mc;
struct ucode_cpu_info *uci;
@@ -782,7 +782,7 @@ static int apply_microcode_intel(int cpu)
/* We should bind the task to the CPU */
if (WARN_ON(raw_smp_processor_id() != cpu))
- return -1;
+ return UCODE_ERROR;
uci = ucode_cpu_info + cpu;
mc = uci->mc;
@@ -790,7 +790,7 @@ static int apply_microcode_intel(int cpu)
/* Look for a newer patch in our cache: */
mc = find_patch(uci);
if (!mc)
- return 0;
+ return UCODE_NFOUND;
}
/* write microcode via MSR 0x79 */
@@ -801,7 +801,7 @@ static int apply_microcode_intel(int cpu)
if (rev != mc->hdr.rev) {
pr_err("CPU%d update to revision 0x%x failed\n",
cpu, mc->hdr.rev);
- return -1;
+ return UCODE_ERROR;
}
if (rev != prev_rev) {
@@ -818,7 +818,7 @@ static int apply_microcode_intel(int cpu)
uci->cpu_sig.rev = rev;
c->microcode = rev;
- return 0;
+ return UCODE_UPDATED;
}
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 04a625f0fcda..0f545b3cf926 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -23,6 +23,7 @@
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
+#include <asm/nospec-branch.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -134,6 +135,7 @@ ENTRY(secondary_startup_64)
/* Ensure I am executing from virtual addresses */
movq $1f, %rax
+ ANNOTATE_RETPOLINE_SAFE
jmp *%rax
1:
UNWIND_HINT_EMPTY
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 4e37d1a851a6..bc1a27280c4b 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -49,7 +49,7 @@
static int kvmapf = 1;
-static int parse_no_kvmapf(char *arg)
+static int __init parse_no_kvmapf(char *arg)
{
kvmapf = 0;
return 0;
@@ -58,7 +58,7 @@ static int parse_no_kvmapf(char *arg)
early_param("no-kvmapf", parse_no_kvmapf);
static int steal_acc = 1;
-static int parse_no_stealacc(char *arg)
+static int __init parse_no_stealacc(char *arg)
{
steal_acc = 0;
return 0;
@@ -67,7 +67,7 @@ static int parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
static int kvmclock_vsyscall = 1;
-static int parse_no_kvmclock_vsyscall(char *arg)
+static int __init parse_no_kvmclock_vsyscall(char *arg)
{
kvmclock_vsyscall = 0;
return 0;
@@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void)
#endif
pa |= KVM_ASYNC_PF_ENABLED;
- /* Async page fault support for L1 hypervisor is optional */
- if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
- (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
- wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
+ if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
+ pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
+
+ wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(apf_reason.enabled, 1);
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
smp_processor_id());
@@ -545,7 +545,8 @@ static void __init kvm_guest_init(void)
pv_time_ops.steal_clock = kvm_steal_clock;
}
- if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH))
+ if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+ !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -633,7 +634,8 @@ static __init int kvm_setup_pv_tlb_flush(void)
{
int cpu;
- if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) {
+ if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+ !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu));
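On the guest side, the async-PF change above sets the delivery-as-#PF-on-VMEXIT bit only when the hypervisor advertises KVM_FEATURE_ASYNC_PF_VMEXIT (bit 10), instead of probing with wrmsr_safe(). A minimal model of that feature test; apart from bit 10 (which the uapi hunk defines), the constants below are illustrative stand-ins, and the features word stands for EAX of the KVM features CPUID leaf (0x40000001 by convention, assumed here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 10 matches the uapi hunk above; the other values are stand-ins. */
#define KVM_FEATURE_ASYNC_PF_VMEXIT     10
#define ASYNC_PF_ENABLED                (1u << 0)
#define ASYNC_PF_DELIVERY_AS_PF_VMEXIT  (1u << 2)

static bool kvm_para_has_feature(uint32_t features, int bit)
{
        return features & (1u << bit);
}

int main(void)
{
        /* Pretend value of EAX from the KVM features CPUID leaf. */
        uint32_t features = 1u << KVM_FEATURE_ASYNC_PF_VMEXIT;
        uint64_t pa = 0x12340000;       /* made-up apf_reason address */

        pa |= ASYNC_PF_ENABLED;
        if (kvm_para_has_feature(features, KVM_FEATURE_ASYNC_PF_VMEXIT))
                pa |= ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

        printf("MSR_KVM_ASYNC_PF_EN would be written with 0x%llx\n",
               (unsigned long long)pa);
        return 0;
}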
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9eee25d07586..ff99e2b6fc54 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1437,6 +1437,7 @@ static void remove_siblinginfo(int cpu)
cpumask_clear(topology_sibling_cpumask(cpu));
cpumask_clear(topology_core_cpumask(cpu));
c->cpu_core_id = 0;
+ c->booted_cores = 0;
cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
recompute_smt_state();
}
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 1f9188f5357c..feb28fee6cea 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -5,7 +5,6 @@
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>
-#include <asm/sections.h>
#define orc_warn(fmt, ...) \
printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)
@@ -148,7 +147,7 @@ static struct orc_entry *orc_find(unsigned long ip)
}
/* vmlinux .init slow lookup: */
- if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext)
+ if (init_kernel_text(ip))
return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
__stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a0c5a69bc7c4..b671fc2d0422 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -607,7 +607,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_PV_EOI) |
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
(1 << KVM_FEATURE_PV_UNHALT) |
- (1 << KVM_FEATURE_PV_TLB_FLUSH);
+ (1 << KVM_FEATURE_PV_TLB_FLUSH) |
+ (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);
if (sched_info_on())
entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 924ac8ce9d50..cc5fe7a50dde 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2165,7 +2165,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
*/
vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
- kvm_lapic_reset(vcpu, false);
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
return 0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 46ff304140c7..f551962ac294 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3029,7 +3029,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
return RET_PF_RETRY;
}
- return -EFAULT;
+ return RET_PF_EMULATE;
}
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..cbd7ab74952e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>
#include <asm/virtext.h>
@@ -300,6 +301,8 @@ module_param(vgif, int, 0444);
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);
+static u8 rsm_ins_bytes[] = "\x0f\xaa";
+
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -1383,6 +1386,7 @@ static void init_vmcb(struct vcpu_svm *svm)
set_intercept(svm, INTERCEPT_SKINIT);
set_intercept(svm, INTERCEPT_WBINVD);
set_intercept(svm, INTERCEPT_XSETBV);
+ set_intercept(svm, INTERCEPT_RSM);
if (!kvm_mwait_in_guest()) {
set_intercept(svm, INTERCEPT_MONITOR);
@@ -3699,6 +3703,12 @@ static int emulate_on_interception(struct vcpu_svm *svm)
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
+static int rsm_interception(struct vcpu_svm *svm)
+{
+ return x86_emulate_instruction(&svm->vcpu, 0, 0,
+ rsm_ins_bytes, 2) == EMULATE_DONE;
+}
+
static int rdpmc_interception(struct vcpu_svm *svm)
{
int err;
@@ -4541,7 +4551,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
[SVM_EXIT_NPF] = npf_interception,
- [SVM_EXIT_RSM] = emulate_on_interception,
+ [SVM_EXIT_RSM] = rsm_interception,
[SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
[SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
};
@@ -5355,7 +5365,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
asm volatile (
"push %%" _ASM_BP "; \n\t"
@@ -5464,11 +5474,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
@@ -6236,16 +6246,18 @@ e_free:
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
+ void __user *measure = (void __user *)(uintptr_t)argp->data;
struct kvm_sev_info *sev = &kvm->arch.sev_info;
struct sev_data_launch_measure *data;
struct kvm_sev_launch_measure params;
+ void __user *p = NULL;
void *blob = NULL;
int ret;
if (!sev_guest(kvm))
return -ENOTTY;
- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ if (copy_from_user(&params, measure, sizeof(params)))
return -EFAULT;
data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -6256,17 +6268,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!params.len)
goto cmd;
- if (params.uaddr) {
+ p = (void __user *)(uintptr_t)params.uaddr;
+ if (p) {
if (params.len > SEV_FW_BLOB_MAX_SIZE) {
ret = -EINVAL;
goto e_free;
}
- if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
ret = -ENOMEM;
blob = kmalloc(params.len, GFP_KERNEL);
if (!blob)
@@ -6290,13 +6298,13 @@ cmd:
goto e_free_blob;
if (blob) {
- if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
+ if (copy_to_user(p, blob, params.len))
ret = -EFAULT;
}
done:
params.len = data->len;
- if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+ if (copy_to_user(measure, &params, sizeof(params)))
ret = -EFAULT;
e_free_blob:
kfree(blob);
@@ -6597,7 +6605,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
struct page **pages;
void *blob, *hdr;
unsigned long n;
- int ret;
+ int ret, offset;
if (!sev_guest(kvm))
return -ENOTTY;
@@ -6623,6 +6631,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!data)
goto e_unpin_memory;
+ offset = params.guest_uaddr & (PAGE_SIZE - 1);
+ data->guest_address = __sme_page_pa(pages[0]) + offset;
+ data->guest_len = params.guest_len;
+
blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
if (IS_ERR(blob)) {
ret = PTR_ERR(blob);
@@ -6637,8 +6649,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
ret = PTR_ERR(hdr);
goto e_free_blob;
}
- data->trans_address = __psp_pa(blob);
- data->trans_len = params.trans_len;
+ data->hdr_address = __psp_pa(hdr);
+ data->hdr_len = params.hdr_len;
data->handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
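
The LAUNCH_UPDATE_SECRET fix has two parts: the packet header is now described by hdr_address/hdr_len instead of overwriting the transport fields, and the firmware is handed the exact physical address of the secret rather than the page base, since params.guest_uaddr need not be page-aligned. The address arithmetic is just the pinned page's physical address plus the sub-page offset of the user address; a tiny stand-alone sketch (PAGE_SIZE_ and the helper name are illustrative):

#define PAGE_SIZE_ 4096UL

/* physical address of the byte a (possibly unaligned) user address refers
 * to, given the physical address of the page it was pinned in */
static unsigned long guest_phys(unsigned long page_pa, unsigned long uaddr)
{
        return page_pa + (uaddr & (PAGE_SIZE_ - 1));
}
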
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3dec126aa302..cab6ea1f8be5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/mmu_context.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>
#include "trace.h"
@@ -4485,7 +4486,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_DESC);
hw_cr4 &= ~X86_CR4_UMIP;
- } else
+ } else if (!is_guest_mode(vcpu) ||
+ !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_DESC);
@@ -9452,7 +9454,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
vmx->__launched = vmx->loaded_vmcs->launched;
asm(
@@ -9587,11 +9589,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
@@ -11199,7 +11201,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (ret)
return ret;
- if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
+ /*
+ * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
+ * by event injection, halt vcpu.
+ */
+ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
+ !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK))
return kvm_vcpu_halt(vcpu);
vmx->nested.nested_run_pending = 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c8a0b545ac20..96edda878dbf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7975,6 +7975,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
kvm_vcpu_reset(vcpu, false);
+ kvm_lapic_reset(vcpu, false);
kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
return 0;
@@ -8460,10 +8461,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
return r;
}
- if (!size) {
- r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
- WARN_ON(r < 0);
- }
+ if (!size)
+ vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
return 0;
}
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 91e9700cc6dc..25a972c61b0a 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -28,7 +28,6 @@ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RETPOLINE) += retpoline.o
-OBJECT_FILES_NON_STANDARD_retpoline.o :=y
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 480edc3a5e03..c909961e678a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -7,7 +7,6 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
-#include <asm/bitsperlong.h>
.macro THUNK reg
.section .text.__x86.indirect_thunk
@@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
GENERATE_THUNK(r14)
GENERATE_THUNK(r15)
#endif
-
-/*
- * Fill the CPU return stack buffer.
- *
- * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
- *
- * This is required in various cases for retpoline and IBRS-based
- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
- * eliminate potentially bogus entries from the RSB, and sometimes
- * purely to ensure that it doesn't get empty, which on some CPUs would
- * allow predictions from other (unwanted!) sources to be used.
- *
- * Google experimented with loop-unrolling and this turned out to be
- * the optimal version - two calls, each with their own speculation
- * trap should their return address end up getting used, in a loop.
- */
-.macro STUFF_RSB nr:req sp:req
- mov $(\nr / 2), %_ASM_BX
- .align 16
-771:
- call 772f
-773: /* speculation trap */
- pause
- lfence
- jmp 773b
- .align 16
-772:
- call 774f
-775: /* speculation trap */
- pause
- lfence
- jmp 775b
- .align 16
-774:
- dec %_ASM_BX
- jnz 771b
- add $((BITS_PER_LONG/8) * \nr), \sp
-.endm
-
-#define RSB_FILL_LOOPS 16 /* To avoid underflow */
-
-ENTRY(__fill_rsb)
- STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
- ret
-END(__fill_rsb)
-EXPORT_SYMBOL_GPL(__fill_rsb)
-
-#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
-
-ENTRY(__clear_rsb)
- STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
- ret
-END(__clear_rsb)
-EXPORT_SYMBOL_GPL(__clear_rsb)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 800de815519c..c88573d90f3e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
tsk = current;
mm = tsk->mm;
- /*
- * Detect and handle instructions that would cause a page fault for
- * both a tracked kernel page and a userspace page.
- */
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 01f682cf77a8..40a6085063d6 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -15,6 +15,7 @@
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
+#include <asm/nospec-branch.h>
.text
.code64
@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
movq %rax, %r8 /* Workarea encryption routine */
addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
+ ANNOTATE_RETPOLINE_SAFE
call *%rax /* Call the encryption routine */
pop %r12
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4923d92f918d..45e4eb5bcbb2 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -13,6 +13,7 @@
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
+#include <asm/nospec-branch.h>
#include <linux/bpf.h>
/*
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT2(0x89, 0xD2); /* mov edx, edx */
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
@@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
@@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog)
* goto out;
*/
EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
EMIT2(X86_JE, OFFSET3); /* je out */
label3 = cnt;
@@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog)
* rdi == ctx (1st arg)
* rax == prog->bpf_func + prologue_size
*/
- EMIT2(0xFF, 0xE0); /* jmp rax */
+ RETPOLINE_RAX_BPF_JIT();
/* out: */
BUILD_BUG_ON(cnt - label1 != OFFSET1);
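
Replacing the two-byte "jmp rax" (0xFF 0xE0) in the tail-call path with a retpoline sequence changes the length of the emitted code, so every forward jump that skips ahead to the shared out: label must grow by the same amount; that is all the OFFSET1/OFFSET2/OFFSET3 changes do (the old constants simply baked in the 2-byte size: 43 = 41 + 2, 32 = 30 + 2, 10 = 8 + 2), and the BUILD_BUG_ON lines verify the bookkeeping at build time. A toy illustration of the invariant, with RETPOLINE_SEQ_SIZE standing in for the real RETPOLINE_RAX_BPF_JIT_SIZE (whose exact value is config-dependent):

#include <assert.h>
#include <stdio.h>

#define JMP_RAX_SIZE        2   /* ff e0 */
#define RETPOLINE_SEQ_SIZE  17  /* stand-in; the real size comes from nospec-branch.h */

int main(void)
{
        int fixed_bytes = 43 - JMP_RAX_SIZE;    /* code between the jbe and the indirect jump */
        int new_offset1 = fixed_bytes + RETPOLINE_SEQ_SIZE;

        /* a short-jump displacement must always equal the number of bytes skipped */
        assert(fixed_bytes == 41);
        printf("OFFSET1: %d -> %d\n", 43, new_offset1);
        return 0;
}
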
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 174c59774cc9..a7a7677265b6 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -460,7 +460,7 @@ static int nmi_setup(void)
goto fail;
for_each_possible_cpu(cpu) {
- if (!cpu)
+ if (!IS_ENABLED(CONFIG_SMP) || !cpu)
continue;
memcpy(per_cpu(cpu_msrs, cpu).counters,
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index de53bd15df5a..24bb7598774e 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -102,7 +102,7 @@ ENTRY(startup_32)
* don't we'll eventually crash trying to execute encrypted
* instructions.
*/
- bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
+ btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
jnc .Ldone
movl $MSR_K8_SYSCFG, %ecx
rdmsr
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 623720a11143..732631ce250f 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -16,6 +16,7 @@
*/
#include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
unsigned long ret;
- unsigned long uncached = 0;
+ unsigned long uncached;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
if (!page)
return NULL;
- ret = (unsigned long)page_address(page);
+ *handle = phys_to_dma(dev, page_to_phys(page));
- /* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+ if (PageHighMem(page)) {
+ void *p;
+ p = dma_common_contiguous_remap(page, size, VM_MAP,
+ pgprot_noncached(PAGE_KERNEL),
+ __builtin_return_address(0));
+ if (!p) {
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
+ }
+ return p;
+ }
+#endif
+ ret = (unsigned long)page_address(page);
BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
- *handle = virt_to_bus((void *)ret);
__invalidate_dcache_range(ret, size);
return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- unsigned long addr = (unsigned long)vaddr +
- XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
- struct page *page = virt_to_page(addr);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
- addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+ unsigned long addr = (unsigned long)vaddr;
+ struct page *page;
+
+ if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+ addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+ addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+ page = virt_to_page(addr);
+ } else {
+#ifdef CONFIG_MMU
+ dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+ page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+ }
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
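
Both the allocation and free paths above lean on the xtensa KSEG layout: low memory is visible through two fixed virtual windows, a cached one and an uncached "bypass" one, a constant distance apart, so converting between the aliases is plain address arithmetic. High memory has no such alias, which is why the patch remaps it with dma_common_contiguous_remap() and distinguishes the two cases in the free path by address range. A minimal sketch of that aliasing, reusing the same XCHAL_* constants (the helper names are made up):

/* cached KSEG address -> its uncached (bypass) alias */
static unsigned long kseg_to_uncached(unsigned long cached)
{
        return cached + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}

/* and back again, as the free path does before virt_to_page() */
static unsigned long kseg_to_cached(unsigned long uncached)
{
        return uncached + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
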
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index d776ec0d7b22..34aead7dcb48 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -79,19 +79,75 @@ void __init zones_init(void)
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+ for (; pfn < end; pfn++)
+ free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+ unsigned long max_low = max_low_pfn;
+ struct memblock_region *mem, *res;
+
+ reset_all_zones_managed_pages();
+ /* set highmem page free */
+ for_each_memblock(memory, mem) {
+ unsigned long start = memblock_region_memory_base_pfn(mem);
+ unsigned long end = memblock_region_memory_end_pfn(mem);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ if (memblock_is_nomap(mem))
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for_each_memblock(reserved, res) {
+ unsigned long res_start, res_end;
+
+ res_start = memblock_region_reserved_base_pfn(res);
+ res_end = memblock_region_reserved_end_pfn(res);
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ free_area_high(start, res_start);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ free_area_high(start, end);
+ }
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
/*
* Initialize memory pages.
*/
void __init mem_init(void)
{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- reset_all_zones_managed_pages();
- for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
-#endif
+ free_highpages();
max_mapnr = max_pfn - ARCH_PFN_OFFSET;
high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 9ed51d0c6b1d..e4929eec547f 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -490,7 +490,7 @@ static int opal_discovery0_end(struct opal_dev *dev)
if (!found_com_id) {
pr_debug("Could not find OPAL comid for device. Returning early\n");
- return -EOPNOTSUPP;;
+ return -EOPNOTSUPP;
}
dev->comid = comid;
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 1f4e25f10049..598906b1e28d 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
pr_devel("sinfo %u: Direct signer is key %x\n",
sinfo->index, key_serial(key));
x509 = NULL;
+ sig = sinfo->sig;
goto matched;
}
if (PTR_ERR(key) != -ENOKEY)
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 39e6de0c2761..97c77f66b20d 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -270,7 +270,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
sinfo->index);
return 0;
}
- ret = public_key_verify_signature(p->pub, p->sig);
+ ret = public_key_verify_signature(p->pub, x509->sig);
if (ret < 0)
return ret;
x509->signer = p;
@@ -366,8 +366,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
*
* (*) -EBADMSG if some part of the message was invalid, or:
*
- * (*) 0 if no signature chains were found to be blacklisted or to contain
- * unsupported crypto, or:
+ * (*) 0 if a signature chain passed verification, or:
*
* (*) -EKEYREJECTED if a blacklisted key was encountered, or:
*
@@ -423,8 +422,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7,
for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
ret = pkcs7_verify_one(pkcs7, sinfo);
- if (sinfo->blacklisted && actual_ret == -ENOPKG)
- actual_ret = -EKEYREJECTED;
+ if (sinfo->blacklisted) {
+ if (actual_ret == -ENOPKG)
+ actual_ret = -EKEYREJECTED;
+ continue;
+ }
if (ret < 0) {
if (ret == -ENOPKG) {
sinfo->unsupported_crypto = true;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index de996586762a..e929fe1e4106 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -79,9 +79,11 @@ int public_key_verify_signature(const struct public_key *pkey,
BUG_ON(!pkey);
BUG_ON(!sig);
- BUG_ON(!sig->digest);
BUG_ON(!sig->s);
+ if (!sig->digest)
+ return -ENOPKG;
+
alg_name = sig->pkey_algo;
if (strcmp(sig->pkey_algo, "rsa") == 0) {
/* The data wangled by the RSA algorithm is typically padded
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 86fb68508952..7c93c7728454 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup);
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
* matching parent certificate in the trusted list, -EKEYREJECTED if the
- * signature check fails or the key is blacklisted and some other error if
- * there is a matching certificate but the signature check cannot be performed.
+ * signature check fails or the key is blacklisted, -ENOPKG if the signature
+ * uses unsupported crypto, or some other error if there is a matching
+ * certificate but the signature check cannot be performed.
*/
int restrict_link_by_signature(struct key *dest_keyring,
const struct key_type *type,
@@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring,
return -EOPNOTSUPP;
sig = payload->data[asym_auth];
+ if (!sig)
+ return -ENOPKG;
if (!sig->auth_ids[0] && !sig->auth_ids[1])
return -ENOKEY;
@@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring,
return -EOPNOTSUPP;
sig = payload->data[asym_auth];
+ if (!sig)
+ return -ENOPKG;
if (!sig->auth_ids[0] && !sig->auth_ids[1])
return -ENOKEY;
@@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring,
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we
* couldn't find a matching parent certificate in the trusted list,
- * -EKEYREJECTED if the signature check fails, and some other error if
- * there is a matching certificate but the signature check cannot be
- * performed.
+ * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+ * unsupported crypto, or some other error if there is a matching certificate
+ * but the signature check cannot be performed.
*/
int restrict_link_by_key_or_keyring(struct key *dest_keyring,
const struct key_type *type,
@@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring,
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we
* couldn't find a matching parent certificate in the trusted list,
- * -EKEYREJECTED if the signature check fails, and some other error if
- * there is a matching certificate but the signature check cannot be
- * performed.
+ * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+ * unsupported crypto, or some other error if there is a matching certificate
+ * but the signature check cannot be performed.
*/
int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring,
const struct key_type *type,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 4d46003c46cf..cdaeeea7999c 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata)
for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
prop = of_get_property(np, sysc_dts_quirks[i].name, &len);
if (!prop)
- break;
+ continue;
ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
}
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 4d1dc8b46877..f95b9c75175b 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
{
int size = 0;
- int expected;
+ u32 expected;
if (!chip)
return -EBUSY;
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
}
expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 76df4fbcf089..9e80a953d693 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
break;
recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+ if (recd > num_bytes) {
+ total = -EFAULT;
+ break;
+ }
rlength = be32_to_cpu(tpm_cmd.header.out.length);
if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c17e75348a99..a700f8f9ead7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
if (!rc) {
data_len = be16_to_cpup(
(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ rc = -EFAULT;
+ goto out;
+ }
rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
->header.out.length);
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index c1dd39eaaeeb..6116cd05e228 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if ((size_t) expected > count) {
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index c6428771841f..caa86b19c76d 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
- int expected, status, burst_count, retries, size = 0;
+ int status;
+ int burst_count;
+ int retries;
+ int size = 0;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
i2c_nuvoton_ready(chip); /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* to machine native
*/
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < size) {
dev_err(dev, "%s() expected > count\n", __func__);
size = -EIO;
continue;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 183a5f54d875..da074e3db19b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
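
The TPM changes above all harden the same trust boundary: length fields reported by the device itself are validated before they drive further reads. For the drivers that parse the response header, the reported total length (a big-endian 32-bit value at byte offset 2) must now be at least a full header and no larger than the caller's buffer; tpm_get_random() and tpm2_unseal_cmd() similarly bound the payload sizes they read back. A stand-alone sketch of the header check, assuming the kernel's 10-byte TPM_HEADER_SIZE (the helper name is made up):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohl() */

#define TPM_HEADER_SIZE 10      /* tag (2) + length (4) + return code (4) */

/* returns 1 if the length field embedded in a TPM response header is sane
 * for a destination buffer of 'count' bytes, 0 otherwise */
static int tpm_response_len_ok(const uint8_t *buf, size_t count)
{
        uint32_t be_len, expected;

        if (count < TPM_HEADER_SIZE)
                return 0;
        memcpy(&be_len, buf + 2, sizeof(be_len));
        expected = ntohl(be_len);
        return expected >= TPM_HEADER_SIZE && expected <= count;
}
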
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index a04808a21d4e..65e18c86d9b9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node)
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("GIC frequency not specified.\n");
- return -EINVAL;;
+ return -EINVAL;
}
gic_timer_irq = irq_of_parse_and_map(node, 0);
if (!gic_timer_irq) {
pr_err("GIC timer IRQ not specified.\n");
- return -EINVAL;;
+ return -EINVAL;
}
ret = __gic_clocksource_init();
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 2a3fe83ec337..3b56ea3f52af 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node)
timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(timer_base)) {
pr_err("Can't map registers\n");
- return PTR_ERR(timer_base);;
+ return PTR_ERR(timer_base);
}
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 3a88e33b0cfe..fb586e09682d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ
config ARM_SCPI_CPUFREQ
tristate "SCPI based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+ depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
help
- This adds the CPUfreq driver support for ARM big.LITTLE platforms
- using SCPI protocol for CPU power management.
+ This adds the CPUfreq driver support for ARM platforms using SCPI
+ protocol for CPU power management.
This driver uses SCPI Message Protocol driver to interact with the
firmware providing the CPU DVFS functionality.
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 7b596fa38ad2..6bebc1f9f55a 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = clk_arm;
- return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+ policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+ if (ftab)
+ return cpufreq_table_validate_and_show(policy, ftab);
+
+ return 0;
}
static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index c32a833e1b00..d300a163945f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
+ unsigned long freq = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
- u64 rate = policy->freq_table[index].frequency * 1000;
+ u64 rate = freq * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
- if (!ret && (clk_get_rate(priv->clk) != rate))
- ret = -EIO;
- return ret;
+ if (ret)
+ return ret;
+
+ if (clk_get_rate(priv->clk) != rate)
+ return -EIO;
+
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
+
+ return 0;
}
static int
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index fcfa5b1eae61..b3afb6cc9d72 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error)
{
int ret;
- ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error);
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
if (ret)
return ret;
@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
return rc;
}
- return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error);
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
}
static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
return rc;
}
- return __sev_do_cmd_locked(cmd, 0, &argp->error);
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission);
int sev_guest_df_flush(int *error)
{
- return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error);
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
}
EXPORT_SYMBOL_GPL(sev_guest_df_flush);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f34430f99fd8..872100215ca0 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
* sbridge structs
*/
-#define NUM_CHANNELS 4 /* Max channels per MC */
+#define NUM_CHANNELS 6 /* Max channels per MC */
#define MAX_DIMMS 3 /* Max DIMMS per channel */
#define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 564bb7a31da4..84e5a9df2344 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
+ /*
+ * -EPROBE_DEFER in our case means that we found a
+ * valid GPIO property, but no controller has been
+ * registered so far.
+ *
+ * This means we don't need to look any further for
+ * alternate name conventions, and we should really
+ * preserve the return code for our user to be able to
+ * retry probing later.
+ */
+ if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
+ return desc;
+
if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
break;
}
@@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
desc = of_find_spi_gpio(dev, con_id, &of_flags);
/* Special handling for regulator GPIOs if used */
- if (IS_ERR(desc))
+ if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
desc = of_find_regulator_gpio(dev, con_id, &of_flags);
if (IS_ERR(desc))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d5a2eefd6c3e..74edba18b159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8ca3783f2deb..74d2efaec52f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -736,9 +736,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -757,8 +759,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
amdgpu_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -868,9 +874,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
@@ -924,8 +932,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -988,9 +998,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1115,8 +1127,10 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1359,9 +1373,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1429,8 +1445,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 00a50cc5ec9a..af1b879a9ee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
/* clear wb memory */
- memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
+ memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
}
return 0;
@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
*/
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
+ wb >>= 3;
if (wb < adev->wb.num_wb)
- __clear_bit(wb >> 3, adev->wb.used);
+ __clear_bit(wb, adev->wb.used);
}
/**
@@ -1455,11 +1456,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- amdgpu_free_static_csa(adev);
- amdgpu_device_wb_fini(adev);
- amdgpu_device_vram_scratch_fini(adev);
- }
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
@@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
continue;
+
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ amdgpu_free_static_csa(adev);
+ amdgpu_device_wb_fini(adev);
+ amdgpu_device_vram_scratch_fini(adev);
+ }
+
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
@@ -2284,14 +2287,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
drm_modeset_unlock_all(dev);
- } else {
- /*
- * There is no equivalent atomic helper to turn on
- * display, so we defined our own function for this,
- * once suspend resume is supported by the atomic
- * framework this will be reworked
- */
- amdgpu_dm_display_resume(adev);
}
}
@@ -2726,7 +2721,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (amdgpu_device_has_dc_support(adev)) {
if (drm_atomic_helper_resume(adev->ddev, state))
dev_info(adev->dev, "drm resume failed:%d\n", r);
- amdgpu_dm_display_resume(adev);
} else {
drm_helper_resume_force_mode(adev->ddev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index e14ab34d8262..7c2be32c5aea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
-
+ spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 56bcd59c3399..36483e0d3c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
if (r) {
adev->irq.installed = false;
- flush_work(&adev->hotplug_work);
+ if (!amdgpu_device_has_dc_support(adev))
+ flush_work(&adev->hotplug_work);
cancel_work_sync(&adev->reset_work);
return r;
}
@@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
adev->irq.installed = false;
if (adev->irq.msi_enabled)
pci_disable_msi(adev->pdev);
- flush_work(&adev->hotplug_work);
+ if (!amdgpu_device_has_dc_support(adev))
+ flush_work(&adev->hotplug_work);
cancel_work_sync(&adev->reset_work);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2719937e09d6..3b7e7af09ead 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle)
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
BUG_ON(vm_inv_eng[i] > 16);
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
r = gmc_v9_0_ecc_available(adev);
if (r == 1) {
DRM_INFO("ECC is active.\n");
@@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
if (!adev->mc.vram_width) {
/* hbm memory channel size */
- chansize = 128;
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e92fb372bc99..91cf95a8c39c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u64 *wptr = NULL;
- uint64_t local_wptr = 0;
+ u64 wptr;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
- DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
- *wptr = (*wptr) >> 2;
- DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
u32 lowbit, highbit;
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- wptr = &local_wptr;
lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
me, highbit, lowbit);
- *wptr = highbit;
- *wptr = (*wptr) << 32;
- *wptr |= lowbit;
+ wptr = highbit;
+ wptr = wptr << 32;
+ wptr |= lowbit;
}
- return *wptr;
+ return wptr >> 2;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index b2bfedaf57f1..9bab4842cd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
.set_wptr = uvd_v6_0_enc_ring_set_wptr,
.emit_frame_size =
4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
- 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+ 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v6_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1ce4c98385e3..862835dc054e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -629,11 +629,13 @@ static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
- return 0;
+ ret = amdgpu_dm_display_resume(adev);
+ return ret;
}
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 61e8c3e02d16..639421a00ab6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -718,7 +718,7 @@ static enum link_training_result perform_channel_equalization_sequence(
uint32_t retries_ch_eq;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {{0}};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
hw_tr_pattern = get_supported_tp(link);
@@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream,
/* MST doesn't perform link training for now
* TODO: add MST specific link training routine
*/
- if (is_mst_supported(link)) {
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
*link_setting = link->verified_link_cap;
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 261811e0c094..539c3e0a6292 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -197,7 +197,8 @@ bool dc_stream_set_cursor_attributes(
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
+ !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
continue;
if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
continue;
@@ -273,7 +274,8 @@ bool dc_stream_set_cursor_position(
if (pipe_ctx->stream != stream ||
(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
!pipe_ctx->plane_state ||
- (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
+ !pipe_ctx->plane_res.ipp)
continue;
if (pipe_ctx->plane_state->address.type
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 4c3223a4d62b..adb6e7b9280c 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -162,7 +162,7 @@ static int pp_hw_init(void *handle)
if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
pr_err("smc start failed\n");
hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
- return -EINVAL;;
+ return -EINVAL;
}
if (ret == PP_DPM_DISABLED)
goto exit;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 41e42beff213..08e8a793714f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- disable_mclk_switching = ((1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock ||
- smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
- (mode_info.refresh_rate > 120));
+ if (info.display_count == 0)
+ disable_mclk_switching = false;
+ else
+ disable_mclk_switching = ((1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock ||
+ smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+ (mode_info.refresh_rate > 120));
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
int tmp_result, result = 0;
uint32_t sclk_mask = 0, mclk_mask = 0;
- if (hwmgr->chip_id == CHIP_FIJI) {
- if (request->type == AMD_PP_GFX_PROFILE)
- smu7_enable_power_containment(hwmgr);
- else if (request->type == AMD_PP_COMPUTE_PROFILE)
- smu7_disable_power_containment(hwmgr);
- }
-
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 2d55dabc77d4..5f9c3efb532f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
- disable_mclk_switching = (info.display_count > 1) ||
- disable_mclk_switching_for_frame_lock ||
- disable_mclk_switching_for_vr ||
- force_mclk_high;
+ if (info.display_count == 0)
+ disable_mclk_switching = false;
+ else
+ disable_mclk_switching = (info.display_count > 1) ||
+ disable_mclk_switching_for_frame_lock ||
+ disable_mclk_switching_for_vr ||
+ force_mclk_high;
sclk = vega10_ps->performance_levels[0].gfx_clock;
mclk = vega10_ps->performance_levels[0].mem_clock;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index cd23b1b28259..c91b9b054e3f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc)
{
}
-/*
- * This is called after a mode is programmed. It should reverse anything done
- * by the prepare function
- */
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
-}
-
-/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
- */
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct cirrus_device *cdev = dev->dev_private;
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
int i;
if (!crtc->enabled)
- return 0;
+ return;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
WREG8(PALETTE_DATA, *g++ >> 8);
WREG8(PALETTE_DATA, *b++ >> 8);
}
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+ cirrus_crtc_load_lut(crtc);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ cirrus_crtc_load_lut(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ab4032167094..ae3cbfe9e01c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1878,6 +1878,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
new_crtc_state->event->base.completion = &commit->flip_done;
new_crtc_state->event->base.completion_release = release_crtc_commit;
drm_crtc_commit_get(commit);
+
+ commit->abort_completion = true;
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
@@ -3421,8 +3423,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
if (state->commit) {
+ /*
+ * In the event that a non-blocking commit returns
+ * -ERESTARTSYS before the commit_tail work is queued, we will
+ * have an extra reference to the commit object. Release it, if
+ * the event has not been consumed by the worker.
+ *
+ * state->event may be freed, so we can't directly look at
+ * state->event->base.completion.
+ */
+ if (state->event && state->commit->abort_completion)
+ drm_crtc_commit_put(state->commit);
+
kfree(state->commit->event);
state->commit->event = NULL;
+
drm_crtc_commit_put(state->commit);
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ddd537914575..4f751a9d71a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -113,6 +113,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -162,6 +165,24 @@ static const struct edid_quirk {
/* HTC Vive VR Headset */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+
+ /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+
+ /* Windows Mixed Reality Headsets */
+ { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+ { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
+ { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
+ { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
+ { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
+ { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+ { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
+ { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+
+ /* Sony PlayStation VR Headset */
+ { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
};
/*
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 5a13ff29f4f0..c0530a1af5e3 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev,
r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
+ if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
+ dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
+ r.pixel_format = DRM_FORMAT_XBGR2101010;
+
ret = drm_mode_addfb2(dev, &r, file_priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 186c4e90cc1c..89eef1bb4ddc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -836,9 +836,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
if (!mm->color_adjust)
return NULL;
- hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
- hole_start = __drm_mm_hole_node_start(hole);
- hole_end = hole_start + hole->hole_size;
+ /*
+ * The hole found during scanning should ideally be the first element
+ * in the hole_stack list, but due to side-effects in the driver it
+ * may not be.
+ */
+ list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = hole_start + hole->hole_size;
+
+ if (hole_start <= scan->hit_start &&
+ hole_end >= scan->hit_end)
+ break;
+ }
+
+ /* We should only be called after we found the hole previously */
+ DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
+ if (unlikely(&hole->hole_stack == &mm->hole_stack))
+ return NULL;
DRM_MM_BUG_ON(hole_start > scan->hit_start);
DRM_MM_BUG_ON(hole_end < scan->hit_end);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 555fbe54d6e2..00b8445ba819 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -654,6 +654,26 @@ out:
}
/**
+ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+ *
+ * Determine if %current task is an output poll worker. This can be used
+ * to select distinct code paths for output polling versus other contexts.
+ *
+ * One use case is to avoid a deadlock between the output poll worker and
+ * the autosuspend worker wherein the latter waits for polling to finish
+ * upon calling drm_kms_helper_poll_disable(), while the former waits for
+ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
+ * connector ->detect hook.
+ */
+bool drm_kms_helper_is_poll_worker(void)
+{
+ struct work_struct *work = current_work();
+
+ return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+
+/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
*
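
drm_kms_helper_is_poll_worker() lets a connector's ->detect hook tell whether it is running from the output poll worker. Calling pm_runtime_get_sync() from that worker can deadlock against runtime suspend, which calls drm_kms_helper_poll_disable() and therefore waits for the worker to finish, so detect hooks skip the runtime-PM reference in that context; that is exactly what the amdgpu_connector changes earlier in this series do. A minimal sketch of the pattern (names are illustrative, and the declaration is assumed to live in drm_crtc_helper.h as in this kernel):

#include <drm/drm_crtc_helper.h>
#include <linux/pm_runtime.h>

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
        enum drm_connector_status status = connector_status_disconnected;
        int r;

        if (!drm_kms_helper_is_poll_worker()) {
                r = pm_runtime_get_sync(connector->dev->dev);
                if (r < 0)
                        return connector_status_disconnected;
        }

        /* ... probe the hardware and update 'status' here ... */

        if (!drm_kms_helper_is_poll_worker()) {
                pm_runtime_mark_last_busy(connector->dev->dev);
                pm_runtime_put_autosuspend(connector->dev->dev);
        }

        return status;
}
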
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 2b8bf2dd6387..f68ef1b3a28c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
if (!node) {
- dev_err(dev, "failed to allocate memory\n");
ret = -ENOMEM;
goto err;
}
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
struct drm_device *drm_dev = g2d->subdrv.drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
- struct timeval now;
+ struct timespec64 now;
if (list_empty(&runqueue_node->event_list))
return;
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
e = list_first_entry(&runqueue_node->event_list,
struct drm_exynos_pending_g2d_event, base.link);
- do_gettimeofday(&now);
+ ktime_get_ts64(&now);
e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
e->event.cmdlist_no = cmdlist_no;
drm_send_event(drm_dev, &e->base);
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
return -EFAULT;
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
- if (!runqueue_node) {
- dev_err(dev, "failed to allocate memory\n");
+ if (!runqueue_node)
return -ENOMEM;
- }
+
run_cmdlist = &runqueue_node->run_cmdlist;
event_list = &runqueue_node->event_list;
INIT_LIST_HEAD(run_cmdlist);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
deleted file mode 100644
index 71a0b4c0c1e8..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * YoungJun Cho <yj44.cho@samsung.com>
- * Eunchul Kim <chulspro.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ROTATOR_H_
-#define _EXYNOS_DRM_ROTATOR_H_
-
-/* TODO */
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a4b75a46f946..abd84cbcf1c2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata)
/* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
| HDMI_I2S_SEL_LRCK(6));
- hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
- | HDMI_I2S_SEL_SDATA2(4));
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3)
+ | HDMI_I2S_SEL_SDATA0(4));
+
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
| HDMI_I2S_SEL_SDATA2(2));
+
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
/* I2S_CON_1 & 2 */
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index 30496134a3d0..d7cbe53c4c01 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -569,7 +569,7 @@
#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
/* Real input DMA size register */
#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 04be0f7e8193..4420c203ac85 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -464,7 +464,7 @@
/* I2S_PIN_SEL_1 */
#define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4)
-#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+#define HDMI_I2S_SEL_SDATA0(x) ((x) & 0x7)
/* I2S_PIN_SEL_2 */
#define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4401068ff468..3ab1ace2a6bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
list_add_tail(&vma->exec_link, &eb->unbound);
if (drm_mm_node_allocated(&vma->node))
err = i915_vma_unbind(vma);
+ if (unlikely(err))
+ vma->exec_flags = NULL;
}
return err;
}
@@ -2410,7 +2412,7 @@ err_request:
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
- args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
+ args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
args->rsvd2 |= (u64)out_fence_fd << 32;
out_fence_fd = -1;
} else {
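The rsvd2 fix above is purely an argument-order issue: GENMASK_ULL() takes (high, low). A small standalone check, using a local stand-in that mirrors the macro's common definition (an assumption for illustration, not the i915 code), shows why the swapped form loses the in-fence fd kept in the low 32 bits:

    #include <stdio.h>

    /* Local stand-in mirroring the kernel's GENMASK_ULL(h, l) definition */
    #define BITS_PER_LONG_LONG 64
    #define GENMASK_ULL(h, l) \
            (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

    int main(void)
    {
            printf("GENMASK_ULL(31, 0) = %#llx\n", GENMASK_ULL(31, 0)); /* 0xffffffff */
            printf("GENMASK_ULL(0, 31) = %#llx\n", GENMASK_ULL(0, 31)); /* 0x0: wipes rsvd2 */
            return 0;
    }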
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e09d18df8b7f..a3e93d46316a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
- trace_i915_gem_request_execute(request);
-
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
@@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
+ trace_i915_gem_request_execute(request);
+
wake_up_all(&request->execute);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a2108e35c599..33eb0c5b1d32 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2027,7 +2027,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW5_LN0_AE 0x162454
#define _CNL_PORT_TX_DW5_LN0_B 0x162654
#define _CNL_PORT_TX_DW5_LN0_C 0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D 0x162ED4
+#define _CNL_PORT_TX_DW5_LN0_D 0x162E54
#define _CNL_PORT_TX_DW5_LN0_F 0x162854
#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW5_GRP_AE, \
@@ -2058,7 +2058,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C
#define _CNL_PORT_TX_DW7_LN0_B 0x16265C
#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D 0x162EDC
+#define _CNL_PORT_TX_DW7_LN0_D 0x162E5C
#define _CNL_PORT_TX_DW7_LN0_F 0x16285C
#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW7_GRP_AE, \
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 522d54fecb53..4a01f62a392d 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
- return NULL;
-
/* MST */
if (pipe >= 0) {
+ if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+ return NULL;
+
encoder = dev_priv->av_enc_map[pipe];
/*
* when bootup, audio driver may not know it is
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 5155f0179b61..05520202c967 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -36,6 +36,7 @@
#include "meson_venc.h"
#include "meson_vpp.h"
#include "meson_viu.h"
+#include "meson_canvas.h"
#include "meson_registers.h"
/* CRTC definition */
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)
} else
meson_vpp_disable_interlace_vscaler_osd1(priv);
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR);
+
/* Enable OSD1 */
writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
priv->io_base + _REG(VPP_MISC));
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 5e8b392b9d1f..8450d6ac8c9b 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -43,6 +43,9 @@ struct meson_drm {
bool osd1_commit;
uint32_t osd1_ctrl_stat;
uint32_t osd1_blk0_cfg[5];
+ uint32_t osd1_addr;
+ uint32_t osd1_stride;
+ uint32_t osd1_height;
} viu;
struct {
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index d0a6ac8390f3..27bd3503e1e4 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Update Canvas with buffer address */
gem = drm_fb_cma_get_gem_obj(fb, 0);
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
- gem->paddr, fb->pitches[0],
- fb->height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR);
+ priv->viu.osd1_addr = gem->paddr;
+ priv->viu.osd1_stride = fb->pitches[0];
+ priv->viu.osd1_height = fb->height;
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 3e9bba4d6624..6d8e3a9a6fc0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- aspace = NULL;;
+ aspace = NULL;
}
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 69d6e61a01ec..6ed9cb053dfa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
- return conn_status;
+ /* Outputs are only polled while runtime active, so acquiring a
+ * runtime PM ref here is unnecessary (and would deadlock upon
+ * runtime suspend because it waits for polling to finish).
+ */
+ if (!drm_kms_helper_is_poll_worker()) {
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return conn_status;
+ }
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -647,8 +653,10 @@ detect_analog:
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return conn_status;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index dd8d4352ed99..caddce88d2d8 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
+ dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
if (nouveau_atomic)
dev->driver->driver_features |= DRIVER_ATOMIC;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5012f5e47a1e..2e2ca3c6b47d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -899,9 +899,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -924,8 +926,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1039,9 +1045,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1108,8 +1116,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1173,9 +1183,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->dac_load_detect)
return ret;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1187,8 +1199,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1251,9 +1267,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (radeon_connector->detected_hpd_without_ddc) {
force = true;
@@ -1436,8 +1454,10 @@ out:
}
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1688,9 +1708,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dig_connector->is_mst)
return connector_status_disconnected;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1777,8 +1799,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8d3e3d2e0090..7828a5e10629 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev,
if ((rdev->flags & RADEON_IS_PCI) &&
(rdev->family <= CHIP_RS740))
rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+ if (rdev->family == CHIP_CEDAR)
+ rdev->need_dma32 = true;
+#endif
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 326ad068c15a..4b6542538ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
- /* allow new DPM state to be picked */
- radeon_pm_compute_clocks_dpm(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = rdev->pm.dpm.ac_power ?
- POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
restart_search:
/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 2c18996d59c5..0d95888ccc3e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -461,7 +461,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
{
struct drm_sched_job *s_job;
struct drm_sched_entity *entity, *tmp;
- int i;;
+ int i;
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 3c15cf24b503..b3960118deb9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -260,7 +260,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
/* Configure the dot clock */
- clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+ clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
/* Set the resolution */
regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -335,6 +335,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+ /* Enable the output on the pins */
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);
}
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
@@ -418,7 +421,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
WARN_ON(!tcon->quirks->has_channel_1);
/* Configure the dot clock */
- clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
+ clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5720a0d4ac0a..677ac16c8a6d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_3D_FEATURES:
value = vgdev->has_virgl_3d == true ? 1 : 0;
break;
+ case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+ value = 1;
+ break;
default:
return -EINVAL;
}
@@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_get_caps *args = data;
- int size;
+ unsigned size, host_caps_size;
int i;
int found_valid = -1;
int ret;
@@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
if (vgdev->num_capsets == 0)
return -ENOSYS;
+ /* don't allow userspace to pass 0 */
+ if (args->size == 0)
+ return -EINVAL;
+
spin_lock(&vgdev->display_info_lock);
for (i = 0; i < vgdev->num_capsets; i++) {
if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
return -EINVAL;
}
- size = vgdev->capsets[found_valid].max_size;
- if (args->size > size) {
- spin_unlock(&vgdev->display_info_lock);
- return -EINVAL;
- }
+ host_caps_size = vgdev->capsets[found_valid].max_size;
+ /* only copy to user the minimum of the host and guest caps sizes */
+ size = min(args->size, host_caps_size);
list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
if (cache_ent->id == args->cap_set_id &&
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 658fa2d3e40c..48685cddbad1 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
+ static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
chained_irq_enter(chip, desc);
@@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- const int int_reg[] = { 4, 5, 8, 9};
+ static const int int_reg[] = { 4, 5, 8, 9};
chained_irq_enter(chip, desc);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index bb9c087e6c0d..9f2d9ec42add 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
offset = image->rect.left + image->rect.top * pix->bytesperline;
break;
case V4L2_PIX_FMT_SBGGR16:
case V4L2_PIX_FMT_SGBRG16:
case V4L2_PIX_FMT_SGRBG16:
case V4L2_PIX_FMT_SRGGB16:
+ case V4L2_PIX_FMT_Y16:
offset = image->rect.left * 2 +
image->rect.top * pix->bytesperline;
break;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 24e12b87a0cb..caa05b0702e1 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_Y10_1X10:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW10;
cfg->data_width = IPU_CSI_DATA_WIDTH_10;
@@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_Y12_1X12:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW12;
cfg->data_width = IPU_CSI_DATA_WIDTH_12;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index f1cec3d70498..0f70e8847540 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
if (pre_node == pre->dev->of_node) {
mutex_unlock(&ipu_pre_list_mutex);
device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
+ of_node_put(pre_node);
return pre;
}
}
mutex_unlock(&ipu_pre_list_mutex);
+ of_node_put(pre_node);
+
return NULL;
}
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 067365c733c6..97b99500153d 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
mutex_unlock(&ipu_prg_list_mutex);
device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
prg->id = ipu_id;
+ of_node_put(prg_node);
return prg;
}
}
mutex_unlock(&ipu_prg_list_mutex);
+ of_node_put(prg_node);
+
return NULL;
}
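Both the ipu-pre and ipu-prg hunks above plug the same leak: of_parse_phandle() returns a device_node with its refcount raised, and every return path has to drop it. A hedged sketch of the balanced pattern follows; the property name and the foo type are made up for illustration.

    static struct foo *foo_lookup_by_phandle(struct device *dev, int index)
    {
            struct device_node *node;
            struct foo *result = NULL;

            node = of_parse_phandle(dev->of_node, "vendor,foo", index);
            if (!node)
                    return NULL;

            /* ... walk the registered instances, set 'result' on a match ... */

            of_node_put(node);      /* balance of_parse_phandle() on every path */
            return result;
    }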
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a9805c7cb305..e2954fb86d65 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -123,8 +123,10 @@ config I2C_I801
Wildcat Point (PCH)
Wildcat Point-LP (PCH)
BayTrail (SOC)
+ Braswell (SOC)
Sunrise Point-H (PCH)
Sunrise Point-LP (PCH)
+ Kaby Lake-H (PCH)
DNV (SOC)
Broxton (SOC)
Lewisburg (PCH)
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index cd07a69e2e93..44deae78913e 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -50,6 +50,9 @@
#define BCM2835_I2C_S_CLKT BIT(9)
#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
+#define BCM2835_I2C_FEDL_SHIFT 16
+#define BCM2835_I2C_REDL_SHIFT 0
+
#define BCM2835_I2C_CDIV_MIN 0x0002
#define BCM2835_I2C_CDIV_MAX 0xFFFE
@@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
{
- u32 divider;
+ u32 divider, redl, fedl;
divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
i2c_dev->bus_clk_rate);
@@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
+ /*
+ * Number of core clocks to wait after falling edge before
+ * outputting the next data bit. Note that both FEDL and REDL
+ * can't be greater than CDIV/2.
+ */
+ fedl = max(divider / 16, 1u);
+
+ /*
+ * Number of core clocks to wait after rising edge before
+ * sampling the next incoming data bit.
+ */
+ redl = max(divider / 4, 1u);
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL,
+ (fedl << BCM2835_I2C_FEDL_SHIFT) |
+ (redl << BCM2835_I2C_REDL_SHIFT));
return 0;
}
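As a worked example of the new delay setup (the numbers are illustrative, not taken from the patch): with a 150 MHz core clock and a 100 kHz bus, divider = DIV_ROUND_UP(150000000, 100000) = 1500, so fedl = max(1500 / 16, 1) = 93 and redl = max(1500 / 4, 1) = 375. Both stay below CDIV/2 = 750, and the DEL register write becomes (93 << BCM2835_I2C_FEDL_SHIFT) | (375 << BCM2835_I2C_REDL_SHIFT).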
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index ae691884d071..05732531829f 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
i2c_dw_disable_int(dev);
/* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ __i2c_dw_enable_and_wait(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
@@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH);
if (IS_ERR(gpio)) {
r = PTR_ERR(gpio);
- if (r == -ENOENT)
+ if (r == -ENOENT || r == -ENOSYS)
return 0;
return r;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 8eac00efadc1..692b34125866 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -58,6 +58,7 @@
* Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes
* Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
* BayTrail (SOC) 0x0f12 32 hard yes yes yes
+ * Braswell (SOC) 0x2292 32 hard yes yes yes
* Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes
* Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
* DNV (SOC) 0x19df 32 hard yes yes yes
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 2fd8b6d00391..87197ece0f90 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adap);
init_completion(&siic->done);
- /* Controller Initalisation */
+ /* Controller initialisation */
writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
@@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
* but they start to affect the speed when clock is set to faster
* frequencies.
* Through the actual tests, use the different user_div value(which
- * in the divider formular 'Fio / (Fi2c * user_div)') to adapt
+ * in the divider formula 'Fio / (Fi2c * user_div)') to adapt
* the different ranges of i2c bus clock frequency, to make the SCL
* more accurate.
*/
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 35a408d0ae4f..99bc9bd64b9e 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
* for example, an "address" value of 0x12345f000 will
* flush from 0x123440000 to 0x12347ffff (256KiB). */
unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
- unsigned long mask = __rounddown_pow_of_two(address ^ last);;
+ unsigned long mask = __rounddown_pow_of_two(address ^ last);
desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
} else {
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index e40065bdbfc8..0a7e99d62c69 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
- seq_printf (seq, "]");
+ seq_putc(seq, ']');
}
static int multipath_congested(struct mddev *mddev, int bits)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index bc67ab6844f0..254e44e44668 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio;
int ff = 0;
+ if (!page)
+ return;
+
if (test_bit(Faulty, &rdev->flags))
return;
@@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev)
* the only valid external interface is through the md
* device.
*/
+ mddev->has_superblocks = false;
rdev_for_each(rdev, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
@@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev)
set_disk_ro(mddev->gendisk, 1);
}
+ if (rdev->sb_page)
+ mddev->has_superblocks = true;
+
/* perform some consistency tests on the device.
* We don't want the data to overlap the metadata,
* Internal Bitmap issues have been handled elsewhere.
@@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev)
}
if (mddev->sync_set == NULL) {
mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- if (!mddev->sync_set)
- return -ENOMEM;
+ if (!mddev->sync_set) {
+ err = -ENOMEM;
+ goto abort;
+ }
}
spin_lock(&pers_lock);
@@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev)
else
pr_warn("md: personality for level %s is not loaded!\n",
mddev->clevel);
- return -EINVAL;
+ err = -EINVAL;
+ goto abort;
}
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
@@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev)
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
module_put(pers->owner);
- return -EINVAL;
+ err = -EINVAL;
+ goto abort;
}
if (pers->sync_request) {
@@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev)
mddev->private = NULL;
module_put(pers->owner);
bitmap_destroy(mddev);
- return err;
+ goto abort;
}
if (mddev->queue) {
bool nonrot = true;
@@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
+
+abort:
+ if (mddev->bio_set) {
+ bioset_free(mddev->bio_set);
+ mddev->bio_set = NULL;
+ }
+ if (mddev->sync_set) {
+ bioset_free(mddev->sync_set);
+ mddev->sync_set = NULL;
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync);
bool md_write_start(struct mddev *mddev, struct bio *bi)
{
int did_change = 0;
+
if (bio_data_dir(bi) != WRITE)
return true;
@@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
rcu_read_unlock();
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
+ if (!mddev->has_superblocks)
+ return true;
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
mddev->suspended);
@@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread)
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ mddev->delta_disks > 0 &&
+ mddev->pers->finish_reshape &&
+ mddev->pers->size &&
+ mddev->queue) {
+ mddev_lock_nointr(mddev);
+ md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+ mddev_unlock(mddev);
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
+
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
@@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev,
int removed = 0;
bool remove_some = false;
+ if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ /* Mustn't remove devices when resync thread is running */
+ return 0;
+
rdev_for_each(rdev, mddev) {
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 58cd20a5e85e..fbc925cce810 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -468,6 +468,8 @@ struct mddev {
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
+
+ bool has_superblocks:1;
};
enum recovery_flags {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b2eae332e1a2..fe872dc6712e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
bio_copy_data(behind_bio, bio);
skip_copy:
- r1_bio->behind_master_bio = behind_bio;;
+ r1_bio->behind_master_bio = behind_bio;
set_bit(R1BIO_BehindIO, &r1_bio->state);
return;
@@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
struct md_rdev *repl =
conf->mirrors[conf->raid_disks + number].rdev;
freeze_array(conf, 0);
+ if (atomic_read(&repl->nr_pending)) {
+ /* Some queued IO on the retry_list still holds
+ * repl, so we must not set the replacement to
+ * NULL here; doing so could lead to a NULL rdev
+ * dereference in sync_request_write() and
+ * handle_write_finished().
+ */
+ err = -EBUSY;
+ unfreeze_array(conf);
+ goto abort;
+ }
clear_bit(Replacement, &repl->flags);
p->rdev = repl;
conf->mirrors[conf->raid_disks + number].rdev = NULL;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c7294e7557e0..eb84bc68e2fd 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -26,6 +26,18 @@
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
+/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk.
+ * There are three safe ways to access raid1_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery is known to be happening - i.e. in code that is
+ * called as part of performing resync/recovery.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ * and if it is non-NULL, increment rdev->nr_pending before dropping the
+ * RCU lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and if
+ * it has been incremented, the pointer is put back in .rdev.
+ */
+
struct raid1_info {
struct md_rdev *rdev;
sector_t head_position;
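The third access rule above is the one hot-path code relies on. A minimal sketch of that pattern (illustrative, not lifted from the driver) looks like:

    struct md_rdev *rdev;

    rcu_read_lock();
    rdev = rcu_dereference(conf->mirrors[i].rdev);
    if (rdev && !test_bit(Faulty, &rdev->flags))
            atomic_inc(&rdev->nr_pending);  /* pin the device before unlocking */
    else
            rdev = NULL;
    rcu_read_unlock();

    if (rdev) {
            /* ... submit I/O against rdev ... */
            rdev_dec_pending(rdev, mddev);  /* release the pin when done */
    }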
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 99c9207899a7..c5e6c60fc0d4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data)
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
-#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
/*
@@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
for (m = 0; m < conf->copies; m++) {
int dev = r10_bio->devs[m].devnum;
rdev = conf->mirrors[dev].rdev;
- if (r10_bio->devs[m].bio == NULL)
+ if (r10_bio->devs[m].bio == NULL ||
+ r10_bio->devs[m].bio->bi_end_io == NULL)
continue;
if (!r10_bio->devs[m].bio->bi_status) {
rdev_clear_badblocks(
@@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
md_error(conf->mddev, rdev);
}
rdev = conf->mirrors[dev].replacement;
- if (r10_bio->devs[m].repl_bio == NULL)
+ if (r10_bio->devs[m].repl_bio == NULL ||
+ r10_bio->devs[m].repl_bio->bi_end_io == NULL)
continue;
if (!r10_bio->devs[m].repl_bio->bi_status) {
@@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev)
if (fc > 1 || fo > 0) {
pr_err("only near layout is supported by clustered"
" raid10\n");
- goto out;
+ goto out_free_conf;
}
}
@@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev)
return;
if (mddev->delta_disks > 0) {
- sector_t size = raid10_size(mddev, 0, 0);
- md_set_array_sectors(mddev, size);
if (mddev->recovery_cp > mddev->resync_max_sectors) {
mddev->recovery_cp = mddev->resync_max_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
- mddev->resync_max_sectors = size;
- if (mddev->queue) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
- }
+ mddev->resync_max_sectors = mddev->array_sectors;
} else {
int d;
rcu_read_lock();
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index db2ac22ac1b4..e2e8840de9bf 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -2,6 +2,19 @@
#ifndef _RAID10_H
#define _RAID10_H
+/* Note: raid10_info.rdev can be set to NULL asynchronously by
+ * raid10_remove_disk.
+ * There are three safe ways to access raid10_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery/reshape is known to be happening - i.e. in code
+ * that is called as part of performing resync/recovery/reshape.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ * and if it is non-NULL, increment rdev->nr_pending before dropping the
+ * RCU lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and if
+ * it has been incremented, the pointer is put back in .rdev.
+ */
+
struct raid10_info {
struct md_rdev *rdev, *replacement;
sector_t head_position;
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 0c76bcedfc1c..a001808a2b77 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf);
extern void ppl_stripe_write_finished(struct stripe_head *sh);
extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
extern void ppl_quiesce(struct r5conf *conf, int quiesce);
+extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
static inline bool raid5_has_ppl(struct r5conf *conf)
{
@@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
if (conf->log)
ret = r5l_handle_flush_request(conf->log, bio);
else if (raid5_has_ppl(conf))
- ret = 0;
+ ret = ppl_handle_flush_request(conf->log, bio);
return ret;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 2764c2290062..42890a08375b 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)
}
}
+int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
+{
+ if (bio->bi_iter.bi_size == 0) {
+ bio_endio(bio);
+ return 0;
+ }
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ return -EAGAIN;
+}
+
void ppl_stripe_write_finished(struct stripe_head *sh)
{
struct ppl_io_unit *io;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 50d01144b805..b5d2601483e3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
+ size_t namelen = sizeof(conf->cache_name[0]);
int devs = max(conf->raid_disks, conf->previous_raid_disks);
if (conf->mddev->gendisk)
- sprintf(conf->cache_name[0],
+ snprintf(conf->cache_name[0], namelen,
"raid%d-%s", conf->level, mdname(conf->mddev));
else
- sprintf(conf->cache_name[0],
+ snprintf(conf->cache_name[0], namelen,
"raid%d-%p", conf->level, conf->mddev);
- sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+ snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
@@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf)
log_exit(conf);
- if (conf->shrinker.nr_deferred)
- unregister_shrinker(&conf->shrinker);
-
+ unregister_shrinker(&conf->shrinker);
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
@@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
- if (mddev->delta_disks > 0) {
- md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- if (mddev->queue) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
- }
- } else {
+ if (mddev->delta_disks <= 0) {
int d;
spin_lock_irq(&conf->device_lock);
mddev->degraded = raid5_calc_degraded(conf);
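On the grow_stripes() hunk above: switching to snprintf() plus the "%.27s-alt" precision keeps the derived slab name inside the same buffer — assuming the cache_name entries are 32 bytes (which is what namelen measures), 27 characters plus "-alt" plus the terminating NUL is exactly 32. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            char name0[32], name1[32];

            /* A deliberately long device name would overflow a plain sprintf() */
            snprintf(name0, sizeof(name0), "raid%d-%s", 6,
                     "md_with_a_very_long_name_indeed");
            /* Truncate to 27 chars so "-alt" and the NUL still fit */
            snprintf(name1, sizeof(name1), "%.27s-alt", name0);

            printf("%s\n%s\n", name0, name1);
            return 0;
    }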
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2e6123825095..3f8da26032ac 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -450,6 +450,18 @@ enum {
* HANDLE gets cleared if stripe_handle leaves nothing locked.
*/
+/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
+ * There are three safe ways to access disk_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
+ * is called as part of performing resync/recovery/reshape.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ * and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
+ * lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and if
+ * it has been incremented, the pointer is put back in .rdev.
+ */
+
struct disk_info {
struct md_rdev *rdev, *replacement;
struct page *extra_page; /* extra page to use in prexor */
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 0a7bdbed3a6f..e9c1485c32b9 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -45,8 +45,16 @@
#define REG_TO_DCPU_MBOX 0x10
#define REG_TO_HOST_MBOX 0x14
+/* Macros to process offsets returned by the DCPU */
+#define DRAM_MSG_ADDR_OFFSET 0x0
+#define DRAM_MSG_TYPE_OFFSET 0x1c
+#define DRAM_MSG_ADDR_MASK ((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
+#define DRAM_MSG_TYPE_MASK ((1UL << \
+ (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)
+
/* Message RAM */
-#define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32))
+#define DCPU_MSG_RAM_START 0x100
+#define DCPU_MSG_RAM(x) (DCPU_MSG_RAM_START + (x) * sizeof(u32))
/* DRAM Info Offsets & Masks */
#define DRAM_INFO_INTERVAL 0x0
@@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[])
return sum;
}
+static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
+ char *buf, ssize_t *size)
+{
+ unsigned int msg_type;
+ unsigned int offset;
+ void __iomem *ptr = NULL;
+
+ msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
+ offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
+
+ /*
+ * msg_type == 1: the offset is relative to the message RAM
+ * msg_type == 0: the offset is relative to the data RAM (this is the
+ * previous way of passing data)
+ * msg_type is anything else: there's a critical hardware problem
+ */
+ switch (msg_type) {
+ case 1:
+ ptr = priv->regs + DCPU_MSG_RAM_START + offset;
+ break;
+ case 0:
+ ptr = priv->dmem + offset;
+ break;
+ default:
+ dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
+ response);
+ if (buf && size)
+ *size = sprintf(buf,
+ "FATAL: communication error with DCPU\n");
+ }
+
+ return ptr;
+}
+
static int __send_command(struct private_data *priv, unsigned int cmd,
u32 result[])
{
@@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
{
u32 response[MSG_FIELD_MAX];
unsigned int info;
- int ret;
+ ssize_t ret;
ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
if (ret)
@@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev,
u32 response[MSG_FIELD_MAX];
void __iomem *info;
struct private_data *priv;
- unsigned int offset;
u8 refresh, sr_abort, ppre, thermal_offs, tuf;
u32 mr4;
- int ret;
+ ssize_t ret;
ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
if (ret)
return ret;
priv = dev_get_drvdata(dev);
- offset = response[MSG_ARG0];
- info = priv->dmem + offset;
+
+ info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+ if (!info)
+ return ret;
mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;
@@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
u32 response[MSG_FIELD_MAX];
struct private_data *priv;
void __iomem *info;
- unsigned int offset;
unsigned long val;
int ret;
@@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- offset = response[MSG_ARG0];
- info = priv->dmem + offset;
+ info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
+ if (!info)
+ return -EIO;
+
writel_relaxed(val, info + DRAM_INFO_INTERVAL);
return count;
@@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
u32 response[MSG_FIELD_MAX];
struct private_data *priv;
void __iomem *info;
- unsigned int offset;
- int ret;
+ ssize_t ret;
ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
if (ret)
return ret;
- offset = response[MSG_ARG0];
priv = dev_get_drvdata(dev);
- info = priv->dmem + offset;
+
+ info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+ if (!info)
+ return ret;
return sprintf(buf, "%#x %#x %#x %#x %#x\n",
readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
- readl_relaxed(info + DRAM_VENDOR_ERROR));
+ readl_relaxed(info + DRAM_VENDOR_ERROR) &
+ DRAM_VENDOR_MASK);
}
static int brcmstb_dpfe_resume(struct platform_device *pdev)
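As a worked example of the decode in get_msg_ptr() above (the response value is illustrative): response = 0x10000040 gives msg_type = (0x10000040 >> 28) & DRAM_MSG_TYPE_MASK = 1 and offset = 0x10000040 & DRAM_MSG_ADDR_MASK = 0x40, so the reply is read from priv->regs + DCPU_MSG_RAM_START + 0x40. The same offset with msg_type 0 would instead resolve against priv->dmem, and any other type is reported as a fatal DCPU communication error.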
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 8d12017b9893..4470630dd545 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg)
__FILE__, __LINE__, iocnum);
return -ENODEV;
}
+ if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
+ return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
ioc->name));
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 2dd2db9bc1c9..337462e1569f 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -133,8 +133,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
if (!rc) {
rc = copy_to_user((u64 __user *) args, &irq_offset,
sizeof(irq_offset));
- if (rc)
+ if (rc) {
ocxl_afu_irq_free(ctx, irq_offset);
+ return -EFAULT;
+ }
}
break;
@@ -329,7 +331,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
used += sizeof(header);
- rc = (ssize_t) used;
+ rc = used;
return rc;
}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 908e4db03535..42d6aa89a48a 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
return 1;
}
- mmc_claim_host(card->host);
err = mmc_send_status(card, &status);
if (err) {
pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
@@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
} while (!err);
out:
- mmc_release_host(card->host);
return err;
}
@@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card)
int err;
u8 *ext_csd;
- mmc_claim_host(card->host);
err = mmc_get_ext_csd(card, &ext_csd);
- mmc_release_host(card->host);
if (err)
return err;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 35026795be28..fa41d9422d57 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {
static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
+ .num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
.init = dw_mci_exynos_priv_init,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 73fd75c3c824..89cdb3d533bb 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
if (priv->ctrl_id < 0)
priv->ctrl_id = 0;
+ if (priv->ctrl_id >= TIMING_MODE)
+ return -EINVAL;
+
host->priv = priv;
return 0;
}
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
static const struct dw_mci_drv_data hi6220_data = {
.caps = dw_mci_hi6220_caps,
+ .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps),
.switch_voltage = dw_mci_hi6220_switch_voltage,
.set_ios = dw_mci_hi6220_set_ios,
.parse_dt = dw_mci_hi6220_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index a3f1c2b30145..339295212935 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {
static const struct dw_mci_drv_data rk3288_drv_data = {
.caps = dw_mci_rk3288_dwmmc_caps,
+ .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
index d38e94ae2b85..c06b5393312f 100644
--- a/drivers/mmc/host/dw_mmc-zx.c
+++ b/drivers/mmc/host/dw_mmc-zx.c
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {
static const struct dw_mci_drv_data zx_drv_data = {
.caps = zx_dwmmc_caps,
+ .num_caps = ARRAY_SIZE(zx_dwmmc_caps),
.execute_tuning = dw_mci_zx_execute_tuning,
.prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
.parse_dt = dw_mci_zx_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 0aa39975f33b..d9b4acefed31 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
{
struct dw_mci *host = s->private;
+ pm_runtime_get_sync(host->dev);
+
seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
+ pm_runtime_put_autosuspend(host->dev);
+
return 0;
}
@@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ struct mmc_host *mmc = slot->mmc;
+ int ctrl_id;
+
+ if (host->pdata->caps)
+ mmc->caps = host->pdata->caps;
+
+ /*
+ * Support MMC_CAP_ERASE by default.
+ * It needs to use trim/discard/erase commands.
+ */
+ mmc->caps |= MMC_CAP_ERASE;
+
+ if (host->pdata->pm_caps)
+ mmc->pm_caps = host->pdata->pm_caps;
+
+ if (host->dev->of_node) {
+ ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+ if (ctrl_id < 0)
+ ctrl_id = 0;
+ } else {
+ ctrl_id = to_platform_device(host->dev)->id;
+ }
+
+ if (drv_data && drv_data->caps) {
+ if (ctrl_id >= drv_data->num_caps) {
+ dev_err(host->dev, "invalid controller id %d\n",
+ ctrl_id);
+ return -EINVAL;
+ }
+ mmc->caps |= drv_data->caps[ctrl_id];
+ }
+
+ if (host->pdata->caps2)
+ mmc->caps2 = host->pdata->caps2;
+
+ /* Process SDIO IRQs through the sdio_irq_work. */
+ if (mmc->caps & MMC_CAP_SDIO_IRQ)
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+ return 0;
+}
+
static int dw_mci_init_slot(struct dw_mci *host)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
- const struct dw_mci_drv_data *drv_data = host->drv_data;
- int ctrl_id, ret;
+ int ret;
u32 freq[2];
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (!mmc->ocr_avail)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
- if (host->pdata->caps)
- mmc->caps = host->pdata->caps;
-
- /*
- * Support MMC_CAP_ERASE by default.
- * It needs to use trim/discard/erase commands.
- */
- mmc->caps |= MMC_CAP_ERASE;
-
- if (host->pdata->pm_caps)
- mmc->pm_caps = host->pdata->pm_caps;
-
- if (host->dev->of_node) {
- ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
- if (ctrl_id < 0)
- ctrl_id = 0;
- } else {
- ctrl_id = to_platform_device(host->dev)->id;
- }
- if (drv_data && drv_data->caps)
- mmc->caps |= drv_data->caps[ctrl_id];
-
- if (host->pdata->caps2)
- mmc->caps2 = host->pdata->caps2;
-
ret = mmc_of_parse(mmc);
if (ret)
goto err_host_allocated;
- /* Process SDIO IRQs through the sdio_irq_work. */
- if (mmc->caps & MMC_CAP_SDIO_IRQ)
- mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+ ret = dw_mci_init_slot_caps(slot);
+ if (ret)
+ goto err_host_allocated;
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index e3124f06a47e..1424bd490dd1 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -543,6 +543,7 @@ struct dw_mci_slot {
/**
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
+ * @num_caps: number of capabilities specified by @caps.
* @init: early implementation specific initialization.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
@@ -554,6 +555,7 @@ struct dw_mci_slot {
*/
struct dw_mci_drv_data {
unsigned long *caps;
+ u32 num_caps;
int (*init)(struct dw_mci *host);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
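The new @num_caps field exists so dw_mci_init_slot_caps() can bounds-check the controller id before indexing @caps. An implementation-specific driver would publish it alongside the array, roughly as in the hypothetical sketch below (the driver name and capability values are placeholders, mirroring the exynos/k3/rockchip/zx hunks above):

    static unsigned long foo_dwmmc_caps[2] = {
            MMC_CAP_4_BIT_DATA,                      /* controller 0 */
            MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, /* controller 1 */
    };

    static const struct dw_mci_drv_data foo_drv_data = {
            .caps           = foo_dwmmc_caps,
            .num_caps       = ARRAY_SIZE(foo_dwmmc_caps),
    };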
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 6d1a983e6227..82c4f05f91d8 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
slot->chip->rpm_retune = intel_host->d3_retune;
}
-static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ int err = sdhci_execute_tuning(mmc, opcode);
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (err)
+ return err;
+
+ /*
+ * Tuning can leave the IP in an active state (Buffer Read Enable bit
+ * set) which prevents the entry to low power states (i.e. S0i3). Data
+ * reset will clear it.
+ */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static void byt_probe_slot(struct sdhci_pci_slot *slot)
{
+ struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+
byt_read_dsm(slot);
+
+ ops->execute_tuning = intel_execute_tuning;
+}
+
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
MMC_CAP_CMD_DURING_TFR |
@@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
int err;
- byt_read_dsm(slot);
+ byt_probe_slot(slot);
err = ni_set_max_freq(slot);
if (err)
@@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
- byt_read_dsm(slot);
+ byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
MMC_CAP_WAIT_WHILE_BUSY;
return 0;
@@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
- byt_read_dsm(slot);
+ byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
slot->cd_idx = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 3e5833cf1fab..eb23f9ba1a9a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
struct net_device *netdev = pdata->netdev;
int ret = 0;
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 22889fc158f2..87c4308b52a7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_ioremap;
self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+ if (!self->aq_hw) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
for (bar = 0; bar < 4; ++bar) {
@@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
mmio_pa = pci_resource_start(pdev, bar);
if (mmio_pa == 0U) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
reg_sz = pci_resource_len(pdev, bar);
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
break;
}
@@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (bar == 4) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
numvecs = min((u8)AQ_CFG_VECS_DEF,
@@ -290,6 +294,8 @@ err_register:
aq_pci_free_irq_vectors(self);
err_hwinit:
iounmap(self->aq_hw->mmio);
+err_free_aq_hw:
+ kfree(self->aq_hw);
err_ioremap:
free_netdev(ndev);
err_pci_func:
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3bdeb295514b..f5c87bd35fa1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
{
int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
- bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
-
- /* Remove the FCS from the packet length */
- if (last)
- size -= ETH_FCS_LEN;
if (likely(first)) {
skb_put(skb, size);
} else {
/* the last fragments' length contains the full frame length */
- if (last)
+ if (lstatus & BD_LFLAG(RXBD_LAST))
size -= skb->len;
- /* Add the last fragment if it contains something other than
- * the FCS, otherwise drop it and trim off any part of the FCS
- * that was already received.
- */
- if (size > 0)
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rxb->page_offset + RXBUF_ALIGNMENT,
- size, GFAR_RXB_TRUESIZE);
- else if (size < 0)
- pskb_trim(skb, skb->len + size);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
}
/* try reuse page */
@@ -3069,6 +3057,9 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
if (priv->padding)
skb_pull(skb, priv->padding);
+ /* Trim off the FCS */
+ pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 996f47568f9e..1b3cc8bb0705 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1901,6 +1901,11 @@ restart_poll:
dev_kfree_skb_any(rx_buff->skb);
remove_buff_from_pool(adapter, rx_buff);
continue;
+ } else if (!rx_buff->skb) {
+ /* free the entry */
+ next->rx_comp.first = 0;
+ remove_buff_from_pool(adapter, rx_buff);
+ continue;
}
length = be32_to_cpu(next->rx_comp.len);
@@ -3755,7 +3760,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
DMA_BIDIRECTIONAL);
- release_login_buffer(adapter);
dma_unmap_single(dev, adapter->login_rsp_buf_token,
adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
@@ -3786,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
ibmvnic_remove(adapter->vdev);
return -EIO;
}
+ release_login_buffer(adapter);
complete(&adapter->init_done);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 0be4575b58a2..fd509160c8f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
"%pI4");
} else if (ethertype.v == ETH_P_IPV6) {
static const struct in6_addr full_ones = {
- .in6_u.u6_addr32 = {htonl(0xffffffff),
- htonl(0xffffffff),
- htonl(0xffffffff),
- htonl(0xffffffff)},
+ .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff)},
};
DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47bab842c5ee..da94c8cba5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
param->wq.linear = 1;
}
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
+
return mlx5e_alloc_cq_common(mdev, param, cq);
}
@@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(&rq_param);
+ mlx5e_build_drop_rq_param(mdev, &rq_param);
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)
@@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
}
#endif
-int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0d4bb0688faa..e5c3ab46a24a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
#include <linux/tcp.h>
#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
+#include <net/ip6_checksum.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
return true;
}
+static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+{
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+ (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+
+ tcp->check = 0;
+ tcp->psh = get_cqe_lro_tcppsh(cqe);
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
+}
+
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
int network_depth = 0;
+ __wsum check;
__be16 proto;
u16 tot_len;
void *ip_p;
- u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
- u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
- (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
-
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
tot_len = cqe_bcnt - network_depth;
@@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
ipv4->check = 0;
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
ipv4->ihl);
+
+ mlx5e_lro_update_tcp_hdr(cqe, tcp);
+ check = csum_partial(tcp, tcp->doff * 4,
+ csum_unfold((__force __sum16)cqe->check_sum));
+ /* Almost done, don't forget the pseudo header */
+ tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
+ tot_len - sizeof(struct iphdr),
+ IPPROTO_TCP, check);
} else {
+ u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;
tcp = ip_p + sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
ipv6->hop_limit = cqe->lro_min_ttl;
- ipv6->payload_len = cpu_to_be16(tot_len -
- sizeof(struct ipv6hdr));
- }
-
- tcp->psh = get_cqe_lro_tcppsh(cqe);
-
- if (tcp_ack) {
- tcp->ack = 1;
- tcp->ack_seq = cqe->lro_ack_seq_num;
- tcp->window = cqe->lro_tcp_win;
+ ipv6->payload_len = cpu_to_be16(payload_len);
+
+ mlx5e_lro_update_tcp_hdr(cqe, tcp);
+ check = csum_partial(tcp, tcp->doff * 4,
+ csum_unfold((__force __sum16)cqe->check_sum));
+ /* Almost done, don't forget the pseudo header */
+ tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+ IPPROTO_TCP, check);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5a4608281f38..707976482c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
if (iph->protocol != IPPROTO_UDP)
goto out;
- udph = udp_hdr(skb);
+ /* Don't assume skb_transport_header() was set */
+ udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
if (udph->dest != htons(9))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fd98b0dc610f..fa86a1466718 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+ tcf_vlan_push_prio(a))
return -EOPNOTSUPP;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 569b42a01026..11b4f1089d1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
default:
hlen = mlx5e_skb_l2_header_offset(skb);
}
- return min_t(u16, hlen, skb->len);
+ return min_t(u16, hlen, skb_headlen(skb));
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 5ecf2cddc16d..c2b1d7d351fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+ /* Create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
- /* create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == SRIOV_LEGACY)
- esw_vport_create_drop_counters(vport);
-
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c025c98700e4..31fc2cfac3b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_ENCAP |
- MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ MLX5_FLOW_CONTEXT_ACTION_DECAP |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
return true;
return false;
@@ -1758,8 +1759,11 @@ search_again_locked:
/* Collect all fgs which has a matching match_criteria */
err = build_match_list(&match_head, ft, spec);
- if (err)
+ if (err) {
+ if (take_write)
+ up_write_ref_node(&ft->node);
return ERR_PTR(err);
+ }
if (!take_write)
up_read_ref_node(&ft->node);
@@ -1768,8 +1772,11 @@ search_again_locked:
dest_num, version);
free_match_list(&match_head);
if (!IS_ERR(rule) ||
- (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
+ if (take_write)
+ up_write_ref_node(&ft->node);
return rule;
+ }
if (!take_write) {
nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index e159243e0fcf..857035583ccd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -34,6 +34,7 @@
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "en.h"
+#include "clock.h"
enum {
MLX5_CYCLES_SHIFT = 23
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2ef641c91c26..ae391e4b7070 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
MLX5_SET(cmd_hca_cap,
set_hca_cap,
cache_line_128byte,
- cache_line_size() == 128 ? 1 : 0);
+ cache_line_size() >= 128 ? 1 : 0);
if (MLX5_CAP_GEN_MAX(dev, dct))
MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 63aca9f847e1..4c2f612e4414 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_SMSC
config SMC9194
tristate "SMC 9194 support"
- depends on (ISA || MAC && BROKEN)
+ depends on ISA
select CRC32
---help---
This is support for the SMC9xxx based Ethernet cards. Choose this
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a0f2be81d52e..8fc02d9db3d0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1451,7 +1451,7 @@ destroy_macvlan_port:
/* the macvlan port may be freed by macvlan_uninit when it fails to register,
 * so we destroy the macvlan port only when it's valid.
*/
- if (create && macvlan_port_get_rtnl(dev))
+ if (create && macvlan_port_get_rtnl(lowerdev))
macvlan_port_destroy(port->dev);
return err;
}
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index d0a113743195..7a6a1fe79309 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
/* it's racing here! */
ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
- if (ret < 0)
+ if (ret < 0) {
netdev_warn(dev->net, "Error writing RFE_CTL\n");
-
- return ret;
+ return ret;
+ }
+ return 0;
}
static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 626c27352ae2..9bb9e562b893 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
- if (unlikely(err)) {
- struct page *page = virt_to_head_page(xdp->data);
-
- put_page(page);
- return false;
- }
+ if (unlikely(err))
+ return false; /* Caller handles free/refcnt */
return true;
}
@@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
struct virtnet_info *vi = netdev_priv(dev);
- bool sent = __virtnet_xdp_xmit(vi, xdp);
+ struct receive_queue *rq = vi->rq;
+ struct bpf_prog *xdp_prog;
+ bool sent;
+ /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+ * indicates XDP resources have been successfully allocated.
+ */
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (!xdp_prog)
+ return -ENXIO;
+
+ sent = __virtnet_xdp_xmit(vi, xdp);
if (!sent)
return -ENOSPC;
return 0;
@@ -546,8 +552,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
struct page *page = virt_to_head_page(buf);
- unsigned int delta = 0, err;
+ unsigned int delta = 0;
struct page *xdp_page;
+ bool sent;
+ int err;
+
len -= vi->hdr_len;
rcu_read_lock();
@@ -558,7 +567,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
void *orig_data;
u32 act;
- if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+ if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
@@ -596,16 +605,19 @@ static struct sk_buff *receive_small(struct net_device *dev,
delta = orig_data - xdp.data;
break;
case XDP_TX:
- if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+ sent = __virtnet_xdp_xmit(vi, &xdp);
+ if (unlikely(!sent)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
- else
- *xdp_xmit = true;
+ goto err_xdp;
+ }
+ *xdp_xmit = true;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
err = xdp_do_redirect(dev, &xdp, xdp_prog);
- if (!err)
- *xdp_xmit = true;
+ if (err)
+ goto err_xdp;
+ *xdp_xmit = true;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -677,7 +689,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct bpf_prog *xdp_prog;
unsigned int truesize;
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- int err;
+ bool sent;
head_skb = NULL;
@@ -746,20 +758,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
break;
case XDP_TX:
- if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+ sent = __virtnet_xdp_xmit(vi, &xdp);
+ if (unlikely(!sent)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
- else
- *xdp_xmit = true;
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
+ goto err_xdp;
+ }
+ *xdp_xmit = true;
if (unlikely(xdp_page != page))
goto err_xdp;
rcu_read_unlock();
goto xdp_xmit;
- case XDP_REDIRECT:
- err = xdp_do_redirect(dev, &xdp, xdp_prog);
- if (!err)
- *xdp_xmit = true;
- rcu_read_unlock();
- goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1cf22e62e3dd..6e0af815f25e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3516,7 +3516,7 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
- hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
+ hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
if (!hwsim_wq)
return -ENOMEM;
rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 7bc5eee96b31..0c2ed11c0603 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -17,7 +17,6 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
@@ -26,6 +25,9 @@
#include <asm/irq_regs.h>
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+static DEFINE_PER_CPU(int, cpu_irq);
+
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -320,17 +322,9 @@ validate_group(struct perf_event *event)
return 0;
}
-static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
-{
- struct platform_device *pdev = armpmu->plat_device;
-
- return pdev ? dev_get_platdata(&pdev->dev) : NULL;
-}
-
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
- struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
-
- plat = armpmu_get_platdata(armpmu);
+ if (WARN_ON_ONCE(!armpmu))
+ return IRQ_NONE;
start_clock = sched_clock();
- if (plat && plat->handle_irq)
- ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
- else
- ret = armpmu->handle_irq(irq, armpmu);
+ ret = armpmu->handle_irq(irq, armpmu);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
@@ -531,54 +522,41 @@ int perf_num_counters(void)
}
EXPORT_SYMBOL_GPL(perf_num_counters);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
+static int armpmu_count_irq_users(const int irq)
{
- struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
- int irq = per_cpu(hw_events->irq, cpu);
+ int cpu, count = 0;
- if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
- return;
-
- if (irq_is_percpu_devid(irq)) {
- free_percpu_irq(irq, &hw_events->percpu_pmu);
- cpumask_clear(&armpmu->active_irqs);
- return;
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(cpu_irq, cpu) == irq)
+ count++;
}
- free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ return count;
}
-void armpmu_free_irqs(struct arm_pmu *armpmu)
+void armpmu_free_irq(int irq, int cpu)
{
- int cpu;
+ if (per_cpu(cpu_irq, cpu) == 0)
+ return;
+ if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
+ return;
+
+ if (!irq_is_percpu_devid(irq))
+ free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+ else if (armpmu_count_irq_users(irq) == 1)
+ free_percpu_irq(irq, &cpu_armpmu);
- for_each_cpu(cpu, &armpmu->supported_cpus)
- armpmu_free_irq(armpmu, cpu);
+ per_cpu(cpu_irq, cpu) = 0;
}
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
+int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
- struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
const irq_handler_t handler = armpmu_dispatch_irq;
- int irq = per_cpu(hw_events->irq, cpu);
if (!irq)
return 0;
- if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
- err = request_percpu_irq(irq, handler, "arm-pmu",
- &hw_events->percpu_pmu);
- } else if (irq_is_percpu_devid(irq)) {
- int other_cpu = cpumask_first(&armpmu->active_irqs);
- int other_irq = per_cpu(hw_events->irq, other_cpu);
-
- if (irq != other_irq) {
- pr_warn("mismatched PPIs detected.\n");
- err = -EINVAL;
- goto err_out;
- }
- } else {
- struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+ if (!irq_is_percpu_devid(irq)) {
unsigned long irq_flags;
err = irq_force_affinity(irq, cpumask_of(cpu));
@@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
goto err_out;
}
- if (platdata && platdata->irq_flags) {
- irq_flags = platdata->irq_flags;
- } else {
- irq_flags = IRQF_PERCPU |
- IRQF_NOBALANCING |
- IRQF_NO_THREAD;
- }
+ irq_flags = IRQF_PERCPU |
+ IRQF_NOBALANCING |
+ IRQF_NO_THREAD;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
err = request_irq(irq, handler, irq_flags, "arm-pmu",
- per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ per_cpu_ptr(&cpu_armpmu, cpu));
+ } else if (armpmu_count_irq_users(irq) == 0) {
+ err = request_percpu_irq(irq, handler, "arm-pmu",
+ &cpu_armpmu);
}
if (err)
goto err_out;
- cpumask_set_cpu(cpu, &armpmu->active_irqs);
+ per_cpu(cpu_irq, cpu) = irq;
return 0;
err_out:
@@ -612,19 +590,6 @@ err_out:
return err;
}
-int armpmu_request_irqs(struct arm_pmu *armpmu)
-{
- int cpu, err;
-
- for_each_cpu(cpu, &armpmu->supported_cpus) {
- err = armpmu_request_irq(armpmu, cpu);
- if (err)
- break;
- }
-
- return err;
-}
-
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
@@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (pmu->reset)
pmu->reset(pmu);
+ per_cpu(cpu_armpmu, cpu) = pmu;
+
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq) {
- if (irq_is_percpu_devid(irq)) {
+ if (irq_is_percpu_devid(irq))
enable_percpu_irq(irq, IRQ_TYPE_NONE);
- return 0;
- }
+ else
+ enable_irq(irq);
}
return 0;
@@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq && irq_is_percpu_devid(irq))
- disable_percpu_irq(irq);
+ if (irq) {
+ if (irq_is_percpu_devid(irq))
+ disable_percpu_irq(irq);
+ else
+ disable_irq(irq);
+ }
+
+ per_cpu(cpu_armpmu, cpu) = NULL;
return 0;
}
@@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
&cpu_pmu->node);
}
-struct arm_pmu *armpmu_alloc(void)
+static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
struct arm_pmu *pmu;
int cpu;
- pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ pmu = kzalloc(sizeof(*pmu), flags);
if (!pmu) {
pr_info("failed to allocate PMU device!\n");
goto out;
}
- pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+ pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
if (!pmu->hw_events) {
pr_info("failed to allocate per-cpu PMU data.\n");
goto out_free_pmu;
@@ -857,6 +830,17 @@ out:
return NULL;
}
+struct arm_pmu *armpmu_alloc(void)
+{
+ return __armpmu_alloc(GFP_KERNEL);
+}
+
+struct arm_pmu *armpmu_alloc_atomic(void)
+{
+ return __armpmu_alloc(GFP_ATOMIC);
+}
+
+
void armpmu_free(struct arm_pmu *pmu)
{
free_percpu(pmu->hw_events);
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 705f1a390e31..0f197516d708 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -11,6 +11,8 @@
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
@@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)
pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
}
+ /*
+ * Log and request the IRQ so the core arm_pmu code can manage
+ * it. We'll have to sanity-check IRQs later when we associate
+ * them with their PMUs.
+ */
per_cpu(pmu_irqs, cpu) = irq;
+ armpmu_request_irq(irq, cpu);
}
return 0;
@@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
return pmu;
}
- pmu = armpmu_alloc();
+ pmu = armpmu_alloc_atomic();
if (!pmu) {
pr_warn("Unable to allocate PMU for CPU%d\n",
smp_processor_id());
@@ -140,6 +148,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
}
/*
+ * Check whether the new IRQ is compatible with those already associated with
+ * the PMU (e.g. we don't have mismatched PPIs).
+ */
+static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
+{
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ int cpu;
+
+ if (!irq)
+ return true;
+
+ for_each_cpu(cpu, &pmu->supported_cpus) {
+ int other_irq = per_cpu(hw_events->irq, cpu);
+ if (!other_irq)
+ continue;
+
+ if (irq == other_irq)
+ continue;
+ if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
+ continue;
+
+ pr_warn("mismatched PPIs detected\n");
+ return false;
+ }
+
+ return true;
+}
+
+/*
* This must run before the common arm_pmu hotplug logic, so that we can
* associate a CPU and its interrupt before the common code tries to manage the
* affinity and so on.
@@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
if (!pmu)
return -ENOMEM;
- cpumask_set_cpu(cpu, &pmu->supported_cpus);
-
per_cpu(probed_pmus, cpu) = pmu;
- /*
- * Log and request the IRQ so the core arm_pmu code can manage it. In
- * some situations (e.g. mismatched PPIs), we may fail to request the
- * IRQ. However, it may be too late for us to do anything about it.
- * The common ARM PMU code will log a warning in this case.
- */
- hw_events = pmu->hw_events;
- per_cpu(hw_events->irq, cpu) = irq;
- armpmu_request_irq(pmu, cpu);
+ if (pmu_irq_matches(pmu, irq)) {
+ hw_events = pmu->hw_events;
+ per_cpu(hw_events->irq, cpu) = irq;
+ }
+
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
/*
* Ideally, we'd probe the PMU here when we find the first matching
@@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void)
if (acpi_disabled)
return 0;
- /*
- * We can't request IRQs yet, since we don't know the cookie value
- * until we know which CPUs share the same logical PMU. We'll handle
- * that in arm_pmu_acpi_cpu_starting().
- */
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 46501cc79fd7..7729eda5909d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
pdev->dev.of_node);
}
- /*
- * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
- * special platdata function that attempts to demux them.
- */
- if (dev_get_platdata(&pdev->dev))
- cpumask_setall(&pmu->supported_cpus);
-
for (i = 0; i < num_irqs; i++) {
int cpu, irq;
@@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
return 0;
}
+static int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ int cpu, err;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ int irq = per_cpu(hw_events->irq, cpu);
+ if (!irq)
+ continue;
+
+ err = armpmu_request_irq(irq, cpu);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static void armpmu_free_irqs(struct arm_pmu *armpmu)
+{
+ int cpu;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ int irq = per_cpu(hw_events->irq, cpu);
+
+ armpmu_free_irq(irq, cpu);
+ }
+}
+
int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table)
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 1fda9d6c7ea3..4b91ff74779b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = {
"uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x",
};
-static const char * const uart_ao_b_gpioz_groups[] = {
+static const char * const uart_ao_b_z_groups[] = {
"uart_ao_tx_b_z", "uart_ao_rx_b_z",
"uart_ao_cts_b_z", "uart_ao_rts_b_z",
};
@@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(nand),
FUNCTION(uart_a),
FUNCTION(uart_b),
- FUNCTION(uart_ao_b_gpioz),
+ FUNCTION(uart_ao_b_z),
FUNCTION(i2c0),
FUNCTION(i2c1),
FUNCTION(i2c2),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index d1a01311c1a2..5e3df194723e 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
+ device_init_wakeup(&device->dev, false);
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
intel_hid_set_enable(&device->dev, false);
intel_button_array_enable(&device->dev, false);
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index b703d6f5b099..c13780b8dabb 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/kernel.h>
@@ -97,9 +98,35 @@ out_unknown:
dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
}
-static int intel_vbtn_probe(struct platform_device *device)
+static void detect_tablet_mode(struct platform_device *device)
{
+ const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int m;
+
+ if (!(chassis_type && strcmp(chassis_type, "31") == 0))
+ goto out;
+
+ status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ obj = vgbs_output.pointer;
+ if (!(obj && obj->type == ACPI_TYPE_INTEGER))
+ goto out;
+
+ m = !(obj->integer.value & TABLET_MODE_FLAG);
+ input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
+out:
+ kfree(vgbs_output.pointer);
+}
+
+static int intel_vbtn_probe(struct platform_device *device)
+{
acpi_handle handle = ACPI_HANDLE(&device->dev);
struct intel_vbtn_priv *priv;
acpi_status status;
@@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device)
return err;
}
- /*
- * VGBS being present and returning something means we have
- * a tablet mode switch.
- */
- status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
- if (ACPI_SUCCESS(status)) {
- union acpi_object *obj = vgbs_output.pointer;
-
- if (obj && obj->type == ACPI_TYPE_INTEGER) {
- int m = !(obj->integer.value & TABLET_MODE_FLAG);
-
- input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
- }
- }
-
- kfree(vgbs_output.pointer);
+ detect_tablet_mode(device);
status = acpi_install_notify_handler(handle,
ACPI_DEVICE_NOTIFY,
@@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
+ device_init_wakeup(&device->dev, false);
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
/*
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index c0c8945603cb..8796211ef24a 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev)
wblock->char_dev.mode = 0444;
ret = misc_register(&wblock->char_dev);
if (ret) {
- dev_warn(dev, "failed to register char dev: %d", ret);
+ dev_warn(dev, "failed to register char dev: %d\n", ret);
ret = -ENOMEM;
goto probe_misc_failure;
}
@@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
if (result) {
dev_warn(wmi_bus_dev,
- "%s data block query control method not found",
+ "%s data block query control method not found\n",
method);
return result;
}
@@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
retval = device_add(&wblock->dev.dev);
if (retval) {
- dev_err(wmi_bus_dev, "failed to register %pULL\n",
+ dev_err(wmi_bus_dev, "failed to register %pUL\n",
wblock->gblock.guid);
if (debug_event)
wmi_method_enable(wblock, 0);
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index fcfd28d2884c..de1b3fce936d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -185,7 +185,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
-oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index b3b931ab77eb..2664ea0df35f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1693,8 +1693,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
* Map in the registers from the adapter.
*/
aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
- if ((*aac_drivers[index].init)(aac))
+ if ((*aac_drivers[index].init)(aac)) {
+ error = -ENODEV;
goto out_unmap;
+ }
if (aac->sync_mode) {
if (aac_sync_mode)
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
deleted file mode 100644
index 828ae3d9a510..000000000000
--- a/drivers/scsi/aic7xxx/aiclib.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Implementation of Utility functions for all SCSI device types.
- *
- * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
- * Copyright (c) 1997, 1998 Kenneth D. Merry.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $
- * $Id$
- */
-
-#include "aiclib.h"
-
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8e2f767147cb..5a645b8b9af1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1889,6 +1889,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* we will not receive ABTS response for this IO */
BNX2FC_IO_DBG(io_req, "Timer context finished processing "
"this scsi cmd\n");
+ return;
}
/* Cancel the timeout_work, as we received IO completion */
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index be5ee2d37815..7dbbbb81a1e7 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -114,7 +114,7 @@ static enum csio_ln_ev fwevt_to_lnevt[] = {
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
- struct csio_lnode *ln = hw->rln;
+ struct csio_lnode *ln;
struct list_head *tmp;
/* Match siblings lnode with portid */
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 022e421c2185..4b44325d1a82 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -876,6 +876,11 @@ static void alua_rtpg_work(struct work_struct *work)
/**
* alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ * @pg: ALUA port group associated with @sdev.
+ * @sdev: SCSI device for which to submit an RTPG.
+ * @qdata: Information about the callback to invoke after the RTPG.
+ * @force: Whether or not to submit an RTPG if a work item that will submit an
+ * RTPG already has been scheduled.
*
* Returns true if and only if alua_rtpg_work() will be called asynchronously.
* That function is responsible for calling @qdata->fn().
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 9a0696f68f37..b81a53c4a9a8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes {
};
struct ibmvfc_fcp_rsp_info {
- __be16 reserved;
+ u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
}__attribute__((packed, aligned (2)));
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 13d6e4ec3022..59a87ca328d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2410,8 +2410,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
continue;
}
- for_each_cpu(cpu, mask)
+ for_each_cpu_and(cpu, mask, cpu_online_mask) {
+ if (cpu >= ioc->cpu_msix_table_sz)
+ break;
ioc->cpu_msix_table[cpu] = reply_q->msix_index;
+ }
}
return;
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 029e2e69b29f..f57a94b4f0d9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1724,7 +1724,6 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
{
struct qedi_ctx *qedi = data;
struct nvm_iscsi_initiator *initiator;
- char *str = buf;
int rc = 1;
u32 ipv6_en, dhcp_en, ip_len;
struct nvm_iscsi_block *block;
@@ -1758,32 +1757,32 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_IP_ADDR:
- rc = snprintf(str, ip_len, fmt, ip);
+ rc = snprintf(buf, ip_len, fmt, ip);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- rc = snprintf(str, ip_len, fmt, sub);
+ rc = snprintf(buf, ip_len, fmt, sub);
break;
case ISCSI_BOOT_ETH_GATEWAY:
- rc = snprintf(str, ip_len, fmt, gw);
+ rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
- rc = snprintf(str, 3, "%hhd\n",
+ rc = snprintf(buf, 3, "%hhd\n",
SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
- rc = snprintf(str, 3, "0\n");
+ rc = snprintf(buf, 3, "0\n");
break;
case ISCSI_BOOT_ETH_MAC:
- rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+ rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
break;
case ISCSI_BOOT_ETH_VLAN:
- rc = snprintf(str, 12, "%d\n",
+ rc = snprintf(buf, 12, "%d\n",
GET_FIELD2(initiator->generic_cont0,
NVM_ISCSI_CFG_INITIATOR_VLAN));
break;
case ISCSI_BOOT_ETH_ORIGIN:
if (dhcp_en)
- rc = snprintf(str, 3, "3\n");
+ rc = snprintf(buf, 3, "3\n");
break;
default:
rc = 0;
@@ -1819,7 +1818,6 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
{
struct qedi_ctx *qedi = data;
struct nvm_iscsi_initiator *initiator;
- char *str = buf;
int rc;
struct nvm_iscsi_block *block;
@@ -1831,8 +1829,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
- initiator->initiator_name.byte);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+ initiator->initiator_name.byte);
break;
default:
rc = 0;
@@ -1860,7 +1858,6 @@ static ssize_t
qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
char *buf, enum qedi_nvm_tgts idx)
{
- char *str = buf;
int rc = 1;
u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
struct nvm_iscsi_block *block;
@@ -1899,48 +1896,48 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
switch (type) {
case ISCSI_BOOT_TGT_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
- block->target[idx].target_name.byte);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+ block->target[idx].target_name.byte);
break;
case ISCSI_BOOT_TGT_IP_ADDR:
if (ipv6_en)
- rc = snprintf(str, ip_len, "%pI6\n",
+ rc = snprintf(buf, ip_len, "%pI6\n",
block->target[idx].ipv6_addr.byte);
else
- rc = snprintf(str, ip_len, "%pI4\n",
+ rc = snprintf(buf, ip_len, "%pI4\n",
block->target[idx].ipv4_addr.byte);
break;
case ISCSI_BOOT_TGT_PORT:
- rc = snprintf(str, 12, "%d\n",
+ rc = snprintf(buf, 12, "%d\n",
GET_FIELD2(block->target[idx].generic_cont0,
NVM_ISCSI_CFG_TARGET_TCP_PORT));
break;
case ISCSI_BOOT_TGT_LUN:
- rc = snprintf(str, 22, "%.*d\n",
+ rc = snprintf(buf, 22, "%.*d\n",
block->target[idx].lun.value[1],
block->target[idx].lun.value[0]);
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
- chap_name);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
- chap_secret);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
- mchap_name);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
- mchap_secret);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
- rc = snprintf(str, 3, "0\n");
+ rc = snprintf(buf, 3, "0\n");
break;
default:
rc = 0;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aececf664654..2dea1129d396 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -59,8 +59,6 @@ qla2x00_sp_timeout(struct timer_list *t)
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- if (sp->type != SRB_ELS_DCMD)
- sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
@@ -102,7 +100,6 @@ qla2x00_async_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct event_arg ea;
if (fcport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
@@ -117,25 +114,13 @@ qla2x00_async_iocb_timeout(void *data)
switch (sp->type) {
case SRB_LOGIN_CMD:
- if (!fcport)
- break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_PLOGI_DONE;
- ea.fcport = sp->fcport;
- ea.data[0] = lio->u.logio.data[0];
- ea.data[1] = lio->u.logio.data[1];
- ea.sp = sp;
- qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
case SRB_LOGOUT_CMD:
- if (!fcport)
- break;
- qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
- break;
case SRB_CT_PTHRU_CMD:
case SRB_MB_IOCB:
case SRB_NACK_PLOGI:
@@ -235,12 +220,10 @@ static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
- qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ sp->fcport->login_gen++;
+ qlt_logo_completion_handler(sp->fcport, res);
sp->free(sp);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 1b62e943ec49..8d00d559bd26 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3275,12 +3275,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle =
- cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
- aio->u.abt.cmd_hndl));
+ abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
- cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 14109d86c3f6..89f93ebd819d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Read all mbox registers? */
- mboxes = (1 << ha->mbx_count) - 1;
+ WARN_ON_ONCE(ha->mbx_count > 32);
+ mboxes = (1ULL << ha->mbx_count) - 1;
if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
else
@@ -2880,7 +2881,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Read all mbox registers? */
- mboxes = (1 << ha->mbx_count) - 1;
+ WARN_ON_ONCE(ha->mbx_count > 32);
+ mboxes = (1ULL << ha->mbx_count) - 1;
if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
else
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ee6e02d146..afcb5567998a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3625,6 +3625,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ qla2x00_wait_for_sess_deletion(base_vha);
+
/*
* if UNLOAD flag is already set, then continue unload,
* where it was set first.
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index fc89af8fe256..896b2d8bd803 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -4871,8 +4871,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
- flags);
break;
}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index fc233717355f..817f312023a9 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -168,6 +168,8 @@
#define DEV_DB_NON_PERSISTENT 0
#define DEV_DB_PERSISTENT 1
+#define QL4_ISP_REG_DISCONNECT 0xffffffffU
+
#define COPY_ISID(dst_isid, src_isid) { \
int i, j; \
for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 82e889bbe0ed..fc2c97d9a0d6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
static struct scsi_transport_template *qla4xxx_scsi_transport;
+static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+{
+ u32 reg_val = 0;
+ int rval = QLA_SUCCESS;
+
+ if (is_qla8022(ha))
+ reg_val = readl(&ha->qla4_82xx_reg->host_status);
+ else if (is_qla8032(ha) || is_qla8042(ha))
+ reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+ else
+ reg_val = readw(&ha->reg->ctrl_status);
+
+ if (reg_val == QL4_ISP_REG_DISCONNECT)
+ rval = QLA_ERROR;
+
+ return rval;
+}
+
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
uint32_t iface_type, uint32_t payload_size,
uint32_t pid, struct sockaddr *dst_addr)
@@ -9186,10 +9204,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
struct srb *srb = NULL;
int ret = SUCCESS;
int wait = 0;
+ int rval;
ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
ha->host_no, id, lun, cmd, cmd->cmnd[0]);
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb) {
@@ -9241,6 +9266,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int ret = FAILED, stat;
+ int rval;
if (!ddb_entry)
return ret;
@@ -9260,6 +9286,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
/* FIXME: wait for hba to go online */
stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
if (stat != QLA_SUCCESS) {
@@ -9303,6 +9335,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int stat, ret;
+ int rval;
if (!ddb_entry)
return FAILED;
@@ -9320,6 +9353,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
stat = qla4xxx_reset_target(ha, ddb_entry);
if (stat != QLA_SUCCESS) {
starget_printk(KERN_INFO, scsi_target(cmd->device),
@@ -9374,9 +9413,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
int return_status = FAILED;
struct scsi_qla_host *ha;
+ int rval;
ha = to_qla_host(cmd->device->host);
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
qla4_83xx_set_idc_dontreset(ha);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 40fc7a590e81..6be5ab32c94f 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1657,7 +1657,7 @@ static struct scsi_host_template scsi_driver = {
.eh_timed_out = storvsc_eh_timed_out,
.slave_alloc = storvsc_device_alloc,
.slave_configure = storvsc_device_configure,
- .cmd_per_lun = 255,
+ .cmd_per_lun = 2048,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
/* Make sure we don't get a sg segment that crosses a page boundary */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ca360daa6a25..378af306fda1 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa
* Look for the greatest clock divisor that allows an
* input speed faster than the period.
*/
- while (div-- > 0)
+ while (--div > 0)
if (kpc >= (div_10M[div] << 2)) break;
/*
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a355d989b414..c7da2c185990 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4352,6 +4352,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
/* REPORT SUPPORTED OPERATION CODES is not supported */
sdev->no_report_opcodes = 1;
+ /* WRITE_SAME command is not supported */
+ sdev->no_write_same = 1;
ufshcd_set_queue_depth(sdev);
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 53f7275d6cbd..750f93197411 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
if (i == 1) {
domain->supply = devm_regulator_get(dev, "pu");
if (IS_ERR(domain->supply))
- return PTR_ERR(domain->supply);;
+ return PTR_ERR(domain->supply);
ret = imx_pgc_get_clocks(dev, domain);
if (ret)
@@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev)
static int imx_gpc_remove(struct platform_device *pdev)
{
+ struct device_node *pgc_node;
int ret;
+ pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+ /* bail out if the DT is too old and doesn't provide the necessary info */
+ if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+ !pgc_node)
+ return 0;
+
/*
* If the old DT binding is used the toplevel driver needs to
* de-register the power domains
*/
- if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) {
+ if (!pgc_node) {
of_genpd_del_provider(pdev->dev.of_node);
ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2435af56b87e..a50d7813e3ea 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -572,7 +572,7 @@ out:
}
static bool
-validate_bitmap_values(unsigned long mask)
+validate_bitmap_values(unsigned int mask)
{
return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}
@@ -596,17 +596,15 @@ __be32 nfs4_callback_recallany(void *argp, void *resp,
goto out;
status = cpu_to_be32(NFS4_OK);
- if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
- &args->craa_type_mask))
+ if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
flags = FMODE_READ;
- if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
- &args->craa_type_mask))
+ if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
flags |= FMODE_WRITE;
- if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
- &args->craa_type_mask))
- pnfs_recall_all_layouts(cps->clp);
if (flags)
nfs_expire_unused_delegation_types(cps->clp, flags);
+
+ if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
+ pnfs_recall_all_layouts(cps->clp);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 49f848fd1f04..7327930ad970 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -873,7 +873,7 @@ static void nfs3_nlm_release_call(void *data)
}
}
-const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = {
+static const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = {
.nlmclnt_alloc_call = nfs3_nlm_alloc_call,
.nlmclnt_unlock_prepare = nfs3_nlm_unlock_prepare,
.nlmclnt_release_call = nfs3_nlm_release_call,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 04612c24d394..979631411a0e 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -868,8 +868,10 @@ static int nfs4_set_client(struct nfs_server *server,
if (IS_ERR(clp))
return PTR_ERR(clp);
- if (server->nfs_client == clp)
+ if (server->nfs_client == clp) {
+ nfs_put_client(clp);
return -ELOOP;
+ }
/*
* Query for the lease time on clientid setup or renewal
@@ -1244,11 +1246,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
clp->cl_proto, clnt->cl_timeout,
clp->cl_minorversion, net);
clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
- nfs_put_client(clp);
if (error != 0) {
nfs_server_insert_lists(server);
return error;
}
+ nfs_put_client(clp);
if (server->nfs_client->cl_hostname == NULL)
server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index fd975524f460..05c66e05ae20 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -767,7 +767,7 @@ int
xfs_scrub_agfl(
struct xfs_scrub_context *sc)
{
- struct xfs_scrub_agfl_info sai = { 0 };
+ struct xfs_scrub_agfl_info sai;
struct xfs_agf *agf;
xfs_agnumber_t agno;
unsigned int agflcount;
@@ -795,6 +795,7 @@ xfs_scrub_agfl(
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
goto out;
}
+ memset(&sai, 0, sizeof(sai));
sai.sz_entries = agflcount;
sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
if (!sai.entries) {
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 3a55d6fc271b..7a39f40645f7 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -23,6 +23,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
+#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
@@ -456,10 +457,12 @@ xfs_cui_recover(
* transaction. Normally, any work that needs to be deferred
* gets attached to the same defer_ops that scheduled the
* refcount update. However, we're in log recovery here, so we
- * we create our own defer_ops and use that to finish up any
- * work that doesn't fit.
+ * use the passed in defer_ops to finish up any work that
+ * doesn't fit. We need to reserve enough blocks to handle a
+ * full btree split on either end of the refcount range.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+ mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
cudp = xfs_trans_get_cud(tp, cuip);
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index f3b139c9aa16..49d3124863a8 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -23,6 +23,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
+#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
@@ -470,7 +471,8 @@ xfs_rui_recover(
}
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+ mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
rudp = xfs_trans_get_rud(tp, ruip);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 7aba628dc527..93588ea3d3d2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -250,6 +250,7 @@ xfs_parseargs(
return -EINVAL;
break;
case Opt_logdev:
+ kfree(mp->m_logname);
mp->m_logname = match_strdup(args);
if (!mp->m_logname)
return -ENOMEM;
@@ -258,6 +259,7 @@ xfs_parseargs(
xfs_warn(mp, "%s option not allowed on this system", p);
return -EINVAL;
case Opt_rtdev:
+ kfree(mp->m_rtname);
mp->m_rtname = match_strdup(args);
if (!mp->m_rtname)
return -ENOMEM;
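The xfs_super.c hunk frees any string stored by an earlier logdev=/rtdev= option before saving the new one, so repeating the option no longer leaks. A user-space analog of the same idiom (the option name and helper are illustrative, and this variant keeps the old value if the copy fails):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Store the latest value of a repeatable option without leaking the old one. */
static int set_option(char **slot, const char *value)
{
	char *copy = strdup(value);

	if (!copy)
		return -1;
	free(*slot);		/* freeing NULL is a no-op, so the first call is fine */
	*slot = copy;
	return 0;
}

int main(void)
{
	char *logname = NULL;

	set_option(&logname, "/dev/sdb1");
	set_option(&logname, "/dev/sdc1");	/* overrides; the old copy is freed */
	printf("logdev=%s\n", logname);
	free(logname);
	return 0;
}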
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 1c27526c499e..cf13842a6dbd 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -134,6 +134,15 @@ struct drm_crtc_commit {
* &drm_pending_vblank_event pointer to clean up private events.
*/
struct drm_pending_vblank_event *event;
+
+ /**
+ * @abort_completion:
+ *
+ * A flag that's set after drm_atomic_helper_setup_commit takes a second
+ * reference for the completion of $drm_crtc_state.event. It's used by
+ * the free code to remove the second reference if commit fails.
+ */
+ bool abort_completion;
};
struct __drm_planes_state {
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 76e237bd989b..6914633037a5 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
+bool drm_kms_helper_is_poll_worker(void);
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index d32b688eb346..d23dcdd1bd95 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -56,6 +56,7 @@ struct drm_printer;
#define DRIVER_ATOMIC 0x10000
#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
#define DRIVER_SYNCOBJ 0x40000
+#define DRIVER_PREFER_XBGR_30BPP 0x80000
/**
* struct drm_driver - DRM driver structure
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d02a4df3f473..d3f264a5b04d 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -27,3 +27,8 @@
#if __has_feature(address_sanitizer)
#define __SANITIZE_ADDRESS__
#endif
+
+/* Clang doesn't have a way to turn it off per-function, yet. */
+#ifdef __noretpoline
+#undef __noretpoline
+#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 901c1ccb3374..e2c7f4369eff 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -93,6 +93,10 @@
#define __weak __attribute__((weak))
#define __alias(symbol) __attribute__((alias(#symbol)))
+#ifdef RETPOLINE
+#define __noretpoline __attribute__((indirect_branch("keep")))
+#endif
+
/*
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
diff --git a/include/linux/init.h b/include/linux/init.h
index 506a98151131..bc27cf03c41e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -6,10 +6,10 @@
#include <linux/types.h>
/* Built-in __init functions needn't be compiled with retpoline */
-#if defined(RETPOLINE) && !defined(MODULE)
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#if defined(__noretpoline) && !defined(MODULE)
+#define __noinitretpoline __noretpoline
#else
-#define __noretpoline
+#define __noinitretpoline
#endif
/* These macros are used to mark some functions or
@@ -47,7 +47,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold __latent_entropy __noretpoline
+#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b6a29c126cc4..2168cc6b8b30 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -151,6 +151,7 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
+extern void jump_label_invalidate_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -198,6 +199,8 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
+static inline void jump_label_invalidate_init(void) {}
+
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely(static_key_count(key) > 0))
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ce51455e2adf..3fd291503576 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -472,6 +472,7 @@ extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
+extern int init_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ac0062b74aed..6930c63126c7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1105,7 +1105,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
-void kvm_arch_irq_routing_update(struct kvm *kvm);
static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
@@ -1114,6 +1113,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
@@ -1272,4 +1273,7 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+
#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f25c13423bd4..cb3bbed4e633 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -66,6 +66,11 @@ struct mutex {
#endif
};
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index fbc98e2c8228..e791ebc65c9c 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -5,6 +5,7 @@
#ifndef _LINUX_NOSPEC_H
#define _LINUX_NOSPEC_H
+#include <asm/barrier.h>
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
@@ -30,26 +31,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#endif
/*
- * Warn developers about inappropriate array_index_nospec() usage.
- *
- * Even if the CPU speculates past the WARN_ONCE branch, the
- * sign bit of @index is taken into account when generating the
- * mask.
- *
- * This warning is compiled out when the compiler can infer that
- * @index and @size are less than LONG_MAX.
- */
-#define array_index_mask_nospec_check(index, size) \
-({ \
- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
- "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
- _mask = 0; \
- else \
- _mask = array_index_mask_nospec(index, size); \
- _mask; \
-})
-
-/*
* array_index_nospec - sanitize an array index after a bounds check
*
* For a code sequence like:
@@ -67,12 +48,11 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
({ \
typeof(index) _i = (index); \
typeof(size) _s = (size); \
- unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
+ unsigned long _mask = array_index_mask_nospec(_i, _s); \
\
BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
\
- _i &= _mask; \
- _i; \
+ (typeof(_i)) (_i & _mask); \
})
#endif /* _LINUX_NOSPEC_H */
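The nospec.h hunk drops the WARN-based wrapper and applies array_index_mask_nospec() directly, casting the masked result back to the index type. A stand-alone sketch modelled on the kernel's generic C fallback for the mask, assuming index and size stay below LONG_MAX as the remaining BUILD_BUG_ONs imply:

#include <stdio.h>

/*
 * Generic fallback: ~0UL when 0 <= index < size, 0 otherwise.
 * Relies on arithmetic right shift of a negative long, as the kernel's
 * C fallback does.
 */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long size = 16;

	for (unsigned long idx = 14; idx < 18; idx++) {
		unsigned long safe = idx & index_mask(idx, size);

		printf("index %lu -> clamped %lu (mask %lx)\n",
		       idx, safe, index_mask(idx, size));
	}
	return 0;
}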
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index af0f44effd44..40036a57d072 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -14,26 +14,10 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- * interrupt and passed the address of the low level handler,
- * and can be used to implement any platform specific handling
- * before or after calling it.
- *
- * @irq_flags: if non-zero, these flags will be passed to request_irq
- * when requesting interrupts for this PMU device.
- */
-struct arm_pmu_platdata {
- irqreturn_t (*handle_irq)(int irq, void *dev,
- irq_handler_t pmu_handler);
- unsigned long irq_flags;
-};
-
#ifdef CONFIG_ARM_PMU
/*
@@ -92,7 +76,6 @@ enum armpmu_attr_groups {
struct arm_pmu {
struct pmu pmu;
- cpumask_t active_irqs;
cpumask_t supported_cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
@@ -174,12 +157,11 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
+struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irqs(struct arm_pmu *armpmu);
-void armpmu_free_irqs(struct arm_pmu *armpmu);
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+int armpmu_request_irq(int irq, int cpu);
+void armpmu_free_irq(int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a54ef96aff5..bc0cda180c8b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
+extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 906e90223066..c96511fa9198 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4149,7 +4149,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
*/
-#define IEEE80211_TX_STATUS_HEADROOM 14
+#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
/**
* ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
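Bumping IEEE80211_TX_STATUS_HEADROOM to ALIGN(14, 4) keeps the reserved headroom a multiple of 4. A tiny sketch of the round-up-to-a-power-of-two idiom behind ALIGN() (the macro name here is a stand-in):

#include <stdio.h>

/* Round x up to the next multiple of a; a must be a power of two. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("ALIGN_UP(14, 4) = %d\n", ALIGN_UP(14, 4));	/* 16 */
	printf("ALIGN_UP(16, 4) = %d\n", ALIGN_UP(16, 4));	/* 16 */
	return 0;
}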
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebc5a2ed8631..f83cacce3308 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -78,7 +78,7 @@ struct regulatory_request {
int wiphy_idx;
enum nl80211_reg_initiator initiator;
enum nl80211_user_reg_hint_type user_reg_hint_type;
- char alpha2[2];
+ char alpha2[3];
enum nl80211_dfs_regions dfs_region;
bool intersect;
bool processed;
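regulatory.h widens alpha2 from 2 to 3 bytes so the country code can carry a terminating NUL when it is handled as a C string. A short sketch of why the extra byte matters (the struct here is a stand-in, not the cfg80211 one):

#include <stdio.h>
#include <string.h>

struct reg_request {
	char alpha2[3];		/* "US" plus the terminating NUL */
};

int main(void)
{
	struct reg_request req;

	/* With only alpha2[2], this strcpy would write one byte past the field. */
	strcpy(req.alpha2, "US");
	printf("country code: %s (len %zu)\n", req.alpha2, strlen(req.alpha2));
	return 0;
}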
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index c2d1b15da136..a91f25151a5b 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -15,6 +15,7 @@
#define ARC_REG_MCIP_BCR 0x0d0
#define ARC_REG_MCIP_IDU_BCR 0x0D5
+#define ARC_REG_GFRC_BUILD 0x0D6
#define ARC_REG_MCIP_CMD 0x600
#define ARC_REG_MCIP_WDATA 0x601
#define ARC_REG_MCIP_READBACK 0x602
@@ -36,10 +37,14 @@ struct mcip_cmd {
#define CMD_SEMA_RELEASE 0x12
#define CMD_DEBUG_SET_MASK 0x34
+#define CMD_DEBUG_READ_MASK 0x35
#define CMD_DEBUG_SET_SELECT 0x36
+#define CMD_DEBUG_READ_SELECT 0x37
#define CMD_GFRC_READ_LO 0x42
#define CMD_GFRC_READ_HI 0x43
+#define CMD_GFRC_SET_CORE 0x47
+#define CMD_GFRC_READ_CORE 0x48
#define CMD_IDU_ENABLE 0x71
#define CMD_IDU_DISABLE 0x72
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 91a31ffed828..9a781f0611df 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
struct drm_virtgpu_getparam {
__u64 param;
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 3d77fe91239a..9008f31c7eb6 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -42,7 +42,7 @@ typedef enum {
SEV_RET_INVALID_PLATFORM_STATE,
SEV_RET_INVALID_GUEST_STATE,
SEV_RET_INAVLID_CONFIG,
- SEV_RET_INVALID_len,
+ SEV_RET_INVALID_LEN,
SEV_RET_ALREADY_OWNED,
SEV_RET_INVALID_CERTIFICATE,
SEV_RET_POLICY_FAILURE,
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index e46d82b91166..d5a1b8a492b9 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -69,8 +69,8 @@ struct ptrace_peeksiginfo_args {
#define PTRACE_SECCOMP_GET_METADATA 0x420d
struct seccomp_metadata {
- unsigned long filter_off; /* Input: which filter */
- unsigned int flags; /* Output: filter's flags */
+ __u64 filter_off; /* Input: which filter */
+ __u64 flags; /* Output: filter's flags */
};
/* Read signals from a shared (process wide) queue */
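The ptrace.h hunk converts struct seccomp_metadata to __u64 fields so its size and layout stop depending on the width of unsigned long in the calling ABI. A user-space comparison of the two layouts, with uint64_t standing in for __u64:

#include <stdint.h>
#include <stdio.h>

struct metadata_old {			/* layout varies with the ABI */
	unsigned long filter_off;
	unsigned int  flags;
};

struct metadata_new {			/* same 16-byte layout everywhere */
	uint64_t filter_off;
	uint64_t flags;
};

int main(void)
{
	printf("old: %zu bytes (unsigned long is %zu bytes here)\n",
	       sizeof(struct metadata_old), sizeof(unsigned long));
	printf("new: %zu bytes\n", sizeof(struct metadata_new));
	return 0;
}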
diff --git a/init/main.c b/init/main.c
index a8100b954839..969eaf140ef0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -89,6 +89,7 @@
#include <linux/io.h>
#include <linux/cache.h>
#include <linux/rodata_test.h>
+#include <linux/jump_label.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -1000,6 +1001,7 @@ static int __ref kernel_init(void *unused)
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
ftrace_free_init_mem();
+ jump_label_invalidate_init();
free_initmem();
mark_readonly();
system_state = SYSTEM_RUNNING;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..14750e7c5ee4 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
{
int i;
- for (i = 0; i < array->map.max_entries; i++)
+ for (i = 0; i < array->map.max_entries; i++) {
free_percpu(array->pptrs[i]);
+ cond_resched();
+ }
}
static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
return -ENOMEM;
}
array->pptrs[i] = ptr;
+ cond_resched();
}
return 0;
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
- int numa_node = bpf_map_attr_numa_node(attr);
+ int ret, numa_node = bpf_map_attr_numa_node(attr);
u32 elem_size, index_mask, max_entries;
bool unpriv = !capable(CAP_SYS_ADMIN);
+ u64 cost, array_size, mask64;
struct bpf_array *array;
- u64 array_size, mask64;
elem_size = round_up(attr->value_size, 8);
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array_size += (u64) max_entries * elem_size;
/* make sure there is no u32 overflow later in round_up() */
- if (array_size >= U32_MAX - PAGE_SIZE)
+ cost = array_size;
+ if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-ENOMEM);
+ if (percpu) {
+ cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+ if (cost >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-ENOMEM);
+ }
+ cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ ret = bpf_map_precharge_memlock(cost);
+ if (ret < 0)
+ return ERR_PTR(ret);
/* allocate all map elements and zero-initialize them */
array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
/* copy mandatory map attributes */
bpf_map_init_from_attr(&array->map, attr);
+ array->map.pages = cost;
array->elem_size = elem_size;
- if (!percpu)
- goto out;
-
- array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
- if (array_size >= U32_MAX - PAGE_SIZE ||
- bpf_array_alloc_percpu(array)) {
+ if (percpu && bpf_array_alloc_percpu(array)) {
bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
-out:
- array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
return &array->map;
}
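The arraymap.c hunk folds the per-cpu element area into the memlock cost, checks the combined size against U32_MAX, and precharges it in pages. A stand-alone sketch of that cost computation (the constants are simplified and the precharge call is left out):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_SHIFT	12

/* Returns pages to charge, or 0 if the request would overflow u32 math. */
static uint64_t map_cost_pages(uint32_t max_entries, uint32_t elem_size,
			       int percpu, unsigned int nr_cpus)
{
	uint64_t cost = (uint64_t)max_entries * elem_size;

	if (cost >= UINT32_MAX - PAGE_SIZE)
		return 0;
	if (percpu) {
		cost += (uint64_t)max_entries * elem_size * nr_cpus;
		if (cost >= UINT32_MAX - PAGE_SIZE)
			return 0;
	}
	/* round up to whole pages, then convert to a page count */
	return (cost + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("pages = %" PRIu64 "\n", map_cost_pages(1 << 20, 64, 1, 8));
	return 0;
}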
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 29ca9208dcfa..d315b393abdd 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
* so always copy 'cnt' prog_ids to the user.
* In a rare race the user will see zero prog_ids
*/
- ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+ ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
if (!ids)
return -ENOMEM;
rcu_read_lock();
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbfdada6caee..a4bb0b34375a 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
int map_id)
{
- gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct bpf_cpu_map_entry *rcpu;
int numa, err;
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 7b469d10d0e9..b4b5b81e7251 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
struct lpm_trie_node __rcu **slot;
struct lpm_trie_node *node;
- raw_spin_lock(&trie->lock);
+ /* Wait for outstanding programs to complete
+ * update/lookup/delete/get_next_key and free the trie.
+ */
+ synchronize_rcu();
/* Always start at the root and walk down to a node that has no
* children. Then free that node, nullify its reference in the parent
@@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map)
slot = &trie->root;
for (;;) {
- node = rcu_dereference_protected(*slot,
- lockdep_is_held(&trie->lock));
+ node = rcu_dereference_protected(*slot, 1);
if (!node)
- goto unlock;
+ goto out;
if (rcu_access_pointer(node->child[0])) {
slot = &node->child[0];
@@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map)
}
}
-unlock:
- raw_spin_unlock(&trie->lock);
+out:
+ kfree(trie);
}
static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 48c33417d13c..a927e89dad6e 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
struct bpf_stab *stab;
- int err = -EINVAL;
u64 cost;
+ int err;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
/* make sure page count doesn't overflow */
cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+ err = -EINVAL;
if (cost >= U32_MAX - PAGE_SIZE)
goto free_stab;
diff --git a/kernel/extable.c b/kernel/extable.c
index a17fdb63dc3e..6a5b61ebc66c 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -64,7 +64,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
return e;
}
-static inline int init_kernel_text(unsigned long addr)
+int init_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_sinittext &&
addr < (unsigned long)_einittext)
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5187dfe809ac..4c5770407031 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -16,6 +16,7 @@ struct cpumap {
unsigned int available;
unsigned int allocated;
unsigned int managed;
+ bool initialized;
bool online;
unsigned long alloc_map[IRQ_MATRIX_SIZE];
unsigned long managed_map[IRQ_MATRIX_SIZE];
@@ -81,9 +82,11 @@ void irq_matrix_online(struct irq_matrix *m)
BUG_ON(cm->online);
- bitmap_zero(cm->alloc_map, m->matrix_bits);
- cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
- cm->allocated = 0;
+ if (!cm->initialized) {
+ cm->available = m->alloc_size;
+ cm->available -= cm->managed + m->systembits_inalloc;
+ cm->initialized = true;
+ }
m->global_available += cm->available;
cm->online = true;
m->online_maps++;
@@ -370,14 +373,16 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
return;
- if (cm->online) {
- clear_bit(bit, cm->alloc_map);
- cm->allocated--;
+ clear_bit(bit, cm->alloc_map);
+ cm->allocated--;
+
+ if (cm->online)
m->total_allocated--;
- if (!managed) {
- cm->available++;
+
+ if (!managed) {
+ cm->available++;
+ if (cm->online)
m->global_available++;
- }
}
trace_irq_matrix_free(bit, cpu, m, cm);
}
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index b4517095db6a..52a0a7af8640 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -366,12 +366,15 @@ static void __jump_label_update(struct static_key *key,
{
for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
/*
- * entry->code set to 0 invalidates module init text sections
- * kernel_text_address() verifies we are not in core kernel
- * init code, see jump_label_invalidate_module_init().
+ * An entry->code of 0 indicates an entry which has been
+ * disabled because it was in an init text area.
*/
- if (entry->code && kernel_text_address(entry->code))
- arch_jump_label_transform(entry, jump_label_type(entry));
+ if (entry->code) {
+ if (kernel_text_address(entry->code))
+ arch_jump_label_transform(entry, jump_label_type(entry));
+ else
+ WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+ }
}
}
@@ -417,6 +420,19 @@ void __init jump_label_init(void)
cpus_read_unlock();
}
+/* Disable any jump label entries in __init code */
+void __init jump_label_invalidate_init(void)
+{
+ struct jump_entry *iter_start = __start___jump_table;
+ struct jump_entry *iter_stop = __stop___jump_table;
+ struct jump_entry *iter;
+
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ if (init_kernel_text(iter->code))
+ iter->code = 0;
+ }
+}
+
#ifdef CONFIG_MODULES
static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
@@ -633,6 +649,7 @@ static void jump_label_del_module(struct module *mod)
}
}
+/* Disable any jump label entries in module init code */
static void jump_label_invalidate_module_init(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index fc1123583fa6..f274fbef821d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2397,7 +2397,7 @@ skip:
if (console_lock_spinning_disable_and_check()) {
printk_safe_exit_irqrestore(flags);
- return;
+ goto out;
}
printk_safe_exit_irqrestore(flags);
@@ -2430,6 +2430,7 @@ skip:
if (retry && console_trylock())
goto again;
+out:
if (wake_klogd)
wake_up_klogd();
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 940fa408a288..dc77548167ef 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1076,14 +1076,16 @@ long seccomp_get_metadata(struct task_struct *task,
size = min_t(unsigned long, size, sizeof(kmd));
- if (copy_from_user(&kmd, data, size))
+ if (size < sizeof(kmd.filter_off))
+ return -EINVAL;
+
+ if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
return -EFAULT;
filter = get_nth_filter(task, kmd.filter_off);
if (IS_ERR(filter))
return PTR_ERR(filter);
- memset(&kmd, 0, sizeof(kmd));
if (filter->log)
kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
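The seccomp.c hunk copies only the caller-supplied filter_off field and lets the kernel fill flags, instead of copying the whole struct in and zeroing it afterwards. A user-space sketch of that in/out split, with memcpy standing in for copy_from_user()/copy_to_user() and a fake flag lookup:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metadata {
	uint64_t filter_off;	/* input: which filter */
	uint64_t flags;		/* output: filter's flags */
};

static int get_metadata(void *user_buf, size_t size)
{
	struct metadata kmd = { 0 };

	if (size < sizeof(kmd.filter_off))
		return -1;
	/* read only the input member; never trust the output member */
	memcpy(&kmd.filter_off, user_buf, sizeof(kmd.filter_off));
	kmd.flags = (kmd.filter_off == 0) ? 0x1 : 0x0;	/* pretend lookup */
	if (size > sizeof(kmd))
		size = sizeof(kmd);
	memcpy(user_buf, &kmd, size);
	return 0;
}

int main(void)
{
	struct metadata m = { .filter_off = 0, .flags = 0xdeadbeef };

	get_metadata(&m, sizeof(m));
	printf("filter_off=%llu flags=%#llx\n",
	       (unsigned long long)m.filter_off, (unsigned long long)m.flags);
	return 0;
}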
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fc2838ac8b78..c0a9e310d715 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
return -EINVAL;
if (copy_from_user(&query, uquery, sizeof(query)))
return -EFAULT;
+ if (query.ids_len > BPF_TRACE_MAX_PROGS)
+ return -E2BIG;
mutex_lock(&bpf_event_mutex);
ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 017044c26233..bb9a519cbf50 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4180,6 +4180,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
/**
+ * current_work - retrieve %current task's work struct
+ *
+ * Determine if %current task is a workqueue worker and what it's working on.
+ * Useful to find out the context that the %current task is running in.
+ *
+ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
+ */
+struct work_struct *current_work(void)
+{
+ struct worker *worker = current_wq_worker();
+
+ return worker ? worker->current_work : NULL;
+}
+EXPORT_SYMBOL(current_work);
+
+/**
* current_is_workqueue_rescuer - is %current workqueue rescuer?
*
* Determine whether %current is a workqueue rescuer. Can be used from
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 1b34d210452c..7f5cdc1e6b29 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1491,12 +1491,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
if (unlikely(virt == NULL))
return;
- entry = dma_entry_alloc();
- if (!entry)
+ /* handle vmalloc and linear addresses */
+ if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
return;
- /* handle vmalloc and linear addresses */
- if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+ entry = dma_entry_alloc();
+ if (!entry)
return;
entry->type = dma_debug_coherent;
@@ -1528,7 +1528,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
};
/* handle vmalloc and linear addresses */
- if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+ if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
return;
if (is_vmalloc_addr(virt))
diff --git a/lib/idr.c b/lib/idr.c
index 99ec5bc89d25..823b813f08f8 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -36,8 +36,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
{
struct radix_tree_iter iter;
void __rcu **slot;
- int base = idr->idr_base;
- int id = *nextid;
+ unsigned int base = idr->idr_base;
+ unsigned int id = *nextid;
if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return -EINVAL;
@@ -204,10 +204,11 @@ int idr_for_each(const struct idr *idr,
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
int ret;
+ unsigned long id = iter.index + base;
- if (WARN_ON_ONCE(iter.index > INT_MAX))
+ if (WARN_ON_ONCE(id > INT_MAX))
break;
- ret = fn(iter.index + base, rcu_dereference_raw(*slot), data);
+ ret = fn(id, rcu_dereference_raw(*slot), data);
if (ret)
return ret;
}
@@ -230,8 +231,8 @@ void *idr_get_next(struct idr *idr, int *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
- int base = idr->idr_base;
- int id = *nextid;
+ unsigned long base = idr->idr_base;
+ unsigned long id = *nextid;
id = (id < base) ? 0 : id - base;
slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
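The idr.c hunks compute the external id as an unsigned long before the INT_MAX check, so index + base cannot be truncated or wrapped through a signed int first. A minimal sketch of the guarded widening (the values are illustrative):

#include <limits.h>
#include <stdio.h>

/* Combine a radix-tree index with a base without going through int. */
static int checked_id(unsigned long index, unsigned int base, int *out)
{
	unsigned long id = index + base;

	if (id > INT_MAX)
		return -1;		/* would not fit the int-based idr API */
	*out = (int)id;
	return 0;
}

int main(void)
{
	int id;

	if (checked_id((unsigned long)INT_MAX - 1, 10, &id) < 0)
		printf("id out of range, stop iteration\n");
	else
		printf("id = %d\n", id);
	return 0;
}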
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 77ee6ced11b1..d7a708f82559 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1849,7 +1849,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
{
const int default_width = 2 * sizeof(void *);
- if (!ptr && *fmt != 'K') {
+ if (!ptr && *fmt != 'K' && *fmt != 'x') {
/*
* Print (null) with the same width as a pointer so it makes
* tabular output look nice.
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 279527f8b1fe..ce7152a12bd8 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -187,17 +187,17 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
expected_length += ebt_mac_wormhash_size(wh_src);
if (em->match_size != EBT_ALIGN(expected_length)) {
- pr_info("wrong size: %d against expected %d, rounded to %zd\n",
- em->match_size, expected_length,
- EBT_ALIGN(expected_length));
+ pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n",
+ em->match_size, expected_length,
+ EBT_ALIGN(expected_length));
return -EINVAL;
}
if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
- pr_info("dst integrity fail: %x\n", -err);
+ pr_err_ratelimited("dst integrity fail: %x\n", -err);
return -EINVAL;
}
if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
- pr_info("src integrity fail: %x\n", -err);
+ pr_err_ratelimited("src integrity fail: %x\n", -err);
return -EINVAL;
}
return 0;
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index 61a9f1be1263..165b9d678cf1 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -72,8 +72,8 @@ static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
/* Check for overflow. */
if (info->burst == 0 ||
user2credits(info->avg * info->burst) < user2credits(info->avg)) {
- pr_info("overflow, try lower: %u/%u\n",
- info->avg, info->burst);
+ pr_info_ratelimited("overflow, try lower: %u/%u\n",
+ info->avg, info->burst);
return -EINVAL;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 08ab4c65a998..0c121adbdbaa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3381,17 +3381,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
struct sock *sk = bpf_sock->sk;
int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
- if (!sk_fullsock(sk))
+ if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
return -EINVAL;
-#ifdef CONFIG_INET
if (val)
tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
-#else
- return -EINVAL;
-#endif
}
static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 0a3f88f08727..98fd12721221 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,6 +66,7 @@ struct net_rate_estimator {
static void est_fetch_counters(struct net_rate_estimator *e,
struct gnet_stats_basic_packed *b)
{
+ memset(b, 0, sizeof(*b));
if (e->stats_lock)
spin_lock(e->stats_lock);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 008be04ac1cc..9c41a0cef1a5 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1567,10 +1567,7 @@ int ip_getsockopt(struct sock *sk, int level,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
- err = nf_getsockopt(sk, PF_INET, optname, optval,
- &len);
- release_sock(sk);
+ err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
return err;
@@ -1602,9 +1599,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
- release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 4ffe302f9b82..e3e420f3ba7b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -252,6 +252,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
if (table_base + v
!= arpt_next_entry(e)) {
+ if (unlikely(stackidx >= private->stacksize)) {
+ verdict = NF_DROP;
+ break;
+ }
jumpstack[stackidx++] = e;
}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 9a71f3149507..e38395a8dcf2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -330,8 +330,13 @@ ipt_do_table(struct sk_buff *skb,
continue;
}
if (table_base + v != ipt_next_entry(e) &&
- !(e->ip.flags & IPT_F_GOTO))
+ !(e->ip.flags & IPT_F_GOTO)) {
+ if (unlikely(stackidx >= private->stacksize)) {
+ verdict = NF_DROP;
+ break;
+ }
jumpstack[stackidx++] = e;
+ }
e = get_entry(table_base, v);
continue;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 3a84a60f6b39..4b02ab39ebc5 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
local_bh_disable();
if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
- list_del_rcu(&c->list);
- spin_unlock(&cn->lock);
- local_bh_enable();
-
- unregister_netdevice_notifier(&c->notifier);
-
/* In case anyone still accesses the file, the open/close
* functions are also incrementing the refcount on their own,
* so it's safe to remove the entry even if it's in use. */
@@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
if (cn->procdir)
proc_remove(c->pde);
#endif
+ list_del_rcu(&c->list);
+ spin_unlock(&cn->lock);
+ local_bh_enable();
+
+ unregister_netdevice_notifier(&c->notifier);
+
return;
}
local_bh_enable();
@@ -154,8 +154,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
#endif
if (unlikely(!refcount_inc_not_zero(&c->refcount)))
c = NULL;
- else if (entry)
- refcount_inc(&c->entries);
+ else if (entry) {
+ if (unlikely(!refcount_inc_not_zero(&c->entries))) {
+ clusterip_config_put(c);
+ c = NULL;
+ }
+ }
}
rcu_read_unlock_bh();
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 270765236f5e..aaaf9a81fbc9 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -98,17 +98,15 @@ static int ecn_tg_check(const struct xt_tgchk_param *par)
const struct ipt_ECN_info *einfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
- if (einfo->operation & IPT_ECN_OP_MASK) {
- pr_info("unsupported ECN operation %x\n", einfo->operation);
+ if (einfo->operation & IPT_ECN_OP_MASK)
return -EINVAL;
- }
- if (einfo->ip_ect & ~IPT_ECN_IP_MASK) {
- pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect);
+
+ if (einfo->ip_ect & ~IPT_ECN_IP_MASK)
return -EINVAL;
- }
+
if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&
(e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
- pr_info("cannot use TCP operations on a non-tcp rule\n");
+ pr_info_ratelimited("cannot use operation on non-tcp rule\n");
return -EINVAL;
}
return 0;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 8bd0d7b26632..e8bed3390e58 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -74,13 +74,13 @@ static int reject_tg_check(const struct xt_tgchk_param *par)
const struct ipt_entry *e = par->entryinfo;
if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
- pr_info("ECHOREPLY no longer supported.\n");
+ pr_info_ratelimited("ECHOREPLY no longer supported.\n");
return -EINVAL;
} else if (rejinfo->with == IPT_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ip.proto != IPPROTO_TCP ||
(e->ip.invflags & XT_INV_PROTO)) {
- pr_info("TCP_RESET invalid for non-tcp\n");
+ pr_info_ratelimited("TCP_RESET invalid for non-tcp\n");
return -EINVAL;
}
}
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 37fb9552e858..fd01f13c896a 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -105,14 +105,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
const struct xt_rpfilter_info *info = par->matchinfo;
unsigned int options = ~XT_RPFILTER_OPTION_MASK;
if (info->flags & options) {
- pr_info("unknown options encountered");
+ pr_info_ratelimited("unknown options\n");
return -EINVAL;
}
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "raw") != 0) {
- pr_info("match only valid in the \'raw\' "
- "or \'mangle\' tables, not \'%s\'.\n", par->table);
+ pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
+ par->table);
return -EINVAL;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 49cc1c1df1ba..a4f44d815a61 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1826,6 +1826,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
return skb_get_hash_raw(skb) >> 1;
memset(&hash_keys, 0, sizeof(hash_keys));
skb_flow_dissect_flow_keys(skb, &keys, flag);
+
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
hash_keys.ports.src = keys.ports.src;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b2bca373f8be..6818042cd8a9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
*/
segs = max_t(u32, bytes / mss_now, min_tso_segs);
- return min_t(u32, segs, sk->sk_gso_max_segs);
+ return segs;
}
EXPORT_SYMBOL(tcp_tso_autosize);
@@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
- return tso_segs ? :
- tcp_tso_autosize(sk, mss_now,
- sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+ if (!tso_segs)
+ tso_segs = tcp_tso_autosize(sk, mss_now,
+ sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+ return min_t(u32, tso_segs, sk->sk_gso_max_segs);
}
/* Returns the portion of skb which can be sent right away */
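The tcp_output.c change moves the sk_gso_max_segs clamp from tcp_tso_autosize() to the final value in tcp_tso_segs(), so a congestion control's tso_segs_goal() is capped too. A small sketch of clamping the end result rather than only one branch (names and numbers are illustrative):

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Pick a segment goal, then clamp whichever source it came from. */
static unsigned int tso_segs(unsigned int ca_goal, unsigned int autosized,
			     unsigned int gso_max_segs)
{
	unsigned int segs = ca_goal ? ca_goal : autosized;

	return min_u32(segs, gso_max_segs);
}

int main(void)
{
	printf("%u\n", tso_segs(0, 45, 64));	/* autosized path: 45 */
	printf("%u\n", tso_segs(120, 45, 64));	/* CA goal now clamped: 64 */
	return 0;
}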
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d78d41fc4b1a..24535169663d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1367,10 +1367,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
- err = nf_getsockopt(sk, PF_INET6, optname, optval,
- &len);
- release_sock(sk);
+ err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
}
@@ -1409,10 +1406,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
- err = compat_nf_getsockopt(sk, PF_INET6,
- optname, optval, &len);
- release_sock(sk);
+ err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index af4c917e0836..62358b93bbac 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -352,6 +352,10 @@ ip6t_do_table(struct sk_buff *skb,
}
if (table_base + v != ip6t_next_entry(e) &&
!(e->ipv6.flags & IP6T_F_GOTO)) {
+ if (unlikely(stackidx >= private->stacksize)) {
+ verdict = NF_DROP;
+ break;
+ }
jumpstack[stackidx++] = e;
}
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index fa51a205918d..38dea8ff680f 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -85,14 +85,14 @@ static int reject_tg6_check(const struct xt_tgchk_param *par)
const struct ip6t_entry *e = par->entryinfo;
if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
- pr_info("ECHOREPLY is not supported.\n");
+ pr_info_ratelimited("ECHOREPLY is not supported\n");
return -EINVAL;
} else if (rejinfo->with == IP6T_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (!(e->ipv6.flags & IP6T_F_PROTO) ||
e->ipv6.proto != IPPROTO_TCP ||
(e->ipv6.invflags & XT_INV_PROTO)) {
- pr_info("TCP_RESET illegal for non-tcp\n");
+ pr_info_ratelimited("TCP_RESET illegal for non-tcp\n");
return -EINVAL;
}
}
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index b12e61b7b16c..94deb69bbbda 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -103,14 +103,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
unsigned int options = ~XT_RPFILTER_OPTION_MASK;
if (info->flags & options) {
- pr_info("unknown options encountered");
+ pr_info_ratelimited("unknown options\n");
return -EINVAL;
}
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "raw") != 0) {
- pr_info("match only valid in the \'raw\' "
- "or \'mangle\' tables, not \'%s\'.\n", par->table);
+ pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
+ par->table);
return -EINVAL;
}
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
index 9642164107ce..33719d5560c8 100644
--- a/net/ipv6/netfilter/ip6t_srh.c
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -122,12 +122,14 @@ static int srh_mt6_check(const struct xt_mtchk_param *par)
const struct ip6t_srh *srhinfo = par->matchinfo;
if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
- pr_err("unknown srh match flags %X\n", srhinfo->mt_flags);
+ pr_info_ratelimited("unknown srh match flags %X\n",
+ srhinfo->mt_flags);
return -EINVAL;
}
if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
- pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags);
+ pr_info_ratelimited("unknown srh invflags %X\n",
+ srhinfo->mt_invflags);
return -EINVAL;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3873d3877135..3a1775a62973 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel *t = netdev_priv(dev);
- if (t->dev == sitn->fb_tunnel_dev) {
+ if (dev == sitn->fb_tunnel_dev) {
ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
t->ip6rd.relay_prefix = 0;
t->ip6rd.prefixlen = 16;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a8b1616cec41..1f3188d03840 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -8,6 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -304,9 +305,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
* driver so reject the timeout update.
*/
status = WLAN_STATUS_REQUEST_DECLINED;
- ieee80211_send_addba_resp(sta->sdata, sta->sta.addr,
- tid, dialog_token, status,
- 1, buf_size, timeout);
goto end;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 46028e12e216..f4195a0f0279 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2892,7 +2892,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
}
if (beacon->probe_resp_len) {
new_beacon->probe_resp_len = beacon->probe_resp_len;
- beacon->probe_resp = pos;
+ new_beacon->probe_resp = pos;
memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
pos += beacon->probe_resp_len;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 26900025de2f..ae9c33cd8ada 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1467,7 +1467,7 @@ struct ieee802_11_elems {
const struct ieee80211_timeout_interval_ie *timeout_int;
const u8 *opmode_notif;
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
- const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
+ struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
/* length of them, respectively */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 73ac607beb5d..6a381cbe1e33 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1255,13 +1255,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
}
static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt, size_t len)
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee802_11_elems *elems)
{
struct ieee80211_mgmt *mgmt_fwd;
struct sk_buff *skb;
struct ieee80211_local *local = sdata->local;
- u8 *pos = mgmt->u.action.u.chan_switch.variable;
- size_t offset_ttl;
skb = dev_alloc_skb(local->tx_headroom + len);
if (!skb)
@@ -1269,13 +1268,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
skb_reserve(skb, local->tx_headroom);
mgmt_fwd = skb_put(skb, len);
- /* offset_ttl is based on whether the secondary channel
- * offset is available or not. Subtract 1 from the mesh TTL
- * and disable the initiator flag before forwarding.
- */
- offset_ttl = (len < 42) ? 7 : 10;
- *(pos + offset_ttl) -= 1;
- *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
+ elems->mesh_chansw_params_ie->mesh_ttl--;
+ elems->mesh_chansw_params_ie->mesh_flags &=
+ ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
memcpy(mgmt_fwd, mgmt, len);
eth_broadcast_addr(mgmt_fwd->da);
@@ -1323,7 +1318,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
/* forward or re-broadcast the CSA frame */
if (fwd_csa) {
- if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0)
+ if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
mcsa_dbg(sdata, "Failed to forward the CSA frame");
}
}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index ee0181778a42..029334835747 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -8,6 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2008, Intel Corporation
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
u32 sta_flags, u8 *bssid,
struct ieee80211_csa_ie *csa_ie)
{
- enum nl80211_band new_band;
+ enum nl80211_band new_band = current_band;
int new_freq;
u8 new_chan_no;
struct ieee80211_channel *new_chan;
@@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
elems->ext_chansw_ie->new_operating_class,
&new_band)) {
sdata_info(sdata,
- "cannot understand ECSA IE operating class %d, disconnecting\n",
+ "cannot understand ECSA IE operating class, %d, ignoring\n",
elems->ext_chansw_ie->new_operating_class);
- return -EINVAL;
}
new_chan_no = elems->ext_chansw_ie->new_ch_num;
csa_ie->count = elems->ext_chansw_ie->count;
csa_ie->mode = elems->ext_chansw_ie->mode;
} else if (elems->ch_switch_ie) {
- new_band = current_band;
new_chan_no = elems->ch_switch_ie->new_ch_num;
csa_ie->count = elems->ch_switch_ie->count;
csa_ie->mode = elems->ch_switch_ie->mode;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 0c5627f8a104..af0b608ee8ed 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (ieee80211_hw_check(hw, USES_RSS)) {
sta->pcpu_rx_stats =
- alloc_percpu(struct ieee80211_sta_rx_stats);
+ alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
if (!sta->pcpu_rx_stats)
goto free;
}
@@ -433,6 +433,7 @@ free_txq:
if (sta->sta.txq[0])
kfree(to_txq_info(sta->sta.txq[0]));
free:
+ free_percpu(sta->pcpu_rx_stats);
#ifdef CONFIG_MAC80211_MESH
kfree(sta->mesh);
#endif
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index fbce552a796e..7d7466dbf663 100644
--- a/net/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
const struct nf_conn *ct,
u16 *rover)
{
- unsigned int range_size, min, i;
+ unsigned int range_size, min, max, i;
__be16 *portptr;
u_int16_t off;
@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
}
} else {
min = ntohs(range->min_proto.all);
- range_size = ntohs(range->max_proto.all) - min + 1;
+ max = ntohs(range->max_proto.all);
+ if (unlikely(max < min))
+ swap(max, min);
+ range_size = max - min + 1;
}
if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
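nf_nat_l4proto_unique_tuple() now swaps min and max when the configured range is inverted, so max - min + 1 cannot wrap as an unsigned value. A quick sketch of the failure mode and the swap (the port numbers are arbitrary):

#include <stdio.h>

static void swap_u32(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

int main(void)
{
	unsigned int min = 2000, max = 1000;

	/* Without the check, 1000 - 2000 + 1 wraps to a huge range size. */
	printf("naive range_size = %u\n", max - min + 1);

	if (max < min)
		swap_u32(&max, &min);
	printf("fixed range_size = %u\n", max - min + 1);
	return 0;
}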
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 2f685ee1f9c8..fa1655aff8d3 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -434,36 +434,35 @@ int xt_check_match(struct xt_mtchk_param *par,
* ebt_among is exempt from centralized matchsize checking
* because it uses a dynamic-size data set.
*/
- pr_err("%s_tables: %s.%u match: invalid size "
- "%u (kernel) != (user) %u\n",
- xt_prefix[par->family], par->match->name,
- par->match->revision,
- XT_ALIGN(par->match->matchsize), size);
+ pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
+ xt_prefix[par->family], par->match->name,
+ par->match->revision,
+ XT_ALIGN(par->match->matchsize), size);
return -EINVAL;
}
if (par->match->table != NULL &&
strcmp(par->match->table, par->table) != 0) {
- pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
- xt_prefix[par->family], par->match->name,
- par->match->table, par->table);
+ pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
+ xt_prefix[par->family], par->match->name,
+ par->match->table, par->table);
return -EINVAL;
}
if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
char used[64], allow[64];
- pr_err("%s_tables: %s match: used from hooks %s, but only "
- "valid from %s\n",
- xt_prefix[par->family], par->match->name,
- textify_hooks(used, sizeof(used), par->hook_mask,
- par->family),
- textify_hooks(allow, sizeof(allow), par->match->hooks,
- par->family));
+ pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
+ xt_prefix[par->family], par->match->name,
+ textify_hooks(used, sizeof(used),
+ par->hook_mask, par->family),
+ textify_hooks(allow, sizeof(allow),
+ par->match->hooks,
+ par->family));
return -EINVAL;
}
if (par->match->proto && (par->match->proto != proto || inv_proto)) {
- pr_err("%s_tables: %s match: only valid for protocol %u\n",
- xt_prefix[par->family], par->match->name,
- par->match->proto);
+ pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
+ xt_prefix[par->family], par->match->name,
+ par->match->proto);
return -EINVAL;
}
if (par->match->checkentry != NULL) {
@@ -814,36 +813,35 @@ int xt_check_target(struct xt_tgchk_param *par,
int ret;
if (XT_ALIGN(par->target->targetsize) != size) {
- pr_err("%s_tables: %s.%u target: invalid size "
- "%u (kernel) != (user) %u\n",
- xt_prefix[par->family], par->target->name,
- par->target->revision,
- XT_ALIGN(par->target->targetsize), size);
+ pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
+ xt_prefix[par->family], par->target->name,
+ par->target->revision,
+ XT_ALIGN(par->target->targetsize), size);
return -EINVAL;
}
if (par->target->table != NULL &&
strcmp(par->target->table, par->table) != 0) {
- pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
- xt_prefix[par->family], par->target->name,
- par->target->table, par->table);
+ pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
+ xt_prefix[par->family], par->target->name,
+ par->target->table, par->table);
return -EINVAL;
}
if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
char used[64], allow[64];
- pr_err("%s_tables: %s target: used from hooks %s, but only "
- "usable from %s\n",
- xt_prefix[par->family], par->target->name,
- textify_hooks(used, sizeof(used), par->hook_mask,
- par->family),
- textify_hooks(allow, sizeof(allow), par->target->hooks,
- par->family));
+ pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
+ xt_prefix[par->family], par->target->name,
+ textify_hooks(used, sizeof(used),
+ par->hook_mask, par->family),
+ textify_hooks(allow, sizeof(allow),
+ par->target->hooks,
+ par->family));
return -EINVAL;
}
if (par->target->proto && (par->target->proto != proto || inv_proto)) {
- pr_err("%s_tables: %s target: only valid for protocol %u\n",
- xt_prefix[par->family], par->target->name,
- par->target->proto);
+ pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
+ xt_prefix[par->family], par->target->name,
+ par->target->proto);
return -EINVAL;
}
if (par->target->checkentry != NULL) {
@@ -1004,10 +1002,6 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
if (sz < sizeof(*info))
return NULL;
- /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
- if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
- return NULL;
-
/* __GFP_NORETRY is not fully supported by kvmalloc but it should
* work reasonably well if sz is too large and bail out rather
* than shoot all processes down before realizing there is nothing
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index c502419d6306..f368ee6741db 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -120,8 +120,8 @@ static int audit_tg_check(const struct xt_tgchk_param *par)
const struct xt_audit_info *info = par->targinfo;
if (info->type > XT_AUDIT_TYPE_MAX) {
- pr_info("Audit type out of range (valid range: 0..%hhu)\n",
- XT_AUDIT_TYPE_MAX);
+ pr_info_ratelimited("Audit type out of range (valid range: 0..%hhu)\n",
+ XT_AUDIT_TYPE_MAX);
return -ERANGE;
}
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
index 0f642ef8cd26..9f4151ec3e06 100644
--- a/net/netfilter/xt_CHECKSUM.c
+++ b/net/netfilter/xt_CHECKSUM.c
@@ -36,13 +36,13 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
const struct xt_CHECKSUM_info *einfo = par->targinfo;
if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
- pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
+ pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
+ einfo->operation);
return -EINVAL;
}
- if (!einfo->operation) {
- pr_info("no CHECKSUM operation enabled\n");
+ if (!einfo->operation)
return -EINVAL;
- }
+
return 0;
}
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index da56c06a443c..f3f1caac949b 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -91,8 +91,8 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "security") != 0) {
- pr_info("target only valid in the \'mangle\' "
- "or \'security\' tables, not \'%s\'.\n", par->table);
+ pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
+ par->table);
return -EINVAL;
}
@@ -102,14 +102,14 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
break;
default:
- pr_info("invalid mode: %hu\n", info->mode);
+ pr_info_ratelimited("invalid mode: %hu\n", info->mode);
return -EINVAL;
}
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
return ret;
}
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 5a152e2acfd5..8790190c6feb 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -82,15 +82,14 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
proto = xt_ct_find_proto(par);
if (!proto) {
- pr_info("You must specify a L4 protocol, and not use "
- "inversions on it.\n");
+ pr_info_ratelimited("You must specify a L4 protocol and not use inversions on it\n");
return -ENOENT;
}
helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
proto);
if (helper == NULL) {
- pr_info("No such helper \"%s\"\n", helper_name);
+ pr_info_ratelimited("No such helper \"%s\"\n", helper_name);
return -ENOENT;
}
@@ -124,6 +123,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
const struct nf_conntrack_l4proto *l4proto;
struct ctnl_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
+ const char *errmsg = NULL;
int ret = 0;
u8 proto;
@@ -131,29 +131,29 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
if (timeout_find_get == NULL) {
ret = -ENOENT;
- pr_info("Timeout policy base is empty\n");
+ errmsg = "Timeout policy base is empty";
goto out;
}
proto = xt_ct_find_proto(par);
if (!proto) {
ret = -EINVAL;
- pr_info("You must specify a L4 protocol, and not use "
- "inversions on it.\n");
+ errmsg = "You must specify an L4 protocol and not use inversions on it";
goto out;
}
timeout = timeout_find_get(par->net, timeout_name);
if (timeout == NULL) {
ret = -ENOENT;
- pr_info("No such timeout policy \"%s\"\n", timeout_name);
+ pr_info_ratelimited("No such timeout policy \"%s\"\n",
+ timeout_name);
goto out;
}
if (timeout->l3num != par->family) {
ret = -EINVAL;
- pr_info("Timeout policy `%s' can only be used by L3 protocol "
- "number %d\n", timeout_name, timeout->l3num);
+ pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
+ timeout_name, 3, timeout->l3num);
goto err_put_timeout;
}
/* Make sure the timeout policy matches any existing protocol tracker,
@@ -162,9 +162,8 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
l4proto = __nf_ct_l4proto_find(par->family, proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
- pr_info("Timeout policy `%s' can only be used by L4 protocol "
- "number %d\n",
- timeout_name, timeout->l4proto->l4proto);
+ pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
+ timeout_name, 4, timeout->l4proto->l4proto);
goto err_put_timeout;
}
timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
@@ -180,6 +179,8 @@ err_put_timeout:
__xt_ct_tg_timeout_put(timeout);
out:
rcu_read_unlock();
+ if (errmsg)
+ pr_info_ratelimited("%s\n", errmsg);
return ret;
#else
return -EOPNOTSUPP;
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 3f83d38c4e5b..098ed851b7a7 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -66,10 +66,8 @@ static int dscp_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_DSCP_info *info = par->targinfo;
- if (info->dscp > XT_DSCP_MAX) {
- pr_info("dscp %x out of range\n", info->dscp);
+ if (info->dscp > XT_DSCP_MAX)
return -EDOM;
- }
return 0;
}
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c
index 1535e87ed9bd..4653b071bed4 100644
--- a/net/netfilter/xt_HL.c
+++ b/net/netfilter/xt_HL.c
@@ -105,10 +105,8 @@ static int ttl_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_TTL_info *info = par->targinfo;
- if (info->mode > IPT_TTL_MAXMODE) {
- pr_info("TTL: invalid or unknown mode %u\n", info->mode);
+ if (info->mode > IPT_TTL_MAXMODE)
return -EINVAL;
- }
if (info->mode != IPT_TTL_SET && info->ttl == 0)
return -EINVAL;
return 0;
@@ -118,15 +116,10 @@ static int hl_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_HL_info *info = par->targinfo;
- if (info->mode > IP6T_HL_MAXMODE) {
- pr_info("invalid or unknown mode %u\n", info->mode);
+ if (info->mode > IP6T_HL_MAXMODE)
return -EINVAL;
- }
- if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
- pr_info("increment/decrement does not "
- "make sense with value 0\n");
+ if (info->mode != IP6T_HL_SET && info->hop_limit == 0)
return -EINVAL;
- }
return 0;
}
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 60e6dbe12460..9c75f419cd80 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -9,6 +9,8 @@
* the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
@@ -312,29 +314,30 @@ hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
static int hmark_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_hmark_info *info = par->targinfo;
+ const char *errmsg = "proto mask must be zero with L3 mode";
- if (!info->hmodulus) {
- pr_info("xt_HMARK: hash modulus can't be zero\n");
+ if (!info->hmodulus)
return -EINVAL;
- }
+
if (info->proto_mask &&
- (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) {
- pr_info("xt_HMARK: proto mask must be zero with L3 mode\n");
- return -EINVAL;
- }
+ (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)))
+ goto err;
+
if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
(info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
- XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) {
- pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
+ XT_HMARK_FLAG(XT_HMARK_DPORT_MASK))))
return -EINVAL;
- }
+
if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
(info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
- pr_info("xt_HMARK: spi-set and port-set can't be combined\n");
- return -EINVAL;
+ errmsg = "spi-set and port-set can't be combined";
+ goto err;
}
return 0;
+err:
+ pr_info_ratelimited("%s\n", errmsg);
+ return -EINVAL;
}
static struct xt_target hmark_tg_reg[] __read_mostly = {
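A minimal standalone sketch of the checkentry pattern this series converges on -- collect a message and emit it once, rate-limited, at a single error label. The names example_info and example_tg_check are hypothetical illustrations, not part of the patch:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/netfilter/x_tables.h>

struct example_info {		/* hypothetical target payload */
	__u32 hmodulus;
	__u32 flags;
};

static int example_tg_check(const struct xt_tgchk_param *par)
{
	const struct example_info *info = par->targinfo;
	const char *errmsg = "hash modulus can't be zero";

	if (!info->hmodulus)
		goto err;
	if (info->flags & ~0x3U) {
		errmsg = "unknown flags";
		goto err;
	}
	return 0;
err:
	/* one rate-limited line per failure class, instead of an
	 * unbounded printk stream driven by userspace rule inserts
	 */
	pr_info_ratelimited("%s\n", errmsg);
	return -EINVAL;
}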
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 6c2482b709b1..1ac6600bfafd 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
info->timer->refcnt = 1;
+ INIT_WORK(&info->timer->work, idletimer_tg_work);
+
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
- INIT_WORK(&info->timer->work, idletimer_tg_work);
-
return 0;
out_free_attr:
@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
pr_debug("timeout value is zero\n");
return -EINVAL;
}
-
+ if (info->timeout >= INT_MAX / 1000) {
+ pr_debug("timeout value is too big\n");
+ return -EINVAL;
+ }
if (info->label[0] == '\0' ||
strnlen(info->label,
MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
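For scale, on the timeout check added above: info->timeout is a 32-bit seconds value and info->timeout * 1000 is computed in 32-bit arithmetic before msecs_to_jiffies(), so the INT_MAX / 1000 bound -- 2,147,483 seconds, roughly 24.8 days -- presumably exists to reject values whose millisecond conversion would overflow.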
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 1dcad893df78..19846445504d 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -111,10 +111,8 @@ static int led_tg_check(const struct xt_tgchk_param *par)
struct xt_led_info_internal *ledinternal;
int err;
- if (ledinfo->id[0] == '\0') {
- pr_info("No 'id' parameter given.\n");
+ if (ledinfo->id[0] == '\0')
return -EINVAL;
- }
mutex_lock(&xt_led_mutex);
@@ -138,13 +136,14 @@ static int led_tg_check(const struct xt_tgchk_param *par)
err = led_trigger_register(&ledinternal->netfilter_led_trigger);
if (err) {
- pr_err("Trigger name is already in use.\n");
+ pr_info_ratelimited("Trigger name is already in use.\n");
goto exit_alloc;
}
- /* See if we need to set up a timer */
- if (ledinfo->delay > 0)
- timer_setup(&ledinternal->timer, led_timeout_callback, 0);
+ /* Since the ledinternal timer can be shared between multiple targets,
+ * always set it up, even if the current target does not need it
+ */
+ timer_setup(&ledinternal->timer, led_timeout_callback, 0);
list_add_tail(&ledinternal->list, &xt_led_triggers);
@@ -181,8 +180,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
list_del(&ledinternal->list);
- if (ledinfo->delay > 0)
- del_timer_sync(&ledinternal->timer);
+ del_timer_sync(&ledinternal->timer);
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index a360b99a958a..a9aca80a32ae 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -8,6 +8,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -67,13 +69,13 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par)
init_hashrandom(&jhash_initval);
if (info->queues_total == 0) {
- pr_err("NFQUEUE: number of total queues is 0\n");
+ pr_info_ratelimited("number of total queues is 0\n");
return -EINVAL;
}
maxid = info->queues_total - 1 + info->queuenum;
if (maxid > 0xffff) {
- pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n",
- info->queues_total, maxid);
+ pr_info_ratelimited("number of queues (%u) out of range (got %u)\n",
+ info->queues_total, maxid);
return -ERANGE;
}
if (par->target->revision == 2 && info->flags > 1)
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 9faf5e050b79..4ad5fe27e08b 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -60,18 +60,20 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
&info->secid);
if (err) {
if (err == -EINVAL)
- pr_info("invalid security context \'%s\'\n", info->secctx);
+ pr_info_ratelimited("invalid security context \'%s\'\n",
+ info->secctx);
return err;
}
if (!info->secid) {
- pr_info("unable to map security context \'%s\'\n", info->secctx);
+ pr_info_ratelimited("unable to map security context \'%s\'\n",
+ info->secctx);
return -ENOENT;
}
err = security_secmark_relabel_packet(info->secid);
if (err) {
- pr_info("unable to obtain relabeling permission\n");
+ pr_info_ratelimited("unable to obtain relabeling permission\n");
return err;
}
@@ -86,14 +88,14 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "security") != 0) {
- pr_info("target only valid in the \'mangle\' "
- "or \'security\' tables, not \'%s\'.\n", par->table);
+ pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
+ par->table);
return -EINVAL;
}
if (mode && mode != info->mode) {
- pr_info("mode already set to %hu cannot mix with "
- "rules for mode %hu\n", mode, info->mode);
+ pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n",
+ mode, info->mode);
return -EINVAL;
}
@@ -101,7 +103,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
case SECMARK_MODE_SEL:
break;
default:
- pr_info("invalid mode: %hu\n", info->mode);
+ pr_info_ratelimited("invalid mode: %hu\n", info->mode);
return -EINVAL;
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 99bb8e410f22..98efb202f8b4 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -273,8 +273,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
- pr_info("path-MTU clamping only supported in "
- "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
return -EINVAL;
}
if (par->nft_compat)
@@ -283,7 +282,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
return 0;
- pr_info("Only works on TCP SYN packets\n");
+ pr_info_ratelimited("Only works on TCP SYN packets\n");
return -EINVAL;
}
@@ -298,8 +297,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
- pr_info("path-MTU clamping only supported in "
- "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
return -EINVAL;
}
if (par->nft_compat)
@@ -308,7 +306,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
return 0;
- pr_info("Only works on TCP SYN packets\n");
+ pr_info_ratelimited("Only works on TCP SYN packets\n");
return -EINVAL;
}
#endif
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 17d7705e3bd4..8c89323c06af 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -540,8 +540,7 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
!(i->invflags & IP6T_INV_PROTO))
return 0;
- pr_info("Can be used only in combination with "
- "either -p tcp or -p udp\n");
+ pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
return -EINVAL;
}
#endif
@@ -559,8 +558,7 @@ static int tproxy_tg4_check(const struct xt_tgchk_param *par)
&& !(i->invflags & IPT_INV_PROTO))
return 0;
- pr_info("Can be used only in combination with "
- "either -p tcp or -p udp\n");
+ pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
return -EINVAL;
}
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 911a7c0da504..89e281b3bfc2 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -164,48 +164,47 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
{
+ const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";
struct xt_addrtype_info_v1 *info = par->matchinfo;
if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
- info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
- pr_info("both incoming and outgoing "
- "interface limitation cannot be selected\n");
- return -EINVAL;
- }
+ info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
+ goto err;
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) &&
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
- pr_info("output interface limitation "
- "not valid in PREROUTING and INPUT\n");
- return -EINVAL;
+ errmsg = "output interface limitation not valid in PREROUTING and INPUT";
+ goto err;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) &&
info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) {
- pr_info("input interface limitation "
- "not valid in POSTROUTING and OUTPUT\n");
- return -EINVAL;
+ errmsg = "input interface limitation not valid in POSTROUTING and OUTPUT";
+ goto err;
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
if (par->family == NFPROTO_IPV6) {
if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
- pr_err("ipv6 BLACKHOLE matching not supported\n");
- return -EINVAL;
+ errmsg = "ipv6 BLACKHOLE matching not supported";
+ goto err;
}
if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
- pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
- return -EINVAL;
+ errmsg = "ipv6 PROHIBIT (THROW, NAT ..) matching not supported";
+ goto err;
}
if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
- pr_err("ipv6 does not support BROADCAST matching\n");
- return -EINVAL;
+ errmsg = "ipv6 does not support BROADCAST matching";
+ goto err;
}
}
#endif
return 0;
+err:
+ pr_info_ratelimited("%s\n", errmsg);
+ return -EINVAL;
}
static struct xt_match addrtype_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 06b090d8e901..a2cf8a6236d6 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -7,6 +7,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/skbuff.h>
@@ -34,7 +36,7 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
program.filter = insns;
if (bpf_prog_create(ret, &program)) {
- pr_info("bpf: check failed: parse error\n");
+ pr_info_ratelimited("check failed: parse error\n");
return -EINVAL;
}
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 891f4e7e8ea7..7df2dece57d3 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -12,6 +12,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
@@ -48,7 +50,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
}
if (info->has_path && info->has_classid) {
- pr_info("xt_cgroup: both path and classid specified\n");
+ pr_info_ratelimited("path and classid specified\n");
return -EINVAL;
}
@@ -56,8 +58,8 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
if (info->has_path) {
cgrp = cgroup_get_from_path(info->path);
if (IS_ERR(cgrp)) {
- pr_info("xt_cgroup: invalid path, errno=%ld\n",
- PTR_ERR(cgrp));
+ pr_info_ratelimited("invalid path, errno=%ld\n",
+ PTR_ERR(cgrp));
return -EINVAL;
}
info->priv = cgrp;
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 57ef175dfbfa..0068688995c8 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -135,14 +135,12 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
struct xt_cluster_match_info *info = par->matchinfo;
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
- pr_info("you have exceeded the maximum "
- "number of cluster nodes (%u > %u)\n",
- info->total_nodes, XT_CLUSTER_NODES_MAX);
+ pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
+ info->total_nodes, XT_CLUSTER_NODES_MAX);
return -EINVAL;
}
if (info->node_mask >= (1ULL << info->total_nodes)) {
- pr_info("this node mask cannot be "
- "higher than the total number of nodes\n");
+ pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
return -EDOM;
}
return 0;
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index cad0b7b5eb35..93cb018c3055 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -112,8 +112,8 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
/*
* This filter cannot function correctly unless connection tracking
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c
index 23372879e6e3..4fa4efd24353 100644
--- a/net/netfilter/xt_connlabel.c
+++ b/net/netfilter/xt_connlabel.c
@@ -57,14 +57,15 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par)
int ret;
if (info->options & ~options) {
- pr_err("Unknown options in mask %x\n", info->options);
+ pr_info_ratelimited("Unknown options in mask %x\n",
+ info->options);
return -EINVAL;
}
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0) {
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
return ret;
}
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index ec377cc6a369..809639ce6f5a 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -79,8 +79,8 @@ static int connmark_tg_check(const struct xt_tgchk_param *par)
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
return ret;
}
@@ -109,8 +109,8 @@ static int connmark_mt_check(const struct xt_mtchk_param *par)
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
return ret;
}
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 39cf1d019240..df80fe7d391c 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -272,8 +272,8 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
return ret;
}
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c
index 236ac8008909..a4c2b862f820 100644
--- a/net/netfilter/xt_dscp.c
+++ b/net/netfilter/xt_dscp.c
@@ -46,10 +46,8 @@ static int dscp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
- if (info->dscp > XT_DSCP_MAX) {
- pr_info("dscp %x out of range\n", info->dscp);
+ if (info->dscp > XT_DSCP_MAX)
return -EDOM;
- }
return 0;
}
diff --git a/net/netfilter/xt_ecn.c b/net/netfilter/xt_ecn.c
index 3c831a8efebc..c7ad4afa5fb8 100644
--- a/net/netfilter/xt_ecn.c
+++ b/net/netfilter/xt_ecn.c
@@ -97,7 +97,7 @@ static int ecn_mt_check4(const struct xt_mtchk_param *par)
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
(ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
- pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+ pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
return -EINVAL;
}
@@ -139,7 +139,7 @@ static int ecn_mt_check6(const struct xt_mtchk_param *par)
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
(ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
- pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+ pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
return -EINVAL;
}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index ca6847403ca2..66f5aca62a08 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -523,7 +523,8 @@ static u64 user2rate(u64 user)
if (user != 0) {
return div64_u64(XT_HASHLIMIT_SCALE_v2, user);
} else {
- pr_warn("invalid rate from userspace: %llu\n", user);
+ pr_info_ratelimited("invalid rate from userspace: %llu\n",
+ user);
return 0;
}
}
@@ -774,7 +775,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
if (!dh->rateinfo.prev_window &&
(dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
spin_unlock(&dh->lock);
- rcu_read_unlock_bh();
+ local_bh_enable();
return !(cfg->mode & XT_HASHLIMIT_INVERT);
} else {
goto overlimit;
@@ -865,33 +866,34 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
}
if (cfg->mode & ~XT_HASHLIMIT_ALL) {
- pr_info("Unknown mode mask %X, kernel too old?\n",
- cfg->mode);
+ pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n",
+ cfg->mode);
return -EINVAL;
}
/* Check for overflow. */
if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
if (cfg->avg == 0 || cfg->avg > U32_MAX) {
- pr_info("hashlimit invalid rate\n");
+ pr_info_ratelimited("invalid rate\n");
return -ERANGE;
}
if (cfg->interval == 0) {
- pr_info("hashlimit invalid interval\n");
+ pr_info_ratelimited("invalid interval\n");
return -EINVAL;
}
} else if (cfg->mode & XT_HASHLIMIT_BYTES) {
if (user2credits_byte(cfg->avg) == 0) {
- pr_info("overflow, rate too high: %llu\n", cfg->avg);
+ pr_info_ratelimited("overflow, rate too high: %llu\n",
+ cfg->avg);
return -EINVAL;
}
} else if (cfg->burst == 0 ||
- user2credits(cfg->avg * cfg->burst, revision) <
- user2credits(cfg->avg, revision)) {
- pr_info("overflow, try lower: %llu/%llu\n",
- cfg->avg, cfg->burst);
- return -ERANGE;
+ user2credits(cfg->avg * cfg->burst, revision) <
+ user2credits(cfg->avg, revision)) {
+ pr_info_ratelimited("overflow, try lower: %llu/%llu\n",
+ cfg->avg, cfg->burst);
+ return -ERANGE;
}
mutex_lock(&hashlimit_mutex);
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c
index 38a78151c0e9..fd077aeaaed9 100644
--- a/net/netfilter/xt_helper.c
+++ b/net/netfilter/xt_helper.c
@@ -61,8 +61,8 @@ static int helper_mt_check(const struct xt_mtchk_param *par)
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0) {
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
return ret;
}
info->name[sizeof(info->name) - 1] = '\0';
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 7ca64a50db04..57f1df575701 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -72,7 +72,7 @@ static int comp_mt_check(const struct xt_mtchk_param *par)
/* Must specify no unknown invflags */
if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) {
- pr_err("unknown flags %X\n", compinfo->invflags);
+ pr_info_ratelimited("unknown flags %X\n", compinfo->invflags);
return -EINVAL;
}
return 0;
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 42540d26c2b8..1d950a6100af 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -158,7 +158,8 @@ static int ipvs_mt_check(const struct xt_mtchk_param *par)
&& par->family != NFPROTO_IPV6
#endif
) {
- pr_info("protocol family %u not supported\n", par->family);
+ pr_info_ratelimited("protocol family %u not supported\n",
+ par->family);
return -EINVAL;
}
diff --git a/net/netfilter/xt_l2tp.c b/net/netfilter/xt_l2tp.c
index 8aee572771f2..c43482bf48e6 100644
--- a/net/netfilter/xt_l2tp.c
+++ b/net/netfilter/xt_l2tp.c
@@ -216,7 +216,7 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
/* Check for invalid flags */
if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION |
XT_L2TP_TYPE)) {
- pr_info("unknown flags: %x\n", info->flags);
+ pr_info_ratelimited("unknown flags: %x\n", info->flags);
return -EINVAL;
}
@@ -225,7 +225,8 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
(!(info->flags & XT_L2TP_SID)) &&
((!(info->flags & XT_L2TP_TYPE)) ||
(info->type != XT_L2TP_TYPE_CONTROL))) {
- pr_info("invalid flags combination: %x\n", info->flags);
+ pr_info_ratelimited("invalid flags combination: %x\n",
+ info->flags);
return -EINVAL;
}
@@ -234,19 +235,22 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
*/
if (info->flags & XT_L2TP_VERSION) {
if ((info->version < 2) || (info->version > 3)) {
- pr_info("wrong L2TP version: %u\n", info->version);
+ pr_info_ratelimited("wrong L2TP version: %u\n",
+ info->version);
return -EINVAL;
}
if (info->version == 2) {
if ((info->flags & XT_L2TP_TID) &&
(info->tid > 0xffff)) {
- pr_info("v2 tid > 0xffff: %u\n", info->tid);
+ pr_info_ratelimited("v2 tid > 0xffff: %u\n",
+ info->tid);
return -EINVAL;
}
if ((info->flags & XT_L2TP_SID) &&
(info->sid > 0xffff)) {
- pr_info("v2 sid > 0xffff: %u\n", info->sid);
+ pr_info_ratelimited("v2 sid > 0xffff: %u\n",
+ info->sid);
return -EINVAL;
}
}
@@ -268,13 +272,13 @@ static int l2tp_mt_check4(const struct xt_mtchk_param *par)
if ((ip->proto != IPPROTO_UDP) &&
(ip->proto != IPPROTO_L2TP)) {
- pr_info("missing protocol rule (udp|l2tpip)\n");
+ pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
return -EINVAL;
}
if ((ip->proto == IPPROTO_L2TP) &&
(info->version == 2)) {
- pr_info("v2 doesn't support IP mode\n");
+ pr_info_ratelimited("v2 doesn't support IP mode\n");
return -EINVAL;
}
@@ -295,13 +299,13 @@ static int l2tp_mt_check6(const struct xt_mtchk_param *par)
if ((ip->proto != IPPROTO_UDP) &&
(ip->proto != IPPROTO_L2TP)) {
- pr_info("missing protocol rule (udp|l2tpip)\n");
+ pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
return -EINVAL;
}
if ((ip->proto == IPPROTO_L2TP) &&
(info->version == 2)) {
- pr_info("v2 doesn't support IP mode\n");
+ pr_info_ratelimited("v2 doesn't support IP mode\n");
return -EINVAL;
}
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 61403b77361c..55d18cd67635 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -106,8 +106,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
/* Check for overflow. */
if (r->burst == 0
|| user2credits(r->avg * r->burst) < user2credits(r->avg)) {
- pr_info("Overflow, try lower: %u/%u\n",
- r->avg, r->burst);
+ pr_info_ratelimited("Overflow, try lower: %u/%u\n",
+ r->avg, r->burst);
return -ERANGE;
}
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index 0fd14d1eb09d..bdb689cdc829 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
@@ -19,8 +21,7 @@ static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
if (mr->rangesize != 1) {
- pr_info("%s: multiple ranges no longer supported\n",
- par->target->name);
+ pr_info_ratelimited("multiple ranges no longer supported\n");
return -EINVAL;
}
return nf_ct_netns_get(par->net, par->family);
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 6f92d25590a8..c8674deed4eb 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -6,6 +6,8 @@
* it under the terms of the GNU General Public License version 2 (or any
* later at your option) as published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -39,8 +41,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
nfacct = nfnl_acct_find_get(par->net, info->name);
if (nfacct == NULL) {
- pr_info("xt_nfacct: accounting object with name `%s' "
- "does not exists\n", info->name);
+ pr_info_ratelimited("accounting object `%s' does not exist\n",
+ info->name);
return -ENOENT;
}
info->nfacct = nfacct;
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index bb33598e4530..9d6d67b953ac 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -107,9 +107,7 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
- pr_info("using --physdev-out and --physdev-is-out are only "
- "supported in the FORWARD and POSTROUTING chains with "
- "bridged traffic.\n");
+ pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
return -EINVAL;
}
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 5639fb03bdd9..13f8ccf946d6 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -132,26 +132,29 @@ policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
static int policy_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
+ const char *errmsg = "neither incoming nor outgoing policy selected";
+
+ if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT)))
+ goto err;
- if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
- pr_info("neither incoming nor outgoing policy selected\n");
- return -EINVAL;
- }
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
- pr_info("output policy not valid in PREROUTING and INPUT\n");
- return -EINVAL;
+ errmsg = "output policy not valid in PREROUTING and INPUT";
+ goto err;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) {
- pr_info("input policy not valid in POSTROUTING and OUTPUT\n");
- return -EINVAL;
+ errmsg = "input policy not valid in POSTROUTING and OUTPUT";
+ goto err;
}
if (info->len > XT_POLICY_MAX_ELEM) {
- pr_info("too many policy elements\n");
- return -EINVAL;
+ errmsg = "too many policy elements";
+ goto err;
}
return 0;
+err:
+ pr_info_ratelimited("%s\n", errmsg);
+ return -EINVAL;
}
static struct xt_match policy_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 245fa350a7a8..6d232d18faff 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -342,8 +342,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
net_get_random_once(&hash_rnd, sizeof(hash_rnd));
if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
- pr_info("Unsupported user space flags (%08x)\n",
- info->check_set);
+ pr_info_ratelimited("Unsupported userspace flags (%08x)\n",
+ info->check_set);
return -EINVAL;
}
if (hweight8(info->check_set &
@@ -357,8 +357,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
return -EINVAL;
if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
- pr_info("hitcount (%u) is larger than allowed maximum (%u)\n",
- info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
+ pr_info_ratelimited("hitcount (%u) is larger than allowed maximum (%u)\n",
+ info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
return -EINVAL;
}
if (info->name[0] == '\0' ||
@@ -587,7 +587,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
add = true;
break;
default:
- pr_info("Need \"+ip\", \"-ip\" or \"/\"\n");
+ pr_info_ratelimited("Need \"+ip\", \"-ip\" or \"/\"\n");
return -EINVAL;
}
@@ -601,10 +601,8 @@ recent_mt_proc_write(struct file *file, const char __user *input,
succ = in4_pton(c, size, (void *)&addr, '\n', NULL);
}
- if (!succ) {
- pr_info("illegal address written to procfs\n");
+ if (!succ)
return -EINVAL;
- }
spin_lock_bh(&recent_lock);
e = recent_entry_lookup(t, &addr, family, 0);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 16b6b11ee83f..6f4c5217d835 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -92,12 +92,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find set identified by id %u to match\n",
- info->match_set.index);
+ pr_info_ratelimited("Cannot find set identified by id %u to match\n",
+ info->match_set.index);
return -ENOENT;
}
if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
- pr_warn("Protocol error: set match dimension is over the limit!\n");
+ pr_info_ratelimited("set match dimension is over the limit!\n");
ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;
}
@@ -143,12 +143,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par)
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find set identified by id %u to match\n",
- info->match_set.index);
+ pr_info_ratelimited("Cannot find set identified by id %u to match\n",
+ info->match_set.index);
return -ENOENT;
}
if (info->match_set.dim > IPSET_DIM_MAX) {
- pr_warn("Protocol error: set match dimension is over the limit!\n");
+ pr_info_ratelimited("set match dimension is over the limit!\n");
ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;
}
@@ -241,8 +241,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find add_set index %u as target\n",
- info->add_set.index);
+ pr_info_ratelimited("Cannot find add_set index %u as target\n",
+ info->add_set.index);
return -ENOENT;
}
}
@@ -250,8 +250,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
if (info->del_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find del_set index %u as target\n",
- info->del_set.index);
+ pr_info_ratelimited("Cannot find del_set index %u as target\n",
+ info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
@@ -259,7 +259,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
}
if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 ||
info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
- pr_warn("Protocol error: SET target dimension is over the limit!\n");
+ pr_info_ratelimited("SET target dimension over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
@@ -316,8 +316,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find add_set index %u as target\n",
- info->add_set.index);
+ pr_info_ratelimited("Cannot find add_set index %u as target\n",
+ info->add_set.index);
return -ENOENT;
}
}
@@ -325,8 +325,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
if (info->del_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find del_set index %u as target\n",
- info->del_set.index);
+ pr_info_ratelimited("Cannot find del_set index %u as target\n",
+ info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
@@ -334,7 +334,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
}
if (info->add_set.dim > IPSET_DIM_MAX ||
info->del_set.dim > IPSET_DIM_MAX) {
- pr_warn("Protocol error: SET target dimension is over the limit!\n");
+ pr_info_ratelimited("SET target dimension over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
@@ -444,8 +444,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
index = ip_set_nfnl_get_byindex(par->net,
info->add_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find add_set index %u as target\n",
- info->add_set.index);
+ pr_info_ratelimited("Cannot find add_set index %u as target\n",
+ info->add_set.index);
return -ENOENT;
}
}
@@ -454,8 +454,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
index = ip_set_nfnl_get_byindex(par->net,
info->del_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find del_set index %u as target\n",
- info->del_set.index);
+ pr_info_ratelimited("Cannot find del_set index %u as target\n",
+ info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net,
info->add_set.index);
@@ -465,7 +465,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
if (info->map_set.index != IPSET_INVALID_ID) {
if (strncmp(par->table, "mangle", 7)) {
- pr_warn("--map-set only usable from mangle table\n");
+ pr_info_ratelimited("--map-set only usable from mangle table\n");
return -EINVAL;
}
if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
@@ -473,14 +473,14 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
!(par->hook_mask & (1 << NF_INET_FORWARD |
1 << NF_INET_LOCAL_OUT |
1 << NF_INET_POST_ROUTING))) {
- pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
+ pr_info_ratelimited("mapping of prio and/or queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
return -EINVAL;
}
index = ip_set_nfnl_get_byindex(par->net,
info->map_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warn("Cannot find map_set index %u as target\n",
- info->map_set.index);
+ pr_info_ratelimited("Cannot find map_set index %u as target\n",
+ info->map_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net,
info->add_set.index);
@@ -494,7 +494,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
if (info->add_set.dim > IPSET_DIM_MAX ||
info->del_set.dim > IPSET_DIM_MAX ||
info->map_set.dim > IPSET_DIM_MAX) {
- pr_warn("Protocol error: SET target dimension is over the limit!\n");
+ pr_info_ratelimited("SET target dimension over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 575d2153e3b8..2ac7f674d19b 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -171,7 +171,8 @@ static int socket_mt_v1_check(const struct xt_mtchk_param *par)
return err;
if (info->flags & ~XT_SOCKET_FLAGS_V1) {
- pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V1);
+ pr_info_ratelimited("unknown flags 0x%x\n",
+ info->flags & ~XT_SOCKET_FLAGS_V1);
return -EINVAL;
}
return 0;
@@ -187,7 +188,8 @@ static int socket_mt_v2_check(const struct xt_mtchk_param *par)
return err;
if (info->flags & ~XT_SOCKET_FLAGS_V2) {
- pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V2);
+ pr_info_ratelimited("unknown flags 0x%x\n",
+ info->flags & ~XT_SOCKET_FLAGS_V2);
return -EINVAL;
}
return 0;
@@ -203,8 +205,8 @@ static int socket_mt_v3_check(const struct xt_mtchk_param *par)
if (err)
return err;
if (info->flags & ~XT_SOCKET_FLAGS_V3) {
- pr_info("unknown flags 0x%x\n",
- info->flags & ~XT_SOCKET_FLAGS_V3);
+ pr_info_ratelimited("unknown flags 0x%x\n",
+ info->flags & ~XT_SOCKET_FLAGS_V3);
return -EINVAL;
}
return 0;
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index 5fbd79194d21..0b41c0befe3c 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -44,8 +44,8 @@ static int state_mt_check(const struct xt_mtchk_param *par)
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
- pr_info("cannot load conntrack support for proto=%u\n",
- par->family);
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
return ret;
}
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 1b01eec1fbda..0160f505e337 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -235,13 +235,13 @@ static int time_mt_check(const struct xt_mtchk_param *par)
if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
info->daytime_stop > XT_TIME_MAX_DAYTIME) {
- pr_info("invalid argument - start or "
- "stop time greater than 23:59:59\n");
+ pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n");
return -EDOM;
}
if (info->flags & ~XT_TIME_ALL_FLAGS) {
- pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS);
+ pr_info_ratelimited("unknown flags 0x%x\n",
+ info->flags & ~XT_TIME_ALL_FLAGS);
return -EINVAL;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2ad445c1d27c..07e8478068f0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2308,7 +2308,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
if (cb->start) {
ret = cb->start(cb);
if (ret)
- goto error_unlock;
+ goto error_put;
}
nlk->cb_running = true;
@@ -2328,6 +2328,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
*/
return -EINTR;
+error_put:
+ module_put(control->module);
error_unlock:
sock_put(sk);
mutex_unlock(nlk->cb_mutex);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 42410e910aff..cf73dc006c3b 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -445,7 +445,7 @@ send_fragmentable:
(char *)&opt, sizeof(opt));
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 1, iov[0].iov_len);
+ iov, 2, len);
opt = IPV6_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a7dc7271042a..247b7cc20c13 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1397,13 +1397,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
nla_get_u32(tca[TCA_CHAIN]) != chain->index)
continue;
if (!tcf_chain_dump(chain, q, parent, skb, cb,
- index_start, &index))
+ index_start, &index)) {
+ err = -EMSGSIZE;
break;
+ }
}
cb->args[0] = index;
out:
+ /* If we made no progress, the error (EMSGSIZE) is real */
+ if (skb->len == 0 && err)
+ return err;
return skb->len;
}
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 51aa55618ef7..b12da6ef3c12 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -170,9 +170,28 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
enum nl80211_bss_scan_width scan_width;
struct ieee80211_supported_band *sband =
rdev->wiphy.bands[setup->chandef.chan->band];
- scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
- setup->basic_rates = ieee80211_mandatory_rates(sband,
- scan_width);
+
+ if (setup->chandef.chan->band == NL80211_BAND_2GHZ) {
+ int i;
+
+ /*
+ * Older versions selected the mandatory rates for
+ * 2.4 GHz as well, but were broken in that only
+ * 1 Mbps was regarded as a mandatory rate. Keep
+ * using just 1 Mbps as the default basic rate for
+ * mesh to be interoperable with older versions.
+ */
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].bitrate == 10) {
+ setup->basic_rates = BIT(i);
+ break;
+ }
+ }
+ } else {
+ scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
+ setup->basic_rates = ieee80211_mandatory_rates(sband,
+ scan_width);
+ }
}
err = cfg80211_chandef_dfs_required(&rdev->wiphy,
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index fdb3646274a5..701cfd7acc1b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1032,6 +1032,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wdev->current_bss = NULL;
wdev->ssid_len = 0;
wdev->conn_owner_nlportid = 0;
+ kzfree(wdev->connect_keys);
+ wdev->connect_keys = NULL;
nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index 0e349b80686e..ba942e3ead89 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+ifndef CROSS_COMPILE
hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct
HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
@@ -16,7 +17,6 @@ HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include
bpf-direct-objs := bpf-direct.o
# Try to match the kernel target.
-ifndef CROSS_COMPILE
ifndef CONFIG_64BIT
# s390 has -m31 flag to build 31 bit binaries
@@ -35,12 +35,4 @@ HOSTLOADLIBES_bpf-fancy += $(MFLAG)
HOSTLOADLIBES_dropper += $(MFLAG)
endif
always := $(hostprogs-m)
-else
-# MIPS system calls are defined based on the -mabi that is passed
-# to the toolchain which may or may not be a valid option
-# for the host toolchain. So disable tests if target architecture
-# is MIPS but the host isn't.
-ifndef CONFIG_MIPS
-always := $(hostprogs-m)
-endif
endif
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 47cddf32aeba..4f2b25d43ec9 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -256,6 +256,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool
objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check)
+objtool_args += $(if $(part-of-module), --module,)
+
ifndef CONFIG_FRAME_POINTER
objtool_args += --no-fp
endif
@@ -264,6 +266,12 @@ objtool_args += --no-unreachable
else
objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
endif
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_CFLAGS),)
+ objtool_args += --retpoline
+endif
+endif
+
ifdef CONFIG_MODVERSIONS
objtool_o = $(@D)/.tmp_$(@F)
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 6f9e4ce568cd..9bb0a7f2863e 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -18,6 +18,7 @@
#include <linux/cred.h>
#include <linux/key-type.h>
#include <linux/digsig.h>
+#include <linux/vmalloc.h>
#include <crypto/public_key.h>
#include <keys/system_keyring.h>
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 929e14978c42..fa728f662a6f 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -22,6 +22,13 @@
#include <keys/big_key-type.h>
#include <crypto/aead.h>
+struct big_key_buf {
+ unsigned int nr_pages;
+ void *virt;
+ struct scatterlist *sg;
+ struct page *pages[];
+};
+
/*
* Layout of key payload words.
*/
@@ -91,10 +98,9 @@ static DEFINE_MUTEX(big_key_aead_lock);
/*
* Encrypt/decrypt big_key data
*/
-static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
+static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
{
int ret;
- struct scatterlist sgio;
struct aead_request *aead_req;
/* We always use a zero nonce. The reason we can get away with this is
* because we're using a different randomly generated key for every
@@ -109,8 +115,7 @@ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
return -ENOMEM;
memset(zero_nonce, 0, sizeof(zero_nonce));
- sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
- aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
+ aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
aead_request_set_ad(aead_req, 0);
@@ -130,21 +135,81 @@ error:
}
/*
+ * Free up the buffer.
+ */
+static void big_key_free_buffer(struct big_key_buf *buf)
+{
+ unsigned int i;
+
+ if (buf->virt) {
+ memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
+ vunmap(buf->virt);
+ }
+
+ for (i = 0; i < buf->nr_pages; i++)
+ if (buf->pages[i])
+ __free_page(buf->pages[i]);
+
+ kfree(buf);
+}
+
+/*
+ * Allocate a buffer consisting of a set of pages with a virtual mapping
+ * applied over them.
+ */
+static void *big_key_alloc_buffer(size_t len)
+{
+ struct big_key_buf *buf;
+ unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned int i, l;
+
+ buf = kzalloc(sizeof(struct big_key_buf) +
+ sizeof(struct page) * npg +
+ sizeof(struct scatterlist) * npg,
+ GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->nr_pages = npg;
+ buf->sg = (void *)(buf->pages + npg);
+ sg_init_table(buf->sg, npg);
+
+ for (i = 0; i < buf->nr_pages; i++) {
+ buf->pages[i] = alloc_page(GFP_KERNEL);
+ if (!buf->pages[i])
+ goto nomem;
+
+ l = min_t(size_t, len, PAGE_SIZE);
+ sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
+ len -= l;
+ }
+
+ buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->virt)
+ goto nomem;
+
+ return buf;
+
+nomem:
+ big_key_free_buffer(buf);
+ return NULL;
+}
+
+/*
* Preparse a big key
*/
int big_key_preparse(struct key_preparsed_payload *prep)
{
+ struct big_key_buf *buf;
struct path *path = (struct path *)&prep->payload.data[big_key_path];
struct file *file;
u8 *enckey;
- u8 *data = NULL;
ssize_t written;
- size_t datalen = prep->datalen;
+ size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
int ret;
- ret = -EINVAL;
if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
- goto error;
+ return -EINVAL;
/* Set an arbitrary quota */
prep->quotalen = 16;
@@ -157,13 +222,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
*
* File content is stored encrypted with randomly generated key.
*/
- size_t enclen = datalen + ENC_AUTHTAG_SIZE;
loff_t pos = 0;
- data = kmalloc(enclen, GFP_KERNEL);
- if (!data)
+ buf = big_key_alloc_buffer(enclen);
+ if (!buf)
return -ENOMEM;
- memcpy(data, prep->data, datalen);
+ memcpy(buf->virt, prep->data, datalen);
/* generate random key */
enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
@@ -176,7 +240,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
goto err_enckey;
/* encrypt aligned data */
- ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
+ ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
if (ret)
goto err_enckey;
@@ -187,7 +251,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
goto err_enckey;
}
- written = kernel_write(file, data, enclen, &pos);
+ written = kernel_write(file, buf->virt, enclen, &pos);
if (written != enclen) {
ret = written;
if (written >= 0)
@@ -202,7 +266,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
*path = file->f_path;
path_get(path);
fput(file);
- kzfree(data);
+ big_key_free_buffer(buf);
} else {
/* Just store the data in a buffer */
void *data = kmalloc(datalen, GFP_KERNEL);
@@ -220,7 +284,7 @@ err_fput:
err_enckey:
kzfree(enckey);
error:
- kzfree(data);
+ big_key_free_buffer(buf);
return ret;
}
@@ -298,15 +362,15 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
return datalen;
if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct big_key_buf *buf;
struct path *path = (struct path *)&key->payload.data[big_key_path];
struct file *file;
- u8 *data;
u8 *enckey = (u8 *)key->payload.data[big_key_data];
size_t enclen = datalen + ENC_AUTHTAG_SIZE;
loff_t pos = 0;
- data = kmalloc(enclen, GFP_KERNEL);
- if (!data)
+ buf = big_key_alloc_buffer(enclen);
+ if (!buf)
return -ENOMEM;
file = dentry_open(path, O_RDONLY, current_cred());
@@ -316,26 +380,26 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
}
/* read file to kernel and decrypt */
- ret = kernel_read(file, data, enclen, &pos);
+ ret = kernel_read(file, buf->virt, enclen, &pos);
if (ret >= 0 && ret != enclen) {
ret = -EIO;
goto err_fput;
}
- ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey);
+ ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
if (ret)
goto err_fput;
ret = datalen;
/* copy decrypted data to user */
- if (copy_to_user(buffer, data, datalen) != 0)
+ if (copy_to_user(buffer, buf->virt, datalen) != 0)
ret = -EFAULT;
err_fput:
fput(file);
error:
- kzfree(data);
+ big_key_free_buffer(buf);
} else {
ret = datalen;
if (copy_to_user(buffer, key->payload.data[big_key_data],
diff --git a/sound/core/control.c b/sound/core/control.c
index 0b3026d937b1..8a77620a3854 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -889,7 +889,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
- if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL)
+ if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
return -EPERM;
snd_ctl_build_ioff(&control->id, kctl, index_offset);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c71dcacea807..96143df19b21 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = {
};
#define param_check_xint param_check_int
-static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+static int power_save = -1;
module_param(power_save, xint, 0644);
MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
"(in second, 0 = disable).");
@@ -2186,6 +2186,24 @@ out_free:
return err;
}
+#ifdef CONFIG_PM
+/* On some boards setting power_save to a non 0 value leads to clicking /
+ * popping sounds whenever we enter/leave powersaving mode. Ideally we would
+ * figure out how to avoid these sounds, but that is not always feasible.
+ * So we keep a list of devices where we disable powersaving as it's known
+ * to cause problems on these devices.
+ */
+static struct snd_pci_quirk power_save_blacklist[] = {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+ SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ {}
+};
+#endif /* CONFIG_PM */
+
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
[AZX_DRIVER_NVIDIA] = 8,
@@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip)
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
int dev = chip->dev_index;
+ int val;
int err;
hda->probe_continued = 1;
@@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip)
chip->running = 1;
azx_add_card_list(chip);
- snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+ val = power_save;
+#ifdef CONFIG_PM
+ if (val == -1) {
+ const struct snd_pci_quirk *q;
+
+ val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+ q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+ if (q && val) {
+ dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+ q->subvendor, q->subdevice);
+ val = 0;
+ }
+ }
+#endif /* CONFIG_PM */
+ snd_hda_set_power_save(&chip->bus, val * 1000);
if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
pm_runtime_put_autosuspend(&pci->dev);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ce28f7ce64e6..b9c93fa0a51c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4997,13 +4997,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ } else if (action == HDA_FIXUP_ACT_INIT) {
/* Enable DOCK device */
snd_hda_codec_write(codec, 0x17, 0,
AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
/* Enable DOCK device */
snd_hda_codec_write(codec, 0x19, 0,
AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
- snd_hda_apply_pincfgs(codec, pincfgs);
}
}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 50252046b01d..754e632a27bd 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3325,4 +3325,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
},
+{
+ /*
+ * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
+ * even though they advertise more. The capture interface doesn't work
+ * even on Windows.
+ */
+ USB_DEVICE(0x19b5, 0x0021),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ /* Capture */
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Playback */
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+ UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x03,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ }
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index a0951505c7f5..4ed9d0c41843 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -50,6 +50,7 @@
/*standard module options for ALSA. This module supports only one card*/
static int hdmi_card_index = SNDRV_DEFAULT_IDX1;
static char *hdmi_card_id = SNDRV_DEFAULT_STR1;
+static bool single_port;
module_param_named(index, hdmi_card_index, int, 0444);
MODULE_PARM_DESC(index,
@@ -57,6 +58,9 @@ MODULE_PARM_DESC(index,
module_param_named(id, hdmi_card_id, charp, 0444);
MODULE_PARM_DESC(id,
"ID string for INTEL Intel HDMI Audio controller.");
+module_param(single_port, bool, 0444);
+MODULE_PARM_DESC(single_port,
+ "Single-port mode (for compatibility)");
/*
* ELD SA bits in the CEA Speaker Allocation data block
@@ -1579,7 +1583,11 @@ static irqreturn_t display_pipe_interrupt_handler(int irq, void *dev_id)
static void notify_audio_lpe(struct platform_device *pdev, int port)
{
struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev);
- struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+ struct snd_intelhad *ctx;
+
+ ctx = &card_ctx->pcm_ctx[single_port ? 0 : port];
+ if (single_port)
+ ctx->port = port;
schedule_work(&ctx->hdmi_audio_wq);
}
@@ -1743,6 +1751,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
{
struct snd_card *card;
struct snd_intelhad_card *card_ctx;
+ struct snd_intelhad *ctx;
struct snd_pcm *pcm;
struct intel_hdmi_lpe_audio_pdata *pdata;
int irq;
@@ -1787,6 +1796,21 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, card_ctx);
+ card_ctx->num_pipes = pdata->num_pipes;
+ card_ctx->num_ports = single_port ? 1 : pdata->num_ports;
+
+ for_each_port(card_ctx, port) {
+ ctx = &card_ctx->pcm_ctx[port];
+ ctx->card_ctx = card_ctx;
+ ctx->dev = card_ctx->dev;
+ ctx->port = single_port ? -1 : port;
+ ctx->pipe = -1;
+
+ spin_lock_init(&ctx->had_spinlock);
+ mutex_init(&ctx->mutex);
+ INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
+ }
+
dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n",
__func__, (unsigned int)res_mmio->start,
(unsigned int)res_mmio->end);
@@ -1816,19 +1840,12 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
init_channel_allocations();
card_ctx->num_pipes = pdata->num_pipes;
- card_ctx->num_ports = pdata->num_ports;
+ card_ctx->num_ports = single_port ? 1 : pdata->num_ports;
for_each_port(card_ctx, port) {
- struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
int i;
- ctx->card_ctx = card_ctx;
- ctx->dev = card_ctx->dev;
- ctx->port = port;
- ctx->pipe = -1;
-
- INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
-
+ ctx = &card_ctx->pcm_ctx[port];
ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
MAX_CAP_STREAMS, &pcm);
if (ret)
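
The single_port changes above route every physical port through pcm_ctx[0] and record which port is currently live. As an illustrative sketch only (this helper is not part of the patch; it merely restates the mapping used in notify_audio_lpe()):

	static struct snd_intelhad *ctx_for_port(struct snd_intelhad_card *card_ctx,
						 int port)
	{
		/* in single-port mode all ports share the first PCM context */
		struct snd_intelhad *ctx = &card_ctx->pcm_ctx[single_port ? 0 : port];

		if (single_port)
			ctx->port = port;	/* remember which port is actually active */
		return ctx;
	}
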
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 3a0396d87c42..185acfa229b5 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -244,7 +244,7 @@ static int do_batch(int argc, char **argv)
}
if (errno && errno != ENOENT) {
- perror("reading batch file failed");
+ p_err("reading batch file failed: %s", strerror(errno));
err = -1;
} else {
p_info("processed %d lines", lines);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index e8e2baaf93c2..e549e329be82 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -774,6 +774,9 @@ static int do_dump(int argc, char **argv)
n < 0 ? strerror(errno) : "short write");
goto err_free;
}
+
+ if (json_output)
+ jsonw_null(json_wtr);
} else {
if (member_len == &info.jited_prog_len) {
const char *name = NULL;
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index a5684d0968b4..5898c22ba310 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -33,7 +33,7 @@ import resource
import struct
import re
import subprocess
-from collections import defaultdict
+from collections import defaultdict, namedtuple
VMX_EXIT_REASONS = {
'EXCEPTION_NMI': 0,
@@ -228,6 +228,7 @@ IOCTL_NUMBERS = {
}
ENCODING = locale.getpreferredencoding(False)
+TRACE_FILTER = re.compile(r'^[^\(]*$')
class Arch(object):
@@ -260,6 +261,11 @@ class Arch(object):
return ArchX86(SVM_EXIT_REASONS)
return
+ def tracepoint_is_child(self, field):
+ if (TRACE_FILTER.match(field)):
+ return None
+ return field.split('(', 1)[0]
+
class ArchX86(Arch):
def __init__(self, exit_reasons):
@@ -267,6 +273,10 @@ class ArchX86(Arch):
self.ioctl_numbers = IOCTL_NUMBERS
self.exit_reasons = exit_reasons
+ def debugfs_is_child(self, field):
+ """ Returns name of parent if 'field' is a child, None otherwise """
+ return None
+
class ArchPPC(Arch):
def __init__(self):
@@ -282,6 +292,10 @@ class ArchPPC(Arch):
self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
self.exit_reasons = {}
+ def debugfs_is_child(self, field):
+ """ Returns name of parent if 'field' is a child, None otherwise """
+ return None
+
class ArchA64(Arch):
def __init__(self):
@@ -289,6 +303,10 @@ class ArchA64(Arch):
self.ioctl_numbers = IOCTL_NUMBERS
self.exit_reasons = AARCH64_EXIT_REASONS
+ def debugfs_is_child(self, field):
+ """ Returns name of parent if 'field' is a child, None otherwise """
+ return None
+
class ArchS390(Arch):
def __init__(self):
@@ -296,6 +314,12 @@ class ArchS390(Arch):
self.ioctl_numbers = IOCTL_NUMBERS
self.exit_reasons = None
+ def debugfs_is_child(self, field):
+ """ Returns name of parent if 'field' is a child, None otherwise """
+ if field.startswith('instruction_'):
+ return 'exit_instruction'
+
+
ARCH = Arch.get_arch()
@@ -331,9 +355,6 @@ class perf_event_attr(ctypes.Structure):
PERF_TYPE_TRACEPOINT = 2
PERF_FORMAT_GROUP = 1 << 3
-PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing'
-PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm'
-
class Group(object):
"""Represents a perf event group."""
@@ -376,8 +397,8 @@ class Event(object):
self.syscall = self.libc.syscall
self.name = name
self.fd = None
- self.setup_event(group, trace_cpu, trace_pid, trace_point,
- trace_filter, trace_set)
+ self._setup_event(group, trace_cpu, trace_pid, trace_point,
+ trace_filter, trace_set)
def __del__(self):
"""Closes the event's file descriptor.
@@ -390,7 +411,7 @@ class Event(object):
if self.fd:
os.close(self.fd)
- def perf_event_open(self, attr, pid, cpu, group_fd, flags):
+ def _perf_event_open(self, attr, pid, cpu, group_fd, flags):
"""Wrapper for the sys_perf_evt_open() syscall.
Used to set up performance events, returns a file descriptor or -1
@@ -409,7 +430,7 @@ class Event(object):
ctypes.c_int(pid), ctypes.c_int(cpu),
ctypes.c_int(group_fd), ctypes.c_long(flags))
- def setup_event_attribute(self, trace_set, trace_point):
+ def _setup_event_attribute(self, trace_set, trace_point):
"""Returns an initialized ctype perf_event_attr struct."""
id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set,
@@ -419,8 +440,8 @@ class Event(object):
event_attr.config = int(open(id_path).read())
return event_attr
- def setup_event(self, group, trace_cpu, trace_pid, trace_point,
- trace_filter, trace_set):
+ def _setup_event(self, group, trace_cpu, trace_pid, trace_point,
+ trace_filter, trace_set):
"""Sets up the perf event in Linux.
Issues the syscall to register the event in the kernel and
@@ -428,7 +449,7 @@ class Event(object):
"""
- event_attr = self.setup_event_attribute(trace_set, trace_point)
+ event_attr = self._setup_event_attribute(trace_set, trace_point)
# First event will be group leader.
group_leader = -1
@@ -437,8 +458,8 @@ class Event(object):
if group.events:
group_leader = group.events[0].fd
- fd = self.perf_event_open(event_attr, trace_pid,
- trace_cpu, group_leader, 0)
+ fd = self._perf_event_open(event_attr, trace_pid,
+ trace_cpu, group_leader, 0)
if fd == -1:
err = ctypes.get_errno()
raise OSError(err, os.strerror(err),
@@ -475,6 +496,10 @@ class Event(object):
class Provider(object):
"""Encapsulates functionalities used by all providers."""
+ def __init__(self, pid):
+ self.child_events = False
+ self.pid = pid
+
@staticmethod
def is_field_wanted(fields_filter, field):
"""Indicate whether field is valid according to fields_filter."""
@@ -500,12 +525,12 @@ class TracepointProvider(Provider):
"""
def __init__(self, pid, fields_filter):
self.group_leaders = []
- self.filters = self.get_filters()
+ self.filters = self._get_filters()
self.update_fields(fields_filter)
- self.pid = pid
+ super(TracepointProvider, self).__init__(pid)
@staticmethod
- def get_filters():
+ def _get_filters():
"""Returns a dict of trace events, their filter ids and
the values that can be filtered.
@@ -521,8 +546,8 @@ class TracepointProvider(Provider):
filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
return filters
- def get_available_fields(self):
- """Returns a list of available event's of format 'event name(filter
+ def _get_available_fields(self):
+ """Returns a list of available events of format 'event name(filter
name)'.
All available events have directories under
@@ -549,11 +574,12 @@ class TracepointProvider(Provider):
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
- self.fields = [field for field in self.get_available_fields()
- if self.is_field_wanted(fields_filter, field)]
+ self.fields = [field for field in self._get_available_fields()
+ if self.is_field_wanted(fields_filter, field) or
+ ARCH.tracepoint_is_child(field)]
@staticmethod
- def get_online_cpus():
+ def _get_online_cpus():
"""Returns a list of cpu id integers."""
def parse_int_list(list_string):
"""Returns an int list from a string of comma separated integers and
@@ -575,17 +601,17 @@ class TracepointProvider(Provider):
cpu_string = cpu_list.readline()
return parse_int_list(cpu_string)
- def setup_traces(self):
+ def _setup_traces(self):
"""Creates all event and group objects needed to be able to retrieve
data."""
- fields = self.get_available_fields()
+ fields = self._get_available_fields()
if self._pid > 0:
# Fetch list of all threads of the monitored pid, as qemu
# starts a thread for each vcpu.
path = os.path.join('/proc', str(self._pid), 'task')
groupids = self.walkdir(path)[1]
else:
- groupids = self.get_online_cpus()
+ groupids = self._get_online_cpus()
# The constant is needed as a buffer for python libs, std
# streams and other files that the script opens.
@@ -663,7 +689,7 @@ class TracepointProvider(Provider):
# The garbage collector will get rid of all Event/Group
# objects and open files after removing the references.
self.group_leaders = []
- self.setup_traces()
+ self._setup_traces()
self.fields = self._fields
def read(self, by_guest=0):
@@ -671,8 +697,12 @@ class TracepointProvider(Provider):
ret = defaultdict(int)
for group in self.group_leaders:
for name, val in group.read().items():
- if name in self._fields:
- ret[name] += val
+ if name not in self._fields:
+ continue
+ parent = ARCH.tracepoint_is_child(name)
+ if parent:
+ name += ' ' + parent
+ ret[name] += val
return ret
def reset(self):
@@ -690,11 +720,11 @@ class DebugfsProvider(Provider):
self._baseline = {}
self.do_read = True
self.paths = []
- self.pid = pid
+ super(DebugfsProvider, self).__init__(pid)
if include_past:
- self.restore()
+ self._restore()
- def get_available_fields(self):
+ def _get_available_fields(self):
""""Returns a list of available fields.
The fields are all available KVM debugfs files
@@ -704,8 +734,9 @@ class DebugfsProvider(Provider):
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
- self._fields = [field for field in self.get_available_fields()
- if self.is_field_wanted(fields_filter, field)]
+ self._fields = [field for field in self._get_available_fields()
+ if self.is_field_wanted(fields_filter, field) or
+ ARCH.debugfs_is_child(field)]
@property
def fields(self):
@@ -758,7 +789,7 @@ class DebugfsProvider(Provider):
paths.append(dir)
for path in paths:
for field in self._fields:
- value = self.read_field(field, path)
+ value = self._read_field(field, path)
key = path + field
if reset == 1:
self._baseline[key] = value
@@ -766,20 +797,21 @@ class DebugfsProvider(Provider):
self._baseline[key] = 0
if self._baseline.get(key, -1) == -1:
self._baseline[key] = value
- increment = (results.get(field, 0) + value -
- self._baseline.get(key, 0))
- if by_guest:
- pid = key.split('-')[0]
- if pid in results:
- results[pid] += increment
- else:
- results[pid] = increment
+ parent = ARCH.debugfs_is_child(field)
+ if parent:
+ field = field + ' ' + parent
+ else:
+ if by_guest:
+ field = key.split('-')[0] # set 'field' to 'pid'
+ increment = value - self._baseline.get(key, 0)
+ if field in results:
+ results[field] += increment
else:
results[field] = increment
return results
- def read_field(self, field, path):
+ def _read_field(self, field, path):
"""Returns the value of a single field from a specific VM."""
try:
return int(open(os.path.join(PATH_DEBUGFS_KVM,
@@ -794,12 +826,15 @@ class DebugfsProvider(Provider):
self._baseline = {}
self.read(1)
- def restore(self):
+ def _restore(self):
"""Reset field counters"""
self._baseline = {}
self.read(2)
+EventStat = namedtuple('EventStat', ['value', 'delta'])
+
+
class Stats(object):
"""Manages the data providers and the data they provide.
@@ -808,13 +843,13 @@ class Stats(object):
"""
def __init__(self, options):
- self.providers = self.get_providers(options)
+ self.providers = self._get_providers(options)
self._pid_filter = options.pid
self._fields_filter = options.fields
self.values = {}
+ self._child_events = False
- @staticmethod
- def get_providers(options):
+ def _get_providers(self, options):
"""Returns a list of data providers depending on the passed options."""
providers = []
@@ -826,7 +861,7 @@ class Stats(object):
return providers
- def update_provider_filters(self):
+ def _update_provider_filters(self):
"""Propagates fields filters to providers."""
# As we reset the counters when updating the fields we can
# also clear the cache of old values.
@@ -847,7 +882,7 @@ class Stats(object):
def fields_filter(self, fields_filter):
if fields_filter != self._fields_filter:
self._fields_filter = fields_filter
- self.update_provider_filters()
+ self._update_provider_filters()
@property
def pid_filter(self):
@@ -861,16 +896,33 @@ class Stats(object):
for provider in self.providers:
provider.pid = self._pid_filter
+ @property
+ def child_events(self):
+ return self._child_events
+
+ @child_events.setter
+ def child_events(self, val):
+ self._child_events = val
+ for provider in self.providers:
+ provider.child_events = val
+
def get(self, by_guest=0):
"""Returns a dict with field -> (value, delta to last value) of all
- provider data."""
+ provider data.
+ Key formats:
+ * plain: 'key' is event name
+ * child-parent: 'key' is in format '<child> <parent>'
+ * pid: 'key' is the pid of the guest, and the record contains the
+ aggregated event data
+ These formats are generated by the providers, and handled in class TUI.
+ """
for provider in self.providers:
new = provider.read(by_guest=by_guest)
- for key in new if by_guest else provider.fields:
- oldval = self.values.get(key, (0, 0))[0]
+ for key in new:
+ oldval = self.values.get(key, EventStat(0, 0)).value
newval = new.get(key, 0)
newdelta = newval - oldval
- self.values[key] = (newval, newdelta)
+ self.values[key] = EventStat(newval, newdelta)
return self.values
def toggle_display_guests(self, to_pid):
@@ -899,10 +951,10 @@ class Stats(object):
self.get(to_pid)
return 0
+
DELAY_DEFAULT = 3.0
MAX_GUEST_NAME_LEN = 48
MAX_REGEX_LEN = 44
-DEFAULT_REGEX = r'^[^\(]*$'
SORT_DEFAULT = 0
@@ -969,7 +1021,7 @@ class Tui(object):
return res
- def print_all_gnames(self, row):
+ def _print_all_gnames(self, row):
"""Print a list of all running guests along with their pids."""
self.screen.addstr(row, 2, '%8s %-60s' %
('Pid', 'Guest Name (fuzzy list, might be '
@@ -1032,19 +1084,13 @@ class Tui(object):
return name
- def update_drilldown(self):
- """Sets or removes a filter that only allows fields without braces."""
- if not self.stats.fields_filter:
- self.stats.fields_filter = DEFAULT_REGEX
-
- elif self.stats.fields_filter == DEFAULT_REGEX:
- self.stats.fields_filter = None
-
- def update_pid(self, pid):
+ def _update_pid(self, pid):
"""Propagates pid selection to stats object."""
+ self.screen.addstr(4, 1, 'Updating pid filter...')
+ self.screen.refresh()
self.stats.pid_filter = pid
- def refresh_header(self, pid=None):
+ def _refresh_header(self, pid=None):
"""Refreshes the header."""
if pid is None:
pid = self.stats.pid_filter
@@ -1059,8 +1105,7 @@ class Tui(object):
.format(pid, gname), curses.A_BOLD)
else:
self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
- if self.stats.fields_filter and self.stats.fields_filter \
- != DEFAULT_REGEX:
+ if self.stats.fields_filter:
regex = self.stats.fields_filter
if len(regex) > MAX_REGEX_LEN:
regex = regex[:MAX_REGEX_LEN] + '...'
@@ -1075,56 +1120,99 @@ class Tui(object):
self.screen.addstr(4, 1, 'Collecting data...')
self.screen.refresh()
- def refresh_body(self, sleeptime):
+ def _refresh_body(self, sleeptime):
+ def is_child_field(field):
+ return field.find('(') != -1
+
+ def insert_child(sorted_items, child, values, parent):
+ num = len(sorted_items)
+ for i in range(0, num):
+ # only add child if parent is present
+ if parent.startswith(sorted_items[i][0]):
+ sorted_items.insert(i + 1, (' ' + child, values))
+
+ def get_sorted_events(self, stats):
+ """ separate parent and child events """
+ if self._sorting == SORT_DEFAULT:
+ def sortkey((_k, v)):
+ # sort by (delta value, overall value)
+ return (v.delta, v.value)
+ else:
+ def sortkey((_k, v)):
+ # sort by overall value
+ return v.value
+
+ childs = []
+ sorted_items = []
+            # We can't rule out that child events appear before their parents even
+            # when sorted - separate out all children first, and add them in later.
+ for key, values in sorted(stats.items(), key=sortkey,
+ reverse=True):
+ if values == (0, 0):
+ continue
+ if key.find(' ') != -1:
+ if not self.stats.child_events:
+ continue
+ childs.insert(0, (key, values))
+ else:
+ sorted_items.append((key, values))
+ if self.stats.child_events:
+ for key, values in childs:
+ (child, parent) = key.split(' ')
+ insert_child(sorted_items, child, values, parent)
+
+ return sorted_items
+
row = 3
self.screen.move(row, 0)
self.screen.clrtobot()
stats = self.stats.get(self._display_guests)
-
- def sortCurAvg(x):
- # sort by current events if available
- if stats[x][1]:
- return (-stats[x][1], -stats[x][0])
+ total = 0.
+ ctotal = 0.
+ for key, values in stats.items():
+ if self._display_guests:
+ if self.get_gname_from_pid(key):
+ total += values.value
+ continue
+ if not key.find(' ') != -1:
+ total += values.value
else:
- return (0, -stats[x][0])
+ ctotal += values.value
+ if total == 0.:
+ # we don't have any fields, or all non-child events are filtered
+ total = ctotal
- def sortTotal(x):
- # sort by totals
- return (0, -stats[x][0])
- total = 0.
- for key in stats.keys():
- if key.find('(') is -1:
- total += stats[key][0]
- if self._sorting == SORT_DEFAULT:
- sortkey = sortCurAvg
- else:
- sortkey = sortTotal
+ # print events
tavg = 0
- for key in sorted(stats.keys(), key=sortkey):
- if row >= self.screen.getmaxyx()[0] - 1:
- break
- values = stats[key]
- if not values[0] and not values[1]:
+ tcur = 0
+ for key, values in get_sorted_events(self, stats):
+ if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
break
- if values[0] is not None:
- cur = int(round(values[1] / sleeptime)) if values[1] else ''
- if self._display_guests:
- key = self.get_gname_from_pid(key)
- self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
- (key, values[0], values[0] * 100 / total,
- cur))
- if cur is not '' and key.find('(') is -1:
- tavg += cur
+ if self._display_guests:
+ key = self.get_gname_from_pid(key)
+ if not key:
+ continue
+ cur = int(round(values.delta / sleeptime)) if values.delta else ''
+ if key[0] != ' ':
+ if values.delta:
+ tcur += values.delta
+ ptotal = values.value
+ ltotal = total
+ else:
+ ltotal = ptotal
+ self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % (key,
+ values.value,
+ values.value * 100 / float(ltotal), cur))
row += 1
if row == 3:
self.screen.addstr(4, 1, 'No matching events reported yet')
- else:
+ if row > 4:
+ tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
self.screen.addstr(row, 1, '%-40s %10d %8s' %
- ('Total', total, tavg if tavg else ''),
- curses.A_BOLD)
+ ('Total', total, tavg), curses.A_BOLD)
self.screen.refresh()
- def show_msg(self, text):
+ def _show_msg(self, text):
"""Display message centered text and exit on key press"""
hint = 'Press any key to continue'
curses.cbreak()
@@ -1139,16 +1227,16 @@ class Tui(object):
curses.A_STANDOUT)
self.screen.getkey()
- def show_help_interactive(self):
+ def _show_help_interactive(self):
"""Display help with list of interactive commands"""
msg = (' b toggle events by guests (debugfs only, honors'
' filters)',
' c clear filter',
' f filter by regular expression',
- ' g filter by guest name',
+ ' g filter by guest name/PID',
' h display interactive commands reference',
' o toggle sorting order (Total vs CurAvg/s)',
- ' p filter by PID',
+ ' p filter by guest name/PID',
' q quit',
' r reset stats',
' s set update interval',
@@ -1165,14 +1253,15 @@ class Tui(object):
self.screen.addstr(row, 0, line)
row += 1
self.screen.getkey()
- self.refresh_header()
+ self._refresh_header()
- def show_filter_selection(self):
+ def _show_filter_selection(self):
"""Draws filter selection mask.
Asks for a valid regex and sets the fields filter accordingly.
"""
+ msg = ''
while True:
self.screen.erase()
self.screen.addstr(0, 0,
@@ -1181,61 +1270,25 @@ class Tui(object):
self.screen.addstr(2, 0,
"Current regex: {0}"
.format(self.stats.fields_filter))
+ self.screen.addstr(5, 0, msg)
self.screen.addstr(3, 0, "New regex: ")
curses.echo()
regex = self.screen.getstr().decode(ENCODING)
curses.noecho()
if len(regex) == 0:
- self.stats.fields_filter = DEFAULT_REGEX
- self.refresh_header()
+ self.stats.fields_filter = ''
+ self._refresh_header()
return
try:
re.compile(regex)
self.stats.fields_filter = regex
- self.refresh_header()
+ self._refresh_header()
return
except re.error:
+ msg = '"' + regex + '": Not a valid regular expression'
continue
- def show_vm_selection_by_pid(self):
- """Draws PID selection mask.
-
- Asks for a pid until a valid pid or 0 has been entered.
-
- """
- msg = ''
- while True:
- self.screen.erase()
- self.screen.addstr(0, 0,
- 'Show statistics for specific pid.',
- curses.A_BOLD)
- self.screen.addstr(1, 0,
- 'This might limit the shown data to the trace '
- 'statistics.')
- self.screen.addstr(5, 0, msg)
- self.print_all_gnames(7)
-
- curses.echo()
- self.screen.addstr(3, 0, "Pid [0 or pid]: ")
- pid = self.screen.getstr().decode(ENCODING)
- curses.noecho()
-
- try:
- if len(pid) > 0:
- pid = int(pid)
- if pid != 0 and not os.path.isdir(os.path.join('/proc/',
- str(pid))):
- msg = '"' + str(pid) + '": Not a running process'
- continue
- else:
- pid = 0
- self.refresh_header(pid)
- self.update_pid(pid)
- break
- except ValueError:
- msg = '"' + str(pid) + '": Not a valid pid'
-
- def show_set_update_interval(self):
+ def _show_set_update_interval(self):
"""Draws update interval selection mask."""
msg = ''
while True:
@@ -1265,60 +1318,67 @@ class Tui(object):
except ValueError:
msg = '"' + str(val) + '": Invalid value'
- self.refresh_header()
+ self._refresh_header()
- def show_vm_selection_by_guest_name(self):
+ def _show_vm_selection_by_guest(self):
"""Draws guest selection mask.
- Asks for a guest name until a valid guest name or '' is entered.
+ Asks for a guest name or pid until a valid guest name or '' is entered.
"""
msg = ''
while True:
self.screen.erase()
self.screen.addstr(0, 0,
- 'Show statistics for specific guest.',
+ 'Show statistics for specific guest or pid.',
curses.A_BOLD)
self.screen.addstr(1, 0,
'This might limit the shown data to the trace '
'statistics.')
self.screen.addstr(5, 0, msg)
- self.print_all_gnames(7)
+ self._print_all_gnames(7)
curses.echo()
- self.screen.addstr(3, 0, "Guest [ENTER or guest]: ")
- gname = self.screen.getstr().decode(ENCODING)
+ curses.curs_set(1)
+ self.screen.addstr(3, 0, "Guest or pid [ENTER exits]: ")
+ guest = self.screen.getstr().decode(ENCODING)
curses.noecho()
- if not gname:
- self.refresh_header(0)
- self.update_pid(0)
+ pid = 0
+ if not guest or guest == '0':
break
- else:
- pids = []
- try:
- pids = self.get_pid_from_gname(gname)
- except:
- msg = '"' + gname + '": Internal error while searching, ' \
- 'use pid filter instead'
- continue
- if len(pids) == 0:
- msg = '"' + gname + '": Not an active guest'
+ if guest.isdigit():
+ if not os.path.isdir(os.path.join('/proc/', guest)):
+ msg = '"' + guest + '": Not a running process'
continue
- if len(pids) > 1:
- msg = '"' + gname + '": Multiple matches found, use pid ' \
- 'filter instead'
- continue
- self.refresh_header(pids[0])
- self.update_pid(pids[0])
+ pid = int(guest)
break
+ pids = []
+ try:
+ pids = self.get_pid_from_gname(guest)
+ except:
+ msg = '"' + guest + '": Internal error while searching, ' \
+ 'use pid filter instead'
+ continue
+ if len(pids) == 0:
+ msg = '"' + guest + '": Not an active guest'
+ continue
+ if len(pids) > 1:
+ msg = '"' + guest + '": Multiple matches found, use pid ' \
+ 'filter instead'
+ continue
+ pid = pids[0]
+ break
+ curses.curs_set(0)
+ self._refresh_header(pid)
+ self._update_pid(pid)
def show_stats(self):
"""Refreshes the screen and processes user input."""
sleeptime = self._delay_initial
- self.refresh_header()
+ self._refresh_header()
start = 0.0 # result based on init value never appears on screen
while True:
- self.refresh_body(time.time() - start)
+ self._refresh_body(time.time() - start)
curses.halfdelay(int(sleeptime * 10))
start = time.time()
sleeptime = self._delay_regular
@@ -1327,47 +1387,39 @@ class Tui(object):
if char == 'b':
self._display_guests = not self._display_guests
if self.stats.toggle_display_guests(self._display_guests):
- self.show_msg(['Command not available with tracepoints'
- ' enabled', 'Restart with debugfs only '
- '(see option \'-d\') and try again!'])
+ self._show_msg(['Command not available with '
+ 'tracepoints enabled', 'Restart with '
+ 'debugfs only (see option \'-d\') and '
+ 'try again!'])
self._display_guests = not self._display_guests
- self.refresh_header()
+ self._refresh_header()
if char == 'c':
- self.stats.fields_filter = DEFAULT_REGEX
- self.refresh_header(0)
- self.update_pid(0)
+ self.stats.fields_filter = ''
+ self._refresh_header(0)
+ self._update_pid(0)
if char == 'f':
curses.curs_set(1)
- self.show_filter_selection()
+ self._show_filter_selection()
curses.curs_set(0)
sleeptime = self._delay_initial
- if char == 'g':
- curses.curs_set(1)
- self.show_vm_selection_by_guest_name()
- curses.curs_set(0)
+ if char == 'g' or char == 'p':
+ self._show_vm_selection_by_guest()
sleeptime = self._delay_initial
if char == 'h':
- self.show_help_interactive()
+ self._show_help_interactive()
if char == 'o':
self._sorting = not self._sorting
- if char == 'p':
- curses.curs_set(1)
- self.show_vm_selection_by_pid()
- curses.curs_set(0)
- sleeptime = self._delay_initial
if char == 'q':
break
if char == 'r':
self.stats.reset()
if char == 's':
curses.curs_set(1)
- self.show_set_update_interval()
+ self._show_set_update_interval()
curses.curs_set(0)
sleeptime = self._delay_initial
if char == 'x':
- self.update_drilldown()
- # prevents display of current values on next refresh
- self.stats.get(self._display_guests)
+ self.stats.child_events = not self.stats.child_events
except KeyboardInterrupt:
break
except curses.error:
@@ -1380,9 +1432,9 @@ def batch(stats):
s = stats.get()
time.sleep(1)
s = stats.get()
- for key in sorted(s.keys()):
- values = s[key]
- print('%-42s%10d%10d' % (key, values[0], values[1]))
+ for key, values in sorted(s.items()):
+ print('%-42s%10d%10d' % (key.split(' ')[0], values.value,
+ values.delta))
except KeyboardInterrupt:
pass
@@ -1392,14 +1444,14 @@ def log(stats):
keys = sorted(stats.get().keys())
def banner():
- for k in keys:
- print(k, end=' ')
+ for key in keys:
+ print(key.split(' ')[0], end=' ')
print()
def statline():
s = stats.get()
- for k in keys:
- print(' %9d' % s[k][1], end=' ')
+ for key in keys:
+ print(' %9d' % s[key].delta, end=' ')
print()
line = 0
banner_repeat = 20
@@ -1504,7 +1556,7 @@ Press any other key to refresh statistics immediately.
)
optparser.add_option('-f', '--fields',
action='store',
- default=DEFAULT_REGEX,
+ default='',
dest='fields',
help='''fields to display (regex)
"-f help" for a list of available events''',
@@ -1539,17 +1591,6 @@ Press any other key to refresh statistics immediately.
def check_access(options):
"""Exits if the current user can't access all needed directories."""
- if not os.path.exists('/sys/kernel/debug'):
- sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.')
- sys.exit(1)
-
- if not os.path.exists(PATH_DEBUGFS_KVM):
- sys.stderr.write("Please make sure, that debugfs is mounted and "
- "readable by the current user:\n"
- "('mount -t debugfs debugfs /sys/kernel/debug')\n"
- "Also ensure, that the kvm modules are loaded.\n")
- sys.exit(1)
-
if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or
not options.debugfs):
sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
@@ -1567,7 +1608,33 @@ def check_access(options):
return options
+def assign_globals():
+ global PATH_DEBUGFS_KVM
+ global PATH_DEBUGFS_TRACING
+
+ debugfs = ''
+ for line in file('/proc/mounts'):
+ if line.split(' ')[0] == 'debugfs':
+ debugfs = line.split(' ')[1]
+ break
+ if debugfs == '':
+ sys.stderr.write("Please make sure that CONFIG_DEBUG_FS is enabled in "
+ "your kernel, mounted and\nreadable by the current "
+ "user:\n"
+ "('mount -t debugfs debugfs /sys/kernel/debug')\n")
+ sys.exit(1)
+
+ PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm')
+ PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing')
+
+ if not os.path.exists(PATH_DEBUGFS_KVM):
+ sys.stderr.write("Please make sure that CONFIG_KVM is enabled in "
+ "your kernel and that the modules are loaded.\n")
+ sys.exit(1)
+
+
def main():
+ assign_globals()
options = get_options()
options = check_access(options)
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index b5b3810c9e94..0811d860fe75 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -35,13 +35,13 @@ INTERACTIVE COMMANDS
*f*:: filter by regular expression
-*g*:: filter by guest name
+*g*:: filter by guest name/PID
*h*:: display interactive commands reference
*o*:: toggle sorting order (Total vs CurAvg/s)
-*p*:: filter by PID
+*p*:: filter by guest name/PID
*q*:: quit
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 97073d649c1a..5bbbf285af74 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1060,11 +1060,12 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
prog->insns = new_insn;
prog->main_prog_cnt = prog->insns_cnt;
prog->insns_cnt = new_cnt;
+ pr_debug("added %zd insn from %s to prog %s\n",
+ text->insns_cnt, text->section_name,
+ prog->section_name);
}
insn = &prog->insns[relo->insn_idx];
insn->imm += prog->main_prog_cnt - relo->insn_idx;
- pr_debug("added %zd insn from %s to prog %s\n",
- text->insns_cnt, text->section_name, prog->section_name);
return 0;
}
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 57254f5b2779..694abc628e9b 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -29,7 +29,7 @@
#include "builtin.h"
#include "check.h"
-bool no_fp, no_unreachable;
+bool no_fp, no_unreachable, retpoline, module;
static const char * const check_usage[] = {
"objtool check [<options>] file.o",
@@ -39,6 +39,8 @@ static const char * const check_usage[] = {
const struct option check_options[] = {
OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
+ OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
+ OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
OPT_END(),
};
@@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv)
objname = argv[0];
- return check(objname, no_fp, no_unreachable, false);
+ return check(objname, false);
}
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 91e8e19ff5e0..77ea2b97117d 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -25,7 +25,6 @@
*/
#include <string.h>
-#include <subcmd/parse-options.h>
#include "builtin.h"
#include "check.h"
@@ -36,9 +35,6 @@ static const char *orc_usage[] = {
NULL,
};
-extern const struct option check_options[];
-extern bool no_fp, no_unreachable;
-
int cmd_orc(int argc, const char **argv)
{
const char *objname;
@@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv)
objname = argv[0];
- return check(objname, no_fp, no_unreachable, true);
+ return check(objname, true);
}
if (!strcmp(argv[0], "dump")) {
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index dd526067fed5..28ff40e19a14 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -17,6 +17,11 @@
#ifndef _BUILTIN_H
#define _BUILTIN_H
+#include <subcmd/parse-options.h>
+
+extern const struct option check_options[];
+extern bool no_fp, no_unreachable, retpoline, module;
+
extern int cmd_check(int argc, const char **argv);
extern int cmd_orc(int argc, const char **argv);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a8cb69a26576..472e64e95891 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -18,6 +18,7 @@
#include <string.h>
#include <stdlib.h>
+#include "builtin.h"
#include "check.h"
#include "elf.h"
#include "special.h"
@@ -33,7 +34,6 @@ struct alternative {
};
const char *objname;
-static bool no_fp;
struct cfi_state initial_func_cfi;
struct instruction *find_insn(struct objtool_file *file,
@@ -497,6 +497,7 @@ static int add_jump_destinations(struct objtool_file *file)
* disguise, so convert them accordingly.
*/
insn->type = INSN_JUMP_DYNAMIC;
+ insn->retpoline_safe = true;
continue;
} else {
/* sibling call */
@@ -548,7 +549,8 @@ static int add_call_destinations(struct objtool_file *file)
if (!insn->call_dest && !insn->ignore) {
WARN_FUNC("unsupported intra-function call",
insn->sec, insn->offset);
- WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+ if (retpoline)
+ WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
return -1;
}
@@ -1108,6 +1110,54 @@ static int read_unwind_hints(struct objtool_file *file)
return 0;
}
+static int read_retpoline_hints(struct objtool_file *file)
+{
+ struct section *sec, *relasec;
+ struct instruction *insn;
+ struct rela *rela;
+ int i;
+
+ sec = find_section_by_name(file->elf, ".discard.retpoline_safe");
+ if (!sec)
+ return 0;
+
+ relasec = sec->rela;
+ if (!relasec) {
+ WARN("missing .rela.discard.retpoline_safe section");
+ return -1;
+ }
+
+ if (sec->len % sizeof(unsigned long)) {
+ WARN("retpoline_safe size mismatch: %d %ld", sec->len, sizeof(unsigned long));
+ return -1;
+ }
+
+ for (i = 0; i < sec->len / sizeof(unsigned long); i++) {
+ rela = find_rela_by_dest(sec, i * sizeof(unsigned long));
+ if (!rela) {
+ WARN("can't find rela for retpoline_safe[%d]", i);
+ return -1;
+ }
+
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!insn) {
+ WARN("can't find insn for retpoline_safe[%d]", i);
+ return -1;
+ }
+
+ if (insn->type != INSN_JUMP_DYNAMIC &&
+ insn->type != INSN_CALL_DYNAMIC) {
+			WARN_FUNC("retpoline_safe hint not an indirect jump/call",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ insn->retpoline_safe = true;
+ }
+
+ return 0;
+}
+
static int decode_sections(struct objtool_file *file)
{
int ret;
@@ -1146,6 +1196,10 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ ret = read_retpoline_hints(file);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1891,6 +1945,38 @@ static int validate_unwind_hints(struct objtool_file *file)
return warnings;
}
+static int validate_retpoline(struct objtool_file *file)
+{
+ struct instruction *insn;
+ int warnings = 0;
+
+ for_each_insn(file, insn) {
+ if (insn->type != INSN_JUMP_DYNAMIC &&
+ insn->type != INSN_CALL_DYNAMIC)
+ continue;
+
+ if (insn->retpoline_safe)
+ continue;
+
+ /*
+		 * .init.text code is run before userspace and thus doesn't
+		 * strictly need retpolines - except for modules, which are
+		 * loaded late and therefore very much do need retpolines in
+		 * their .init.text.
+ */
+ if (!strcmp(insn->sec->name, ".init.text") && !module)
+ continue;
+
+ WARN_FUNC("indirect %s found in RETPOLINE build",
+ insn->sec, insn->offset,
+ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+
+ warnings++;
+ }
+
+ return warnings;
+}
+
static bool is_kasan_insn(struct instruction *insn)
{
return (insn->type == INSN_CALL &&
@@ -2022,13 +2108,12 @@ static void cleanup(struct objtool_file *file)
elf_close(file->elf);
}
-int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
+int check(const char *_objname, bool orc)
{
struct objtool_file file;
int ret, warnings = 0;
objname = _objname;
- no_fp = _no_fp;
file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
if (!file.elf)
@@ -2052,6 +2137,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
if (list_empty(&file.insn_list))
goto out;
+ if (retpoline) {
+ ret = validate_retpoline(&file);
+ if (ret < 0)
+ return ret;
+ warnings += ret;
+ }
+
ret = validate_functions(&file);
if (ret < 0)
goto out;
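
With the options added in builtin-check.c above, the pass is invoked as 'objtool check --retpoline file.o' (plus --module for objects destined for a kernel module). Indirect branches that are intentionally left unprotected are whitelisted through the .discard.retpoline_safe section that read_retpoline_hints() consumes; on the kernel side that is done by an annotation macro from the companion patches, roughly of the following shape (a sketch under that assumption, not a definition taken from this diff):

	/* Sketch: record the address of the following indirect branch in
	 * .discard.retpoline_safe so objtool marks it retpoline_safe. */
	#define ANNOTATE_RETPOLINE_SAFE				\
		"999:\n\t"					\
		".pushsection .discard.retpoline_safe\n\t"	\
		_ASM_PTR " 999b\n\t"				\
		".popsection\n\t"

read_retpoline_hints() resolves each recorded address back to an instruction and sets insn->retpoline_safe, so validate_retpoline() skips it.
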
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 23a1d065cae1..c6b68fcb926f 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -45,6 +45,7 @@ struct instruction {
unsigned char type;
unsigned long immediate;
bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
+ bool retpoline_safe;
struct symbol *call_dest;
struct instruction *jump_dest;
struct instruction *first_jump_src;
@@ -63,7 +64,7 @@ struct objtool_file {
bool ignore_unreachables, c_file, hints;
};
-int check(const char *objname, bool no_fp, bool no_unreachable, bool orc);
+int check(const char *objname, bool orc);
struct instruction *find_insn(struct objtool_file *file,
struct section *sec, unsigned long offset);
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 44ef9eba5a7a..6c645eb77d42 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -178,6 +178,55 @@ void idr_get_next_test(int base)
idr_destroy(&idr);
}
+int idr_u32_cb(int id, void *ptr, void *data)
+{
+ BUG_ON(id < 0);
+ BUG_ON(ptr != DUMMY_PTR);
+ return 0;
+}
+
+void idr_u32_test1(struct idr *idr, u32 handle)
+{
+ static bool warned = false;
+ u32 id = handle;
+ int sid = 0;
+ void *ptr;
+
+ BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
+ BUG_ON(id != handle);
+ BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
+ BUG_ON(id != handle);
+ if (!warned && id > INT_MAX)
+ printk("vvv Ignore these warnings\n");
+ ptr = idr_get_next(idr, &sid);
+ if (id > INT_MAX) {
+ BUG_ON(ptr != NULL);
+ BUG_ON(sid != 0);
+ } else {
+ BUG_ON(ptr != DUMMY_PTR);
+ BUG_ON(sid != id);
+ }
+ idr_for_each(idr, idr_u32_cb, NULL);
+ if (!warned && id > INT_MAX) {
+ printk("^^^ Warnings over\n");
+ warned = true;
+ }
+ BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
+ BUG_ON(!idr_is_empty(idr));
+}
+
+void idr_u32_test(int base)
+{
+ DEFINE_IDR(idr);
+ idr_init_base(&idr, base);
+ idr_u32_test1(&idr, 10);
+ idr_u32_test1(&idr, 0x7fffffff);
+ idr_u32_test1(&idr, 0x80000000);
+ idr_u32_test1(&idr, 0x80000001);
+ idr_u32_test1(&idr, 0xffe00000);
+ idr_u32_test1(&idr, 0xffffffff);
+}
+
void idr_checks(void)
{
unsigned long i;
@@ -248,6 +297,9 @@ void idr_checks(void)
idr_get_next_test(0);
idr_get_next_test(1);
idr_get_next_test(4);
+ idr_u32_test(4);
+ idr_u32_test(1);
+ idr_u32_test(0);
}
/*
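
The new idr_u32_test() pins down how idr_alloc_u32() behaves for handles at and above INT_MAX. For reference, a kernel-side caller allocating at a fixed 32-bit handle would look roughly like this (a sketch, not part of the test; the error code matches what the test above asserts):

	u32 id = 0x80000001;
	int err;

	/* passing the handle as both the starting id and the maximum pins it exactly */
	err = idr_alloc_u32(&idr, ptr, &id, id, GFP_KERNEL);
	if (err)
		return err;	/* -ENOSPC if the handle is already in use */
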
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 6903ccf35595..44a0d1ad4408 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -29,7 +29,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
struct radix_tree_node *node;
- if (flags & __GFP_NOWARN)
+ if (!(flags & __GFP_DIRECT_RECLAIM))
return NULL;
pthread_mutex_lock(&cachep->lock);
@@ -73,10 +73,17 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
void *kmalloc(size_t size, gfp_t gfp)
{
- void *ret = malloc(size);
+ void *ret;
+
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ return NULL;
+
+ ret = malloc(size);
uatomic_inc(&nr_allocated);
if (kmalloc_verbose)
printf("Allocating %p from malloc\n", ret);
+ if (gfp & __GFP_ZERO)
+ memset(ret, 0, size);
return ret;
}
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/testing/radix-tree/linux/compiler_types.h
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h
index e9fff59dfd8a..e3201ccf54c3 100644
--- a/tools/testing/radix-tree/linux/gfp.h
+++ b/tools/testing/radix-tree/linux/gfp.h
@@ -11,6 +11,7 @@
#define __GFP_IO 0x40u
#define __GFP_FS 0x80u
#define __GFP_NOWARN 0x200u
+#define __GFP_ZERO 0x8000u
#define __GFP_ATOMIC 0x80000u
#define __GFP_ACCOUNT 0x100000u
#define __GFP_DIRECT_RECLAIM 0x400000u
diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h
index 979baeec7e70..a037def0dec6 100644
--- a/tools/testing/radix-tree/linux/slab.h
+++ b/tools/testing/radix-tree/linux/slab.h
@@ -3,6 +3,7 @@
#define SLAB_H
#include <linux/types.h>
+#include <linux/gfp.h>
#define SLAB_HWCACHE_ALIGN 1
#define SLAB_PANIC 2
@@ -11,6 +12,11 @@
void *kmalloc(size_t size, gfp_t);
void kfree(void *);
+static inline void *kzalloc(size_t size, gfp_t gfp)
+{
+ return kmalloc(size, gfp | __GFP_ZERO);
+}
+
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
void kmem_cache_free(struct kmem_cache *cachep, void *objp);
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index 1a7492268993..f6304d2be90c 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -11,11 +11,11 @@ all:
BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
- #SUBDIR test prog name should be in the form: SUBDIR_test.sh
+ #SUBDIR test prog name should be in the form: SUBDIR_test.sh \
TEST=$$DIR"_test.sh"; \
- if [ -e $$DIR/$$TEST ]; then
- rsync -a $$DIR/$$TEST $$BUILD_TARGET/;
- fi
+ if [ -e $$DIR/$$TEST ]; then \
+ rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \
+ fi \
done
override define RUN_TESTS
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index cc15af2e54fe..9cf83f895d98 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -11,3 +11,4 @@ test_progs
test_tcpbpf_user
test_verifier_log
feature
+test_libbpf_open
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 436c4c72414f..9e03a4c356a4 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data)
fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
2, map_flags);
if (fd < 0) {
+ if (errno == ENOMEM)
+ return;
printf("Failed to create hashmap key=%d value=%d '%s'\n",
i, j, strerror(errno));
exit(1);
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
index 57119ad57a3f..3e645ee41ed5 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -5,7 +5,6 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
-#include <linux/in6.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/tcp.h>
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index c0f16e93f9bd..c73592fa3d41 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2587,6 +2587,32 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
+ "runtime/jit: pass negative index to tail_call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog = { 1 },
+ .result = ACCEPT,
+ },
+ {
+ "runtime/jit: pass > 32bit index to tail_call",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog = { 2 },
+ .result = ACCEPT,
+ },
+ {
"stack pointer arithmetic",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 4),
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
index cea4adcd42b8..a63e8453984d 100644
--- a/tools/testing/selftests/futex/Makefile
+++ b/tools/testing/selftests/futex/Makefile
@@ -12,9 +12,9 @@ all:
BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
- if [ -e $$DIR/$(TEST_PROGS) ]; then
- rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/;
- fi
+ if [ -e $$DIR/$(TEST_PROGS) ]; then \
+ rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
+ fi \
done
override define RUN_TESTS
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
new file mode 100644
index 000000000000..835c7f4dadcd
--- /dev/null
+++ b/tools/testing/selftests/memfd/config
@@ -0,0 +1 @@
+CONFIG_FUSE_FS=m
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
index 86636d207adf..183b46883875 100644
--- a/tools/testing/selftests/memory-hotplug/Makefile
+++ b/tools/testing/selftests/memory-hotplug/Makefile
@@ -4,7 +4,7 @@ all:
include ../lib.mk
TEST_PROGS := mem-on-off-test.sh
-override RUN_TESTS := ./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
+override RUN_TESTS := @./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
override EMIT_TESTS := echo "$(RUN_TESTS)"
run_full_test:
diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config
index 6a8e5a9bfc10..d148f9f89fb6 100644
--- a/tools/testing/selftests/pstore/config
+++ b/tools/testing/selftests/pstore/config
@@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y
CONFIG_PSTORE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=m
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 0b457e8e0f0c..5df609950a66 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -141,6 +141,15 @@ struct seccomp_data {
#define SECCOMP_FILTER_FLAG_LOG 2
#endif
+#ifndef PTRACE_SECCOMP_GET_METADATA
+#define PTRACE_SECCOMP_GET_METADATA 0x420d
+
+struct seccomp_metadata {
+ __u64 filter_off; /* Input: which filter */
+ __u64 flags; /* Output: filter's flags */
+};
+#endif
+
#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
@@ -2845,6 +2854,58 @@ TEST(get_action_avail)
EXPECT_EQ(errno, EOPNOTSUPP);
}
+TEST(get_metadata)
+{
+ pid_t pid;
+ int pipefd[2];
+ char buf;
+ struct seccomp_metadata md;
+
+ ASSERT_EQ(0, pipe(pipefd));
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ struct sock_filter filter[] = {
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog prog = {
+ .len = (unsigned short)ARRAY_SIZE(filter),
+ .filter = filter,
+ };
+
+ /* one with log, one without */
+ ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
+ SECCOMP_FILTER_FLAG_LOG, &prog));
+ ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
+
+ ASSERT_EQ(0, close(pipefd[0]));
+ ASSERT_EQ(1, write(pipefd[1], "1", 1));
+ ASSERT_EQ(0, close(pipefd[1]));
+
+ while (1)
+ sleep(100);
+ }
+
+ ASSERT_EQ(0, close(pipefd[1]));
+ ASSERT_EQ(1, read(pipefd[0], &buf, 1));
+
+ ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
+ ASSERT_EQ(pid, waitpid(pid, NULL, 0));
+
+ md.filter_off = 0;
+ ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
+ EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
+ EXPECT_EQ(md.filter_off, 0);
+
+ md.filter_off = 1;
+ ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
+ EXPECT_EQ(md.flags, 0);
+ EXPECT_EQ(md.filter_off, 1);
+
+ ASSERT_EQ(0, kill(pid, SIGKILL));
+}
+
/*
* TODO:
* - add microbenchmarks
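
The get_metadata test above doubles as a usage reference for the new PTRACE_SECCOMP_GET_METADATA request. A stand-alone tracer sketch, assuming the fallback definitions at the top of the file plus the usual <sys/ptrace.h> and <stdio.h> includes (the exact errno returned once filter_off runs past the last filter is not relied upon here):

	/* Dump the flags of every seccomp filter attached to a stopped tracee. */
	static void dump_seccomp_filters(pid_t pid)
	{
		struct seccomp_metadata md;
		unsigned long long off;

		for (off = 0; ; off++) {
			md.filter_off = off;
			if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md) < 0)
				break;	/* no more filters, or request unsupported */
			printf("filter %llu: flags 0x%llx\n", off,
			       (unsigned long long)md.flags);
		}
	}
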
diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
index b3c8ba3cb668..d0121a8a3523 100644
--- a/tools/testing/selftests/sync/Makefile
+++ b/tools/testing/selftests/sync/Makefile
@@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS)
$(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS)
$(OBJS): $(OUTPUT)/%.o: %.c
- $(CC) -c $^ -o $@
+ $(CC) -c $^ -o $@ $(CFLAGS)
$(TESTS): $(OUTPUT)/%.o: %.c
$(CC) -c $^ -o $@
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
index 3d5a62ff7d31..f5d7a7851e21 100644
--- a/tools/testing/selftests/vDSO/Makefile
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+include ../lib.mk
+
ifndef CROSS_COMPILE
CFLAGS := -std=gnu99
CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
@@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y)
LDLIBS += -lgcc_s
endif
-TEST_PROGS := vdso_test vdso_standalone_test_x86
+TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86
all: $(TEST_PROGS)
-vdso_test: parse_vdso.c vdso_test.c
-vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
+$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c
+$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
$(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
vdso_standalone_test_x86.c parse_vdso.c \
- -o vdso_standalone_test_x86
+ -o $@
-include ../lib.mk
-clean:
- rm -fr $(TEST_PROGS)
+EXTRA_CLEAN := $(TEST_PROGS)
endif
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 63c94d776e89..342c7bc9dc8c 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -11,3 +11,4 @@ mlock-intersect-test
mlock-random-test
virtual_address_range
gup_benchmark
+va_128TBswitch
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 70268c0bec79..70f4c30918eb 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
static const struct kvm_irq_level default_ptimer_irq = {
.irq = 30,
.level = 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
return timecounter->cc->read(timecounter->cc);
}
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+ return static_branch_unlikely(&userspace_irqchip_in_use) &&
+ unlikely(!irqchip_in_kernel(kvm));
+}
+
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
cancel_work_sync(work);
}
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
- /*
- * When using a userspace irqchip with the architected timers, we must
- * prevent continuously exiting from the guest, and therefore mask the
- * physical interrupt by disabling it on the host interrupt controller
- * when the virtual level is high, such that the guest can make
- * forward progress. Once we detect the output level being
- * de-asserted, we unmask the interrupt again so that we exit from the
- * guest when the timer fires.
- */
- if (vtimer->irq.level)
- disable_percpu_irq(host_vtimer_irq);
- else
- enable_percpu_irq(host_vtimer_irq, 0);
-}
-
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
if (kvm_timer_should_fire(vtimer))
kvm_timer_update_irq(vcpu, true, vtimer);
- if (static_branch_unlikely(&userspace_irqchip_in_use) &&
- unlikely(!irqchip_in_kernel(vcpu->kvm)))
- kvm_vtimer_update_mask_user(vcpu);
+ if (userspace_irqchip(vcpu->kvm) &&
+ !static_branch_unlikely(&has_gic_active_state))
+ disable_percpu_irq(host_vtimer_irq);
return IRQ_HANDLED;
}
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
timer_ctx->irq.level);
- if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
- likely(irqchip_in_kernel(vcpu->kvm))) {
+ if (!userspace_irqchip(vcpu->kvm)) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
timer_ctx->irq.irq,
timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
phys_timer_emulate(vcpu);
}
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
- timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
- timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
static void vtimer_save_state(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
if (!vtimer->loaded)
goto out;
- if (timer->enabled)
- __timer_snapshot_state(vtimer);
+ if (timer->enabled) {
+ vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+ vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+ }
/* Disable the virtual timer */
write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+ int r;
+ r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+ WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
bool phys_active;
- int ret;
- phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
-
- ret = irq_set_irqchip_state(host_vtimer_irq,
- IRQCHIP_STATE_ACTIVE,
- phys_active);
- WARN_ON(ret);
+ if (irqchip_in_kernel(vcpu->kvm))
+ phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+ else
+ phys_active = vtimer->irq.level;
+ set_vtimer_irq_phys_active(vcpu, phys_active);
}
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
- kvm_vtimer_update_mask_user(vcpu);
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+ /*
+ * When using a userspace irqchip with the architected timers and a
+ * host interrupt controller that doesn't support an active state, we
+ * must still prevent continuously exiting from the guest, and
+ * therefore mask the physical interrupt by disabling it on the host
+ * interrupt controller when the virtual level is high, such that the
+ * guest can make forward progress. Once we detect the output level
+ * being de-asserted, we unmask the interrupt again so that we exit
+ * from the guest when the timer fires.
+ */
+ if (vtimer->irq.level)
+ disable_percpu_irq(host_vtimer_irq);
+ else
+ enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
if (unlikely(!timer->enabled))
return;
- if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
- kvm_timer_vcpu_load_user(vcpu);
+ if (static_branch_likely(&has_gic_active_state))
+ kvm_timer_vcpu_load_gic(vcpu);
else
- kvm_timer_vcpu_load_vgic(vcpu);
+ kvm_timer_vcpu_load_nogic(vcpu);
set_cntvoff(vtimer->cntvoff);
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
- __timer_snapshot_state(vtimer);
- if (!kvm_timer_should_fire(vtimer)) {
- kvm_timer_update_irq(vcpu, false, vtimer);
- kvm_vtimer_update_mask_user(vcpu);
- }
+ if (!kvm_timer_should_fire(vtimer)) {
+ kvm_timer_update_irq(vcpu, false, vtimer);
+ if (static_branch_likely(&has_gic_active_state))
+ set_vtimer_irq_phys_active(vcpu, false);
+ else
+ enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
}
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
- unmask_vtimer_irq_user(vcpu);
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ if (unlikely(!timer->enabled))
+ return;
+
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ unmask_vtimer_irq_user(vcpu);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic)
kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
goto out_free_irq;
}
+
+ static_branch_enable(&has_gic_active_state);
}
kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4501e658e8d6..65dea3ffef68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -969,8 +969,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
- if ((slot->id >= KVM_USER_MEM_SLOTS) ||
- (slot->id == id))
+ if (slot->id == id)
continue;
if (!((base_gfn + npages <= slot->base_gfn) ||
(base_gfn >= slot->base_gfn + slot->npages)))