author     Jakub Kicinski <kuba@kernel.org>    2021-10-14 16:50:14 -0700
committer  Jakub Kicinski <kuba@kernel.org>    2021-10-14 16:50:14 -0700
commit     e15f5972b8031f9069f41e24adff63bd34463b3a (patch)
tree       d31c80ea86df9cdd9f8189dc2a89603aac57402f
parent     net: of: fix stub of_net helpers for CONFIG_NET=n (diff)
parent     Merge tag 'net-5.15-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

tools/testing/selftests/net/ioam6.sh
  7b1700e009cc ("selftests: net: modify IOAM tests for undef bits")
  bf77b1400a56 ("selftests: net: Test for the IOAM encapsulation with IPv6")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst28
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml2
-rw-r--r--Documentation/gpu/amdgpu.rst4
-rw-r--r--Documentation/gpu/drm-internals.rst9
-rw-r--r--MAINTAINERS7
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/mm/hugetlbpage.c2
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h8
-rw-r--r--arch/powerpc/include/asm/code-patching.h1
-rw-r--r--arch/powerpc/include/asm/interrupt.h18
-rw-r--r--arch/powerpc/include/asm/security_features.h5
-rw-r--r--arch/powerpc/kernel/dma-iommu.c9
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S25
-rw-r--r--arch/powerpc/kernel/irq.c6
-rw-r--r--arch/powerpc/kernel/security.c5
-rw-r--r--arch/powerpc/kernel/traps.c43
-rw-r--r--arch/powerpc/lib/code-patching.c7
-rw-r--r--arch/powerpc/net/bpf_jit.h33
-rw-r--r--arch/powerpc/net/bpf_jit64.h8
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c6
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c16
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c100
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c4
-rw-r--r--arch/powerpc/platforms/pseries/msi.c15
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/include/asm/syscall.h1
-rw-r--r--arch/riscv/include/asm/vdso.h18
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h3
-rw-r--r--arch/riscv/kernel/syscall_table.c1
-rw-r--r--arch/riscv/kernel/vdso.c53
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S3
-rw-r--r--arch/riscv/mm/cacheflush.c2
-rw-r--r--arch/s390/include/asm/pci.h2
-rw-r--r--arch/s390/net/bpf_jit_comp.c2
-rw-r--r--arch/s390/pci/pci.c45
-rw-r--r--arch/s390/pci/pci_event.c4
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/include/asm/entry-common.h2
-rw-r--r--arch/x86/include/asm/xen/pci.h11
-rw-r--r--arch/x86/kernel/cpu/common.c1
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c6
-rw-r--r--arch/x86/kernel/early-quirks.c6
-rw-r--r--arch/x86/kernel/fpu/signal.c11
-rw-r--r--arch/x86/kernel/hpet.c81
-rw-r--r--arch/x86/kernel/sev-shared.c2
-rw-r--r--arch/x86/pci/xen.c15
-rw-r--r--arch/x86/platform/olpc/olpc.c2
-rw-r--r--arch/x86/platform/pvh/enlighten.c12
-rw-r--r--arch/x86/xen/Kconfig19
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/enlighten.c54
-rw-r--r--arch/x86/xen/enlighten_pv.c35
-rw-r--r--arch/x86/xen/enlighten_pvh.c10
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/x86/xen/xen-ops.h5
-rw-r--r--arch/xtensa/include/asm/kmem_layout.h2
-rw-r--r--arch/xtensa/kernel/irq.c2
-rw-r--r--arch/xtensa/kernel/setup.c12
-rw-r--r--arch/xtensa/mm/mmu.c2
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c12
-rw-r--r--block/bdev.c2
-rw-r--r--block/blk-mq-debugfs.c1
-rw-r--r--block/genhd.c1
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/acpi/arm64/gtdt.c2
-rw-r--r--drivers/base/test/Makefile2
-rw-r--r--drivers/firmware/Kconfig5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c8
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h27
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c7
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h5
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c12
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c8
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h5
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c81
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.h5
-rw-r--r--drivers/gpu/drm/kmb/kmb_regs.h3
-rw-r--r--drivers/gpu/drm/msm/Kconfig4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c311
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c7
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c26
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c97
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c8
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c4
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c11
-rw-r--r--drivers/i2c/i2c-core-acpi.c1
-rw-r--r--drivers/iio/test/Makefile1
-rw-r--r--drivers/iommu/Kconfig3
-rw-r--r--drivers/iommu/arm/arm-smmu/Makefile3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c3
-rw-r--r--drivers/isdn/capi/kcapi.c5
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c73
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c22
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c125
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h9
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c21
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h2
-rw-r--r--drivers/net/dsa/ocelot/felix.c149
-rw-r--r--drivers/net/dsa/ocelot/felix.h1
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/arc/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c52
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c10
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c5
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h4
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c107
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c3
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c19
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ipa/Kconfig1
-rw-r--r--drivers/net/phy/phy_device.c3
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig2
-rw-r--r--drivers/of/base.c1
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c9
-rw-r--r--drivers/pci/pci-acpi.c3
-rw-r--r--drivers/pinctrl/qcom/Kconfig3
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c4
-rw-r--r--drivers/platform/x86/amd-pmc.c1
-rw-r--r--drivers/platform/x86/dell/Kconfig1
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c1
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c23
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c2
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/scsi/arm/acornscsi.c2
-rw-r--r--drivers/scsi/elx/efct/efct_scsi.c3
-rw-r--r--drivers/scsi/libiscsi.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c11
-rw-r--r--drivers/scsi/ufs/ufshcd.c52
-rw-r--r--drivers/scsi/ufs/ufshcd.h1
-rw-r--r--drivers/thunderbolt/Makefile1
-rw-r--r--drivers/tty/hvc/hvc_xen.c13
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c15
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c6
-rw-r--r--drivers/usb/common/Kconfig3
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c14
-rw-r--r--drivers/usb/host/xhci-tegra.c12
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c1
-rw-r--r--drivers/usb/typec/tipd/core.c8
-rw-r--r--drivers/video/fbdev/Kconfig5
-rw-r--r--drivers/video/fbdev/gbefb.c2
-rw-r--r--drivers/xen/Kconfig4
-rw-r--r--drivers/xen/balloon.c21
-rw-r--r--drivers/xen/privcmd.c18
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/dir-item.c48
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/file.c19
-rw-r--r--fs/btrfs/tree-log.c79
-rw-r--r--fs/ksmbd/connection.c10
-rw-r--r--fs/ksmbd/glob.h2
-rw-r--r--fs/ksmbd/smb2misc.c98
-rw-r--r--fs/ksmbd/smb2ops.c5
-rw-r--r--fs/ksmbd/smb2pdu.c59
-rw-r--r--fs/ksmbd/smb2pdu.h1
-rw-r--r--fs/ksmbd/smb_common.c12
-rw-r--r--fs/ksmbd/smb_common.h4
-rw-r--r--include/asm-generic/io.h2
-rw-r--r--include/kunit/test.h6
-rw-r--r--include/linux/arm-smccc.h10
-rw-r--r--include/linux/dsa/mv88e6xxx.h13
-rw-r--r--include/linux/dsa/ocelot.h49
-rw-r--r--include/linux/mlx5/mlx5_ifc.h10
-rw-r--r--include/linux/qcom_scm.h71
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/soc/mscc/ocelot.h55
-rw-r--r--include/soc/mscc/ocelot_ptp.h3
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/xen/xen-ops.h15
-rw-r--r--kernel/cgroup/cpuset.c56
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/workqueue.c18
-rw-r--r--lib/Makefile2
-rw-r--r--lib/kunit/executor_test.c4
-rw-r--r--net/core/net-procfs.c24
-rw-r--r--net/dsa/Kconfig4
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/switch.c2
-rw-r--r--net/dsa/tag_dsa.c28
-rw-r--r--net/dsa/tag_ocelot.c1
-rw-r--r--net/dsa/tag_ocelot_8021q.c40
-rw-r--r--net/ipv4/icmp.c23
-rw-r--r--net/ipv6/ioam6.c70
-rw-r--r--net/ipv6/ioam6_iptunnel.c6
-rw-r--r--net/mptcp/protocol.c55
-rw-r--r--net/nfc/af_nfc.c3
-rw-r--r--net/nfc/digital_core.c9
-rw-r--r--net/nfc/digital_technology.c8
-rw-r--r--net/nfc/nci/rsp.c2
-rw-r--r--net/sched/sch_mqprio.c30
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/smc/smc_cdc.c7
-rw-r--r--net/smc/smc_core.c20
-rw-r--r--net/smc/smc_llc.c63
-rw-r--r--net/smc/smc_tx.c22
-rw-r--r--net/smc/smc_wr.h14
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--scripts/Makefile.gcc-plugins4
-rwxr-xr-xscripts/checksyscalls.sh6
-rw-r--r--sound/core/pcm_compat.c72
-rw-r--r--sound/core/seq_device.c8
-rw-r--r--sound/hda/hdac_controller.c5
-rw-r--r--sound/pci/hda/hda_bind.c20
-rw-r--r--sound/pci/hda/hda_codec.c1
-rw-r--r--sound/pci/hda/hda_controller.c24
-rw-r--r--sound/pci/hda/hda_controller.h2
-rw-r--r--sound/pci/hda/hda_intel.c29
-rw-r--r--sound/pci/hda/hda_intel.h4
-rw-r--r--sound/pci/hda/patch_realtek.c66
-rw-r--r--sound/usb/mixer_scarlett_gen2.c2
-rw-r--r--sound/usb/quirks-table.h42
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/objtool/arch/x86/decode.c2
-rw-r--r--tools/objtool/check.c16
-rw-r--r--tools/objtool/elf.c14
-rw-r--r--tools/objtool/include/objtool/elf.h1
-rw-r--r--tools/objtool/orc_gen.c2
-rw-r--r--tools/objtool/special.c40
-rwxr-xr-xtools/testing/kunit/kunit.py24
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py8
-rwxr-xr-xtools/testing/selftests/net/ioam6.sh21
-rw-r--r--tools/testing/selftests/net/ioam6_parser.c164
284 files changed, 3016 insertions, 1438 deletions
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index babbe04c8d37..4d8c27eca96b 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1226,7 +1226,7 @@ PAGE_SIZE multiple when read back.
Note that all fields in this file are hierarchical and the
file modified event can be generated due to an event down the
- hierarchy. For for the local events at the cgroup level see
+ hierarchy. For the local events at the cgroup level see
memory.events.local.
low
@@ -2170,19 +2170,19 @@ existing device files.
Cgroup v2 device controller has no interface files and is implemented
on top of cgroup BPF. To control access to device files, a user may
-create bpf programs of the BPF_CGROUP_DEVICE type and attach them
-to cgroups. On an attempt to access a device file, corresponding
-BPF programs will be executed, and depending on the return value
-the attempt will succeed or fail with -EPERM.
-
-A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
-structure, which describes the device access attempt: access type
-(mknod/read/write) and device (type, major and minor numbers).
-If the program returns 0, the attempt fails with -EPERM, otherwise
-it succeeds.
-
-An example of BPF_CGROUP_DEVICE program may be found in the kernel
-source tree in the tools/testing/selftests/bpf/progs/dev_cgroup.c file.
+create bpf programs of type BPF_PROG_TYPE_CGROUP_DEVICE and attach
+them to cgroups with BPF_CGROUP_DEVICE flag. On an attempt to access a
+device file, corresponding BPF programs will be executed, and depending
+on the return value the attempt will succeed or fail with -EPERM.
+
+A BPF_PROG_TYPE_CGROUP_DEVICE program takes a pointer to the
+bpf_cgroup_dev_ctx structure, which describes the device access attempt:
+access type (mknod/read/write) and device (type, major and minor numbers).
+If the program returns 0, the attempt fails with -EPERM, otherwise it
+succeeds.
+
+An example of BPF_PROG_TYPE_CGROUP_DEVICE program may be found in
+tools/testing/selftests/bpf/progs/dev_cgroup.c in the kernel source tree.
RDMA
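Illustrative aside, not part of the patch: the documentation hunk above describes the device controller interface only in prose. A minimal sketch of a BPF_PROG_TYPE_CGROUP_DEVICE program in that spirit is shown below; only struct bpf_cgroup_dev_ctx and the "return 0 means -EPERM" convention come from the text itself, the field encoding, section name and build flow are the usual uapi/libbpf conventions (compare the dev_cgroup.c selftest the documentation points to), and the chosen device/policy is hypothetical.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical BPF_PROG_TYPE_CGROUP_DEVICE program: permit read/write on
 * the character device 1:3 (/dev/null), deny everything else.
 * Returning 0 makes the access fail with -EPERM, non-zero lets it succeed.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/dev")
int allow_dev_null(struct bpf_cgroup_dev_ctx *ctx)
{
	short type   = ctx->access_type & 0xFFFF; /* BPF_DEVCG_DEV_BLOCK/CHAR */
	short access = ctx->access_type >> 16;    /* mknod/read/write bits */

	if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3 &&
	    !(access & BPF_DEVCG_ACC_MKNOD))
		return 1; /* allow read/write on /dev/null */

	return 0; /* every other device access fails with -EPERM */
}

char _license[] SEC("license") = "GPL";

Such a program would be loaded with libbpf and attached to a cgroup with the BPF_CGROUP_DEVICE attach type, as the updated documentation describes.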
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 91ba391f9b32..43dc35fe5bc0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1266,7 +1266,7 @@
The VGA and EFI output is eventually overwritten by
the real console.
- The xen output can only be used by Xen PV guests.
+ The xen option can only be used in Xen domains.
The sclp output can only be used on s390.
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
index 2ed010f91e2d..20ce88ab4b3a 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
@@ -22,7 +22,7 @@ properties:
items:
- enum:
# ili9341 240*320 Color on stm32f429-disco board
- - st,sf-tc240t-9370-t
+ - st,sf-tc240t-9370-t
- const: ilitek,ili9341
reg: true
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 42689b7d03a2..c115c95ee584 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -21,6 +21,7 @@ select:
contains:
enum:
- snps,dwmac
+ - snps,dwmac-3.40a
- snps,dwmac-3.50a
- snps,dwmac-3.610
- snps,dwmac-3.70a
@@ -76,6 +77,7 @@ properties:
- rockchip,rk3399-gmac
- rockchip,rv1108-gmac
- snps,dwmac
+ - snps,dwmac-3.40a
- snps,dwmac-3.50a
- snps,dwmac-3.610
- snps,dwmac-3.70a
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
index 364680cdad2e..8ba72e898099 100644
--- a/Documentation/gpu/amdgpu.rst
+++ b/Documentation/gpu/amdgpu.rst
@@ -300,8 +300,8 @@ pcie_replay_count
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
:doc: pcie_replay_count
-+GPU SmartShift Information
-============================
+GPU SmartShift Information
+==========================
GPU SmartShift information via sysfs
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 06af044c882f..607f78f0f189 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -111,15 +111,6 @@ Component Helper Usage
.. kernel-doc:: drivers/gpu/drm/drm_drv.c
:doc: component helper usage recommendations
-IRQ Helper Library
-~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
- :doc: irq helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
- :export:
-
Memory Manager Initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/MAINTAINERS b/MAINTAINERS
index a4a0c2baaf27..703aa3150a15 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7440,7 +7440,7 @@ FREESCALE IMX / MXC FEC DRIVER
M: Joakim Zhang <qiangqing.zhang@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/fsl-fec.txt
+F: Documentation/devicetree/bindings/net/fsl,fec.yaml
F: drivers/net/ethernet/freescale/fec.h
F: drivers/net/ethernet/freescale/fec_main.c
F: drivers/net/ethernet/freescale/fec_ptp.c
@@ -9307,7 +9307,7 @@ S: Maintained
F: drivers/platform/x86/intel/atomisp2/led.c
INTEL BIOS SAR INT1092 DRIVER
-M: Shravan S <s.shravan@intel.com>
+M: Shravan Sudhakar <s.shravan@intel.com>
M: Intel Corporation <linuxwwan@intel.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
@@ -9629,7 +9629,7 @@ F: include/uapi/linux/isst_if.h
F: tools/power/x86/intel-speed-select/
INTEL STRATIX10 FIRMWARE DRIVERS
-M: Richard Gong <richard.gong@linux.intel.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
@@ -11153,6 +11153,7 @@ S: Maintained
F: Documentation/devicetree/bindings/net/dsa/marvell.txt
F: Documentation/networking/devlink/mv88e6xxx.rst
F: drivers/net/dsa/mv88e6xxx/
+F: include/linux/dsa/mv88e6xxx.h
F: include/linux/platform_data/mv88e6xxx.h
MARVELL ARMADA 3700 PHY DRIVERS
diff --git a/Makefile b/Makefile
index 7b74223d1309..4d0c0ed9236e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Opossums on Parade
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fc196421b2ce..59baf6c132a7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1989,8 +1989,6 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
-source "drivers/firmware/Kconfig"
-
if CRYPTO
source "arch/arm/crypto/Kconfig"
endif
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index f266b7b03482..cc88ebe7a60c 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -47,7 +47,7 @@
};
gmac: eth@e0800000 {
- compatible = "st,spear600-gmac";
+ compatible = "snps,dwmac-3.40a";
reg = <0xe0800000 0x8000>;
interrupts = <23 22>;
interrupt-names = "macirq", "eth_wake_irq";
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5c7ae4c3954b..fee914c716aa 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1931,8 +1931,6 @@ source "drivers/cpufreq/Kconfig"
endmenu
-source "drivers/firmware/Kconfig"
-
source "drivers/acpi/Kconfig"
source "arch/arm64/kvm/Kconfig"
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 23505fc35324..a8158c948966 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -43,7 +43,7 @@ void __init arm64_hugetlb_cma_reserve(void)
#ifdef CONFIG_ARM64_4K_PAGES
order = PUD_SHIFT - PAGE_SHIFT;
#else
- order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+ order = CONT_PMD_SHIFT - PAGE_SHIFT;
#endif
/*
* HugeTLB CMA reservation is required for gigantic
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 045792cde481..1e33666fa679 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -388,8 +388,6 @@ config CRASH_DUMP
help
Generate crash dump after being started by kexec.
-source "drivers/firmware/Kconfig"
-
endmenu
menu "Power management and ACPI options"
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 771ca53af06d..6b8f591c5054 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3316,8 +3316,6 @@ source "drivers/cpuidle/Kconfig"
endmenu
-source "drivers/firmware/Kconfig"
-
source "arch/mips/kvm/Kconfig"
source "arch/mips/vdso/Kconfig"
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 4742b6f169b7..27a8b49af11f 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -384,6 +384,4 @@ config KEXEC_FILE
endmenu
-source "drivers/firmware/Kconfig"
-
source "drivers/parisc/Kconfig"
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index d4b145b279f6..9f38040f0641 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
if (kuap_is_disabled())
return;
+ if (unlikely(kuap != KUAP_NONE)) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, false);
+ }
+
+ if (likely(regs->kuap == KUAP_NONE))
+ return;
+
current->thread.kuap = regs->kuap;
kuap_unlock(regs->kuap, false);
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index a95f63788c6b..4ba834599c4d 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,6 +23,7 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 6b800d3e2681..a1d238255f07 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- if (is_implicit_soft_masked(regs)) {
- // Adjust regs->softe soft implicit soft-mask, so
- // arch_irq_disabled_regs(regs) behaves as expected.
+ if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+ /*
+ * Adjust regs->softe to be soft-masked if it had not been
+ * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe
+ * not yet set disabled), or if it was in an implicit soft
+ * masked state. This makes arch_irq_disabled_regs(regs)
+ * behave as expected.
+ */
regs->softe = IRQS_ALL_DISABLED;
}
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
/* Don't do any per-CPU operations until interrupt state is fixed */
@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 792eefaf230b..27574f218b37 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
return !!(powerpc_security_features & feature);
}
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
// Features indicating support for Spectre/Meltdown mitigations
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 111249fd619d..038ce8d9061d 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
struct iommu_table *tbl = get_iommu_table_base(dev);
u64 mask;
+ if (dev_is_pci(dev)) {
+ u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+ if (dma_iommu_dma_supported(dev, bypass_mask)) {
+ dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+ return bypass_mask;
+ }
+ }
+
if (!tbl)
return 0;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 37859e62a8dc..eaf1f72131a1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
li r10,MSR_RI
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception
+ bl machine_check_exception_async
b interrupt_return_srr
@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
subi r12,r12,1
sth r12,PACA_IN_MCE(r13)
- /* Invoke machine_check_exception to print MCE event and panic. */
+ /*
+ * Invoke machine_check_exception to print MCE event and panic.
+ * This is the NMI version of the handler because we are called from
+ * the early handler which is a true NMI.
+ */
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
*/
andi. r10,r12,MSR_PR
- bne 2f /* If userspace, go normal path */
+ bne .Lnormal_stack /* If userspace, go normal path */
andis. r10,r12,(SRR1_PROGTM)@h
- bne 1f /* If TM, emergency */
+ bne .Lemergency_stack /* If TM, emergency */
cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
- blt 2f /* normal path if not */
+ blt .Lnormal_stack /* normal path if not */
/* Use the emergency stack */
-1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
+.Lemergency_stack:
+ andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
/* 3 in EXCEPTION_PROLOG_COMMON */
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
__ISTACK(program_check)=0
__GEN_COMMON_BODY program_check
- b 3f
-2:
+ b .Ldo_program_check
+
+.Lnormal_stack:
__ISTACK(program_check)=1
__GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 551b653228c4..c4f1d6b7d992 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
return;
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(in_nmi() || in_hardirq());
+
/*
* After the stb, interrupts are unmasked and there are no interrupts
* pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
if (mask)
return;
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(in_nmi() || in_hardirq());
+
/*
* From this point onward, we can take interrupts, preempt,
* etc... unless we got hard-disabled. We check if an event
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 1a998490fe60..15fb5ea1b9ea 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
early_param("no_stf_barrier", handle_no_stf_barrier);
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+ return stf_enabled_flush_types;
+}
+
/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index aac8c0412ff9..11741703d26e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
return false;
}
- show_signal_msg(signr, regs, code, addr);
+ /*
+ * Must not enable interrupts even for user-mode exception, because
+ * this can be called from machine check, which may be a NMI or IRQ
+ * which don't like interrupts being enabled. Could check for
+ * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+ * reason why _exception() should enable irqs for an exception handler,
+ * the handlers themselves do that directly.
+ */
- if (arch_irqs_disabled())
- interrupt_cond_local_irq_enable(regs);
+ show_signal_msg(signr, regs, code, addr);
current->thread.trap_nr = code;
@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
* do_exit() checks for in_interrupt() and panics in that case, so
* exit the irq/nmi before calling die.
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
- irq_exit();
- else
+ if (in_nmi())
nmi_exit();
+ else
+ irq_exit();
die(str, regs, err);
}
/*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
* (it uses its own early real-mode handler to handle the MCE proper
* and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
*/
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
{
int recover = 0;
@@ -841,12 +845,19 @@ bail:
/* Must die if the interrupt is not recoverable */
if (regs_is_unrecoverable(regs))
die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
#ifdef CONFIG_PPC_BOOK3S_64
- return;
-#else
- return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+ __machine_check_exception(regs);
+}
#endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+ __machine_check_exception(regs);
+
+ return 0;
}
DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index f9a3019e37b4..c5ed98823835 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}
+bool is_offset_in_cond_branch_range(long offset)
+{
+ return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
/*
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
offset = offset - (unsigned long)addr;
/* Check we can represent the target in the instruction format */
- if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+ if (!is_offset_in_cond_branch_range(offset))
return 1;
/* Mask out the flags and target, so they don't step on each other. */
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 99fad093f43e..7e9b978b768e 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -24,16 +24,30 @@
#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
/* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
- (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest) \
+ do { \
+ long offset = (long)(dest) - (ctx->idx * 4); \
+ if (!is_offset_in_branch_range(offset)) { \
+ pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+ return -ERANGE; \
+ } \
+ EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
+ } while (0)
+
/* blr; (unconditional 'branch' with link) to absolute address */
#define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \
(((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
/* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
- (((cond) & 0x3ff) << 16) | \
- (((dest) - (ctx->idx * 4)) & \
- 0xfffc))
+#define PPC_BCC_SHORT(cond, dest) \
+ do { \
+ long offset = (long)(dest) - (ctx->idx * 4); \
+ if (!is_offset_in_cond_branch_range(offset)) { \
+ pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+ return -ERANGE; \
+ } \
+ EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+ } while (0)
+
/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i) do { \
if ((int)(uintptr_t)(i) >= -32768 && \
@@ -78,11 +92,6 @@
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif
-static inline bool is_nearbranch(int offset)
-{
- return (offset < 32768) && (offset >= -32768);
-}
-
/*
* The fly in the ointment of code size changing from pass to pass is
* avoided by padding the short branch case with a NOP. If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
* state.
*/
#define PPC_BCC(cond, dest) do { \
- if (is_nearbranch((dest) - (ctx->idx * 4))) { \
+ if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
PPC_BCC_SHORT(cond, dest); \
EMIT(PPC_RAW_NOP()); \
} else { \
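Illustrative aside, not part of the patch: the two helpers the JIT now calls encode the PowerPC branch reach limits (roughly ±32 MB for an unconditional branch, ±32 KB for a conditional one, both word aligned). A standalone sketch of the checks, with the range expressions copied from the hunks above, is below; the main() driver is only for demonstration.

#include <stdbool.h>
#include <stdio.h>

/* Unconditional branch (b): 26-bit signed, word-aligned displacement. */
static bool is_offset_in_branch_range(long offset)
{
	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

/* Conditional branch (bc): 16-bit signed, word-aligned displacement. */
static bool is_offset_in_cond_branch_range(long offset)
{
	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}

int main(void)
{
	/* 0x7ffc is the largest forward offset a conditional branch can encode;
	 * PPC_BCC falls back to a short branch over a long jump beyond that. */
	printf("%d\n", is_offset_in_cond_branch_range(0x7ffc)); /* 1 */
	printf("%d\n", is_offset_in_cond_branch_range(0x8000)); /* 0 */
	printf("%d\n", is_offset_in_branch_range(0x1fffffc));   /* 1 */
	return 0;
}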
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 7b713edfa7e2..b63b35e45e55 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -16,18 +16,18 @@
* with our redzone usage.
*
* [ prev sp ] <-------------
- * [ nv gpr save area ] 6*8 |
+ * [ nv gpr save area ] 5*8 |
* [ tail_call_cnt ] 8 |
- * [ local_tmp_var ] 8 |
+ * [ local_tmp_var ] 16 |
* fp (r31) --> [ ebpf stack space ] upto 512 |
* [ frame header ] 32/112 |
* sp (r1) ---> [ stack pointer ] --------------
*/
/* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE (6*8)
+#define BPF_PPC_STACK_SAVE (5*8)
/* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS 16
+#define BPF_PPC_STACK_LOCALS 24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 53aefee3fe70..fcbf7a917c56 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -210,7 +210,11 @@ skip_init_ctx:
/* Now build the prologue, body code & epilogue for real. */
cgctx.idx = 0;
bpf_jit_build_prologue(code_base, &cgctx);
- bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+ if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+ bpf_jit_binary_free(bpf_hdr);
+ fp = org_fp;
+ goto out_addrs;
+ }
bpf_jit_build_epilogue(code_base, &cgctx);
if (bpf_jit_enable > 1)
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index beb12cbc8c29..0da31d41d413 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
}
}
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already setup parameters in r3-r6
@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
bpf_jit_emit_common_epilogue(image, ctx);
EMIT(PPC_RAW_BCTR());
+
/* out: */
+ return 0;
}
/* Assemble the body code between the prologue & epilogue */
@@ -355,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
}
- if (imm >= 0)
+ if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
else
EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
@@ -623,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_LI(dst_reg_h, 0));
break;
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
- EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
@@ -1073,7 +1075,7 @@ cond_branch:
break;
case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
- if (imm >= -32768 && imm < 32768) {
+ if (imm >= 0 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
@@ -1090,7 +1092,9 @@ cond_branch:
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ if (ret < 0)
+ return ret;
break;
default:
@@ -1103,7 +1107,7 @@ cond_branch:
return -EOPNOTSUPP;
}
if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
- !insn_is_zext(&insn[i + 1]))
+ !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
EMIT(PPC_RAW_LI(dst_reg_h, 0));
}
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index b87a63dba9c8..8b5157ccfeba 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
+#include <asm/security_features.h>
#include "bpf_jit64.h"
@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* [ prev sp ] <-------------
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
- * [ nv gpr save area ] 6*8
+ * [ nv gpr save area ] 5*8
* [ tail_call_cnt ] 8
- * [ local_tmp_var ] 8
+ * [ local_tmp_var ] 16
* [ unused red zone ] 208 bytes protected
*/
static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
if (bpf_has_stack_frame(ctx))
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
else
- return -(BPF_PPC_STACK_SAVE + 16);
+ return -(BPF_PPC_STACK_SAVE + 24);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
- return bpf_jit_stack_local(ctx) + 8;
+ return bpf_jit_stack_local(ctx) + 16;
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
EMIT(PPC_RAW_BCTRL());
}
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
bpf_jit_emit_common_epilogue(image, ctx);
EMIT(PPC_RAW_BCTR());
+
/* out: */
+ return 0;
}
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+" .global bpf_stf_barrier ;"
+" bpf_stf_barrier: ;"
+" std 21,-64(1) ;"
+" std 22,-56(1) ;"
+" sync ;"
+" ld 21,-64(1) ;"
+" ld 22,-56(1) ;"
+" ori 31,31,0 ;"
+" .rept 14 ;"
+" b 1f ;"
+" 1: ;"
+" .endr ;"
+" blr ;"
+);
+
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
u32 *addrs, bool extra_pass)
{
+ enum stf_barrier_type stf_barrier = stf_barrier_type_get();
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
@@ -328,18 +354,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
- case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+ } else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
- if (BPF_OP(code) == BPF_SUB)
- imm = -imm;
- if (imm) {
- if (imm >= -32768 && imm < 32768)
- EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
- else {
- PPC_LI32(b2p[TMP_REG_1], imm);
- EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
- }
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm > -32768 && imm <= 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+ } else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
@@ -389,8 +422,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (imm == 0)
return -EINVAL;
- else if (imm == 1)
- goto bpf_alu32_trunc;
+ if (imm == 1) {
+ if (BPF_OP(code) == BPF_DIV) {
+ goto bpf_alu32_trunc;
+ } else {
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ break;
+ }
+ }
PPC_LI32(b2p[TMP_REG_1], imm);
switch (BPF_CLASS(code)) {
@@ -631,6 +670,29 @@ emit_clear:
* BPF_ST NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
+ if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+ !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+ break;
+
+ switch (stf_barrier) {
+ case STF_BARRIER_EIEIO:
+ EMIT(PPC_RAW_EIEIO() | 0x02000000);
+ break;
+ case STF_BARRIER_SYNC_ORI:
+ EMIT(PPC_RAW_SYNC());
+ EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
+ EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+ break;
+ case STF_BARRIER_FALLBACK:
+ EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
+ PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+ EMIT(PPC_RAW_MTCTR(12));
+ EMIT(PPC_RAW_BCTRL());
+ EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+ break;
+ case STF_BARRIER_NONE:
+ break;
+ }
break;
/*
@@ -993,7 +1055,9 @@ cond_branch:
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ if (ret < 0)
+ return ret;
break;
default:
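Illustrative aside, not part of the patch: the removed lines above show that BPF_SUB with an immediate used to be rewritten as "imm = -imm" and fed through the ADD path, which breaks for imm == 0x80000000 because negating INT_MIN wraps back to INT_MIN in two's complement. The split SUB case instead checks that -imm fits the signed 16-bit ADDI field. A small sketch of that bounds check, mirroring the new condition, is below; the driver values are only for demonstration.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Can "dst -= imm" be emitted as a single addi dst, dst, -imm?
 * Mirrors the new check in bpf_jit_comp64.c: -imm must fit in the
 * signed 16-bit immediate field, i.e. -32768 <= -imm <= 32767. */
static int sub_imm_fits_addi(int32_t imm)
{
	return imm > -32768 && imm <= 32768;
}

int main(void)
{
	printf("%d\n", sub_imm_fits_addi(32768));  /* 1: addi dst, dst, -32768 */
	printf("%d\n", sub_imm_fits_addi(-32768)); /* 0: 32768 does not fit */
	/* INT_MIN falls through to the PPC_LI32 + sub path instead of the
	 * old (incorrect) negate-and-add conversion. */
	printf("%d\n", sub_imm_fits_addi(INT_MIN)); /* 0 */
	return 0;
}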
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index bc15200852b7..09fafcf2d3a0 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
if (is_kdump_kernel() || reset_devices) {
pr_info("Issue PHB reset ...\n");
list_for_each_entry(phb, &hose_list, list_node) {
+ // Skip if the slot is empty
+ if (list_empty(&PCI_DN(phb->dn)->child_list))
+ continue;
+
pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
config_addr = pseries_eeh_get_pe_config_addr(pdn);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 1b305e411862..8627362f613e 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d)
irq_chip_unmask_parent(d);
}
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+ /*
+ * Do not update the MSIx vector table. It's not strictly necessary
+ * because the table is initialized by the underlying hypervisor, PowerVM
+ * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
+ * activation will fail. This can happen in some drivers (eg. IPR) which
+ * deactivate an IRQ used for testing MSI support.
+ */
+ entry->msg = *msg;
+}
+
static struct irq_chip pseries_pci_msi_irq_chip = {
.name = "pSeries-PCI-MSI",
.irq_shutdown = pseries_msi_shutdown,
.irq_mask = pseries_msi_mask,
.irq_unmask = pseries_msi_unmask,
.irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pseries_msi_write_msg,
};
static struct msi_domain_info pseries_msi_domain_info = {
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 301a54233c7e..6a6fa9e976d5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -561,5 +561,3 @@ menu "Power management options"
source "kernel/power/Kconfig"
endmenu
-
-source "drivers/firmware/Kconfig"
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index b933b1583c9f..34fbb3ea21d5 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -82,4 +82,5 @@ static inline int syscall_get_arch(struct task_struct *task)
#endif
}
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 893e47195e30..208e31bc5d1c 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -16,18 +16,24 @@
#ifdef CONFIG_MMU
#include <linux/types.h>
-#include <generated/vdso-offsets.h>
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefor don't have one.
+ */
+#ifdef CONFIG_MMU
+
+#define __VVAR_PAGES 1
-#ifndef CONFIG_GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
#define VDSO_SYMBOL(base, name) \
(void __user *)((unsigned long)(base) + __vdso_##name##_offset)
#endif /* CONFIG_MMU */
-asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 4b989ae15d59..8062996c2dfd 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -18,9 +18,10 @@
#ifdef __LP64__
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_CLONE3
#endif /* __LP64__ */
+#define __ARCH_WANT_SYS_CLONE3
+
#include <asm-generic/unistd.h>
/*
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index a63c667c27b3..44b1420a2270 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/syscalls.h>
#include <asm-generic/syscalls.h>
-#include <asm/vdso.h>
#include <asm/syscall.h>
#undef __SYSCALL
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 25a3b8849599..b70956d80408 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -12,14 +12,24 @@
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
+#include <asm/vdso.h>
+
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
-#include <asm/vdso.h>
+struct vdso_data {
+};
#endif
extern char vdso_start[], vdso_end[];
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_NR_PAGES,
+};
+
+#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
+
static unsigned int vdso_pages __ro_after_init;
static struct page **vdso_pagelist __ro_after_init;
@@ -38,7 +48,7 @@ static int __init vdso_init(void)
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
vdso_pagelist =
- kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
if (unlikely(vdso_pagelist == NULL)) {
pr_err("vdso: pagelist allocation failed\n");
return -ENOMEM;
@@ -63,38 +73,41 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
unsigned long vdso_base, vdso_len;
int ret;
- vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+ BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+ vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
- mmap_write_lock(mm);
vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
goto end;
}
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
- */
- mm->context.vdso = (void *)vdso_base;
+ mm->context.vdso = NULL;
+ ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+ if (unlikely(ret))
+ goto end;
ret =
- install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ install_special_mapping(mm, vdso_base + VVAR_SIZE,
+ vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
vdso_pagelist);
- if (unlikely(ret)) {
- mm->context.vdso = NULL;
+ if (unlikely(ret))
goto end;
- }
- vdso_base += (vdso_pages << PAGE_SHIFT);
- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
- (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
- if (unlikely(ret))
- mm->context.vdso = NULL;
end:
mmap_write_unlock(mm);
return ret;
@@ -105,7 +118,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
if (vma->vm_mm && (vma->vm_start ==
- (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ (long)vma->vm_mm->context.vdso - VVAR_SIZE))
return "[vdso_data]";
return NULL;
}
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index e6f558bca71b..e9111f700af0 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -3,12 +3,13 @@
* Copyright (C) 2012 Regents of the University of California
*/
#include <asm/page.h>
+#include <asm/vdso.h>
OUTPUT_ARCH(riscv)
SECTIONS
{
- PROVIDE(_vdso_data = . + PAGE_SIZE);
+ PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 094118663285..89f81067e09e 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -16,6 +16,8 @@ static void ipi_remote_fence_i(void *info)
void flush_icache_all(void)
{
+ local_flush_icache_all();
+
if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_remote_fence_i(NULL);
else
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index e4803ec51110..6b3c366af78e 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -207,6 +207,8 @@ int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
int zpci_deconfigure_device(struct zpci_dev *zdev);
+void zpci_device_reserved(struct zpci_dev *zdev);
+bool zpci_is_device_configured(struct zpci_dev *zdev);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 840d8594437d..1a374d021e25 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1826,7 +1826,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
if (jit.addrs == NULL) {
fp = orig_fp;
- goto out;
+ goto free_addrs;
}
/*
* Three initial passes:
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e7e6788d75a8..b833155ce838 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
spin_unlock(&zpci_list_lock);
list_for_each_entry_safe(zdev, tmp, &remove, entry)
- zpci_zdev_put(zdev);
+ zpci_device_reserved(zdev);
}
int pci_domain_nr(struct pci_bus *bus)
@@ -751,6 +751,14 @@ error:
return ERR_PTR(rc);
}
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+ enum zpci_state state = zdev->state;
+
+ return state != ZPCI_FN_STATE_RESERVED &&
+ state != ZPCI_FN_STATE_STANDBY;
+}
+
/**
* zpci_scan_configured_device() - Scan a freshly configured zpci_dev
* @zdev: The zpci_dev to be configured
@@ -822,6 +830,31 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
return 0;
}
+/**
+ * zpci_device_reserved() - Mark device as resverved
+ * @zdev: the zpci_dev that was reserved
+ *
+ * Handle the case that a given zPCI function was reserved by another system.
+ * After a call to this function the zpci_dev can not be found via
+ * get_zdev_by_fid() anymore but may still be accessible via existing
+ * references though it will not be functional anymore.
+ */
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+ if (zdev->has_hp_slot)
+ zpci_exit_slot(zdev);
+ /*
+ * Remove device from zpci_list as it is going away. This also
+ * makes sure we ignore subsequent zPCI events for this device.
+ */
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zdev->state = ZPCI_FN_STATE_RESERVED;
+ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+ zpci_zdev_put(zdev);
+}
+
void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
@@ -843,6 +876,12 @@ void zpci_release_device(struct kref *kref)
case ZPCI_FN_STATE_STANDBY:
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+ fallthrough;
+ case ZPCI_FN_STATE_RESERVED:
if (zdev->has_resources)
zpci_cleanup_bus_resources(zdev);
zpci_bus_device_unregister(zdev);
@@ -851,10 +890,6 @@ void zpci_release_device(struct kref *kref)
default:
break;
}
-
- spin_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- spin_unlock(&zpci_list_lock);
zpci_dbg(3, "rem fid:%x\n", zdev->fid);
kfree(zdev);
}
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index c856f80cb21b..5b8d647523f9 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -140,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
/* The 0x0304 event may immediately reserve the device */
if (!clp_get_state(zdev->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
- zpci_zdev_put(zdev);
+ zpci_device_reserved(zdev);
}
}
break;
@@ -151,7 +151,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
case 0x0308: /* Standby -> Reserved */
if (!zdev)
break;
- zpci_zdev_put(zdev);
+ zpci_device_reserved(zdev);
break;
default:
break;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ab83c22d274e..bd70e8a39fbf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1405,7 +1405,7 @@ config HIGHMEM4G
config HIGHMEM64G
bool "64GB"
- depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+ depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
select X86_PAE
help
Select this if you have a 32-bit processor and more than 4
@@ -2832,8 +2832,6 @@ config HAVE_ATOMIC_IOMAP
def_bool y
depends on X86_32
-source "drivers/firmware/Kconfig"
-
source "arch/x86/kvm/Kconfig"
source "arch/x86/Kconfig.assembler"
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 14ebd2196569..43184640b579 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -25,7 +25,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
* For !SMAP hardware we patch out CLAC on entry.
*/
if (boot_cpu_has(X86_FEATURE_SMAP) ||
- (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+ (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
mask |= X86_EFLAGS_AC;
WARN_ON_ONCE(flags & mask);
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 3506d8c598c1..4557f7cb0fa6 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -14,16 +14,19 @@ static inline int pci_xen_hvm_init(void)
return -1;
}
#endif
-#if defined(CONFIG_XEN_DOM0)
+#ifdef CONFIG_XEN_PV_DOM0
int __init pci_xen_initial_domain(void);
-int xen_find_device_domain_owner(struct pci_dev *dev);
-int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
-int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
static inline int __init pci_xen_initial_domain(void)
{
return -1;
}
+#endif
+#ifdef CONFIG_XEN_DOM0
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
return -1;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0f8885949e8c..b3410f1ac217 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -326,6 +326,7 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_SMAP
cr4_set_bits(X86_CR4_SMAP);
#else
+ clear_cpu_cap(c, X86_FEATURE_SMAP);
cr4_clear_bits(X86_CR4_SMAP);
#endif
}
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 4b8813bafffd..bb1c3f5f60c8 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -527,12 +527,14 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
rdt_domain_reconfigure_cdp(r);
if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
- kfree(d);
+ kfree(hw_dom);
return;
}
if (r->mon_capable && domain_setup_mon_state(r, d)) {
- kfree(d);
+ kfree(hw_dom->ctrl_val);
+ kfree(hw_dom->mbps_val);
+ kfree(hw_dom);
return;
}
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 38837dad46e6..391a4e2b8604 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -714,12 +714,6 @@ static struct chipset early_qrk[] __initdata = {
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
- { PCI_VENDOR_ID_INTEL, 0x3e20,
- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
- { PCI_VENDOR_ID_INTEL, 0x3ec4,
- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
- { PCI_VENDOR_ID_INTEL, 0x8a12,
- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_BROADCOM, 0x4331,
PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 445c57c9c539..fa17a27390ab 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -379,9 +379,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
sizeof(fpu->state.fxsave)))
return -EFAULT;
- /* Reject invalid MXCSR values. */
- if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
- return -EINVAL;
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /* Reject invalid MXCSR values. */
+ if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+ return -EINVAL;
+ } else {
+ /* Mask invalid bits out for historical reasons (broken hardware). */
+ fpu->state.fxsave.mxcsr &= ~mxcsr_feature_mask;
+ }
/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
if (use_xsave())
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 42fc41dd0e1f..882213df3713 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -10,6 +10,7 @@
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/time.h>
+#include <asm/mwait.h>
#undef pr_fmt
#define pr_fmt(fmt) "hpet: " fmt
@@ -916,6 +917,83 @@ static bool __init hpet_counting(void)
return false;
}
+static bool __init mwait_pc10_supported(void)
+{
+ unsigned int eax, ebx, ecx, mwait_substates;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return false;
+
+ if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
+ return false;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return false;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+ return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
+ (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
+ (mwait_substates & (0xF << 28));
+}
+
+/*
+ * Check whether the system supports PC10. If so, force-disable HPET as it
+ * stops counting in PC10. This check is overbroad as it does not take any
+ * of the following into account:
+ *
+ * - ACPI tables
+ * - Enablement of intel_idle
+ * - Command line arguments which limit intel_idle C-state support
+ *
+ * That's perfectly fine. HPET is a piece of hardware designed by committee
+ * and the only reason it is still in use on modern systems is the
+ * fact that it is impossible to reliably query TSC and CPU frequency via
+ * CPUID or firmware.
+ *
+ * If HPET is functional it is useful for calibrating TSC, but this can be
+ * done via PMTIMER as well, which seems to be the last remaining timer on
+ * X86/INTEL platforms that has not been completely wrecked by feature
+ * creep.
+ *
+ * In theory HPET support should be removed altogether, but there are older
+ * systems out there which depend on it because TSC and APIC timer are
+ * dysfunctional in deeper C-states.
+ *
+ * It's only 20 years now that hardware people have been asked to provide
+ * reliable and discoverable facilities which can be used for timekeeping
+ * and per CPU timer interrupts.
+ *
+ * The probability that this problem is going to be solved in the
+ * foreseeable future is close to zero, so the kernel has to be cluttered
+ * with heuristics to keep up with the ever growing amount of hardware and
+ * firmware trainwrecks. Hopefully some day hardware people will understand
+ * that the approach of "This can be fixed in software" is not sustainable.
+ * Hope dies last...
+ */
+static bool __init hpet_is_pc10_damaged(void)
+{
+ unsigned long long pcfg;
+
+ /* Check whether PC10 substates are supported */
+ if (!mwait_pc10_supported())
+ return false;
+
+ /* Check whether PC10 is enabled in PKG C-state limit */
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+ if ((pcfg & 0xF) < 8)
+ return false;
+
+ if (hpet_force_user) {
+ pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
+ return false;
+ }
+
+ pr_info("HPET dysfunctional in PC10. Force disabled.\n");
+ boot_hpet_disable = true;
+ return true;
+}
+
/**
* hpet_enable - Try to setup the HPET timer. Returns 1 on success.
*/
@@ -929,6 +1007,9 @@ int __init hpet_enable(void)
if (!is_hpet_capable())
return 0;
+ if (hpet_is_pc10_damaged())
+ return 0;
+
hpet_set_mapping();
if (!hpet_virt_address)
return 0;
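A brief note on the sub-state test in mwait_pc10_supported() above: the (0xF << 28) mask selects bits 31:28 of the CPUID.05H EDX output, i.e. the number of MWAIT sub-states advertised for the deepest hint group (0x6x), which is where PC10 lives on client parts. A minimal sketch of that extraction, with a made-up helper name:

static inline unsigned int mwait_c10_substates(unsigned int edx)
{
	/* CPUID leaf 5, EDX bits 31:28: sub-state count for MWAIT hints 0x6x */
	return (edx >> 28) & 0xF;
}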
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 9f90f460a28c..bf1033a62e48 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -130,6 +130,8 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
} else {
ret = ES_VMM_ERROR;
}
+ } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
+ ret = ES_VMM_ERROR;
} else {
ret = ES_OK;
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 3d41a09c2c14..5debe4ac6f81 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -113,7 +113,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
false /* no mapping of GSI to PIRQ */);
}
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
int rc, irq;
@@ -261,7 +261,7 @@ error:
return irq;
}
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
static bool __read_mostly pci_seg_supported = true;
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -375,10 +375,10 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
}
}
-#else /* CONFIG_XEN_DOM0 */
+#else /* CONFIG_XEN_PV_DOM0 */
#define xen_initdom_setup_msi_irqs NULL
#define xen_initdom_restore_msi_irqs NULL
-#endif /* !CONFIG_XEN_DOM0 */
+#endif /* !CONFIG_XEN_PV_DOM0 */
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
@@ -555,7 +555,7 @@ int __init pci_xen_hvm_init(void)
return 0;
}
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
int __init pci_xen_initial_domain(void)
{
int irq;
@@ -583,6 +583,9 @@ int __init pci_xen_initial_domain(void)
}
return 0;
}
+#endif
+
+#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
domid_t domain;
@@ -656,4 +659,4 @@ int xen_unregister_device_domain_owner(struct pci_dev *dev)
return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
-#endif
+#endif /* CONFIG_XEN_DOM0 */
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index ee2beda590d0..1d4a00e767ec 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
static struct olpc_ec_driver ec_xo1_5_driver = {
.ec_cmd = olpc_xo1_ec_cmd,
-#ifdef CONFIG_OLPC_XO1_5_SCI
+#ifdef CONFIG_OLPC_XO15_SCI
/*
* XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
* compiled in
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
index 9ac7457f52a3..ed0442e35434 100644
--- a/arch/x86/platform/pvh/enlighten.c
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -16,15 +16,15 @@
/*
* PVH variables.
*
- * pvh_bootparams and pvh_start_info need to live in the data segment since
+ * pvh_bootparams and pvh_start_info need to live in a data segment since
* they are used after startup_{32|64}, which clear .bss, are invoked.
*/
-struct boot_params pvh_bootparams __section(".data");
-struct hvm_start_info pvh_start_info __section(".data");
+struct boot_params __initdata pvh_bootparams;
+struct hvm_start_info __initdata pvh_start_info;
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+const unsigned int __initconst pvh_start_info_sz = sizeof(pvh_start_info);
-static u64 pvh_get_root_pointer(void)
+static u64 __init pvh_get_root_pointer(void)
{
return pvh_start_info.rsdp_paddr;
}
@@ -107,7 +107,7 @@ void __init __weak xen_pvh_init(struct boot_params *boot_params)
BUG();
}
-static void hypervisor_specific_init(bool xen_guest)
+static void __init hypervisor_specific_init(bool xen_guest)
{
if (xen_guest)
xen_pvh_init(&pvh_bootparams);
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index afc1da68b06d..6bcd3d8ca6ac 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -43,13 +43,9 @@ config XEN_PV_SMP
def_bool y
depends on XEN_PV && SMP
-config XEN_DOM0
- bool "Xen PV Dom0 support"
- default y
- depends on XEN_PV && PCI_XEN && SWIOTLB_XEN
- depends on X86_IO_APIC && ACPI && PCI
- help
- Support running as a Xen PV Dom0 guest.
+config XEN_PV_DOM0
+ def_bool y
+ depends on XEN_PV && XEN_DOM0
config XEN_PVHVM
def_bool y
@@ -86,3 +82,12 @@ config XEN_PVH
def_bool n
help
Support for running as a Xen PVH guest.
+
+config XEN_DOM0
+ bool "Xen Dom0 support"
+ default XEN_PV
+ depends on (XEN_PV && SWIOTLB_XEN) || (XEN_PVH && X86_64)
+ depends on X86_IO_APIC && ACPI && PCI
+ select X86_X2APIC if XEN_PVH && X86_64
+ help
+ Support running as a Xen Dom0 guest.
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 40b5779fce21..4953260e281c 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_DOM0) += vga.o
+obj-$(CONFIG_XEN_PV_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c79bd0af2e8c..95d970359e17 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -3,6 +3,7 @@
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
#include <linux/memblock.h>
#endif
+#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/slab.h>
@@ -10,12 +11,15 @@
#include <xen/xen.h>
#include <xen/features.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/version.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/cpu.h>
#include <asm/e820/api.h>
+#include <asm/setup.h>
#include "xen-ops.h"
#include "smp.h"
@@ -52,9 +56,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
-enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
-
unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
@@ -69,10 +70,12 @@ __read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
- * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
- * before clearing the bss.
+ * NB: These need to live in .data or similar because they're used by
+ * xen_prepare_pvh(), which runs before clearing the bss.
*/
-uint32_t xen_start_flags __section(".data") = 0;
+enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
+EXPORT_SYMBOL_GPL(xen_domain_type);
+uint32_t __ro_after_init xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
/*
@@ -258,6 +261,45 @@ int xen_vcpu_setup(int cpu)
return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
}
+void __init xen_banner(void)
+{
+ unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ struct xen_extraversion extra;
+
+ HYPERVISOR_xen_version(XENVER_extraversion, &extra);
+
+ pr_info("Booting kernel on %s\n", pv_info.name);
+ pr_info("Xen version: %u.%u%s%s\n",
+ version >> 16, version & 0xffff, extra.extraversion,
+ xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
+ ? " (preserve-AD)" : "");
+}
+
+/* Check if running on Xen version (major, minor) or later */
+bool xen_running_on_version_or_later(unsigned int major, unsigned int minor)
+{
+ unsigned int version;
+
+ if (!xen_domain())
+ return false;
+
+ version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
+ ((version >> 16) > major))
+ return true;
+ return false;
+}
+
+void __init xen_add_preferred_consoles(void)
+{
+ add_preferred_console("xenboot", 0, NULL);
+ if (!boot_params.screen_info.orig_video_isVGA)
+ add_preferred_console("tty", 0, NULL);
+ add_preferred_console("hvc", 0, NULL);
+ if (boot_params.screen_info.orig_video_isVGA)
+ add_preferred_console("tty", 0, NULL);
+}
+
void xen_reboot(int reason)
{
struct sched_shutdown r = { .reason = reason };
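For reference on the version check moved into enlighten.c above: HYPERVISOR_xen_version(XENVER_version, NULL) packs the major version in the upper 16 bits and the minor in the lower 16 bits. A standalone sketch of the same comparison, with an invented name:

static bool xen_version_at_least(unsigned int packed,
				 unsigned int major, unsigned int minor)
{
	unsigned int have_major = packed >> 16;
	unsigned int have_minor = packed & 0xffff;

	/* matches xen_running_on_version_or_later() above */
	return have_major > major ||
	       (have_major == major && have_minor >= minor);
}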
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 6e0d0754f94f..a7b7d674f500 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -28,7 +28,6 @@
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
-#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
@@ -109,17 +108,6 @@ struct tls_descs {
*/
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
-static void __init xen_banner(void)
-{
- unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
- struct xen_extraversion extra;
- HYPERVISOR_xen_version(XENVER_extraversion, &extra);
-
- pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
- pr_info("Xen version: %d.%d%s (preserve-AD)\n",
- version >> 16, version & 0xffff, extra.extraversion);
-}
-
static void __init xen_pv_init_platform(void)
{
populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
@@ -142,22 +130,6 @@ static void __init xen_pv_guest_late_init(void)
#endif
}
-/* Check if running on Xen version (major, minor) or later */
-bool
-xen_running_on_version_or_later(unsigned int major, unsigned int minor)
-{
- unsigned int version;
-
- if (!xen_domain())
- return false;
-
- version = HYPERVISOR_xen_version(XENVER_version, NULL);
- if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
- ((version >> 16) > major))
- return true;
- return false;
-}
-
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;
@@ -1364,7 +1336,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
if (!xen_initial_domain()) {
- add_preferred_console("xenboot", 0, NULL);
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
x86_platform.set_legacy_features =
@@ -1409,11 +1380,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
#endif
}
- if (!boot_params.screen_info.orig_video_isVGA)
- add_preferred_console("tty", 0, NULL);
- add_preferred_console("hvc", 0, NULL);
- if (boot_params.screen_info.orig_video_isVGA)
- add_preferred_console("tty", 0, NULL);
+ xen_add_preferred_consoles();
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 0d5e34b9e6f9..bcae606bbc5c 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
+#include <linux/export.h>
#include <xen/hvc-console.h>
@@ -18,10 +19,11 @@
/*
* PVH variables.
*
- * The variable xen_pvh needs to live in the data segment since it is used
+ * The variable xen_pvh needs to live in a data segment since it is used
* after startup_{32|64} is invoked, which will clear the .bss segment.
*/
-bool xen_pvh __section(".data") = 0;
+bool __ro_after_init xen_pvh;
+EXPORT_SYMBOL_GPL(xen_pvh);
void __init xen_pvh_init(struct boot_params *boot_params)
{
@@ -36,6 +38,10 @@ void __init xen_pvh_init(struct boot_params *boot_params)
pfn = __pa(hypercall_page);
wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+ if (xen_initial_domain())
+ x86_init.oem.arch_setup = xen_add_preferred_consoles;
+ x86_init.oem.banner = xen_banner;
+
xen_efi_init(boot_params);
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 8d751939c6f3..3359c23573c5 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2398,7 +2398,7 @@ static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages)
+ unsigned int domid, bool no_translate)
{
int err = 0;
struct remap_data rmd;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 8d7ec49a35fb..8bc8b72a205d 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -51,6 +51,7 @@ void __init xen_remap_memory(void);
phys_addr_t __init xen_find_free_area(phys_addr_t size);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
+void xen_banner(void);
void xen_enable_sysenter(void);
void xen_enable_syscall(void);
void xen_vcpu_restore(void);
@@ -109,7 +110,7 @@ static inline void xen_uninit_lock_cpu(int cpu)
struct dom0_vga_console_info;
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
@@ -118,6 +119,8 @@ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
}
#endif
+void xen_add_preferred_consoles(void);
+
void __init xen_init_apic(void);
#ifdef CONFIG_XEN_EFI
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 7cbf68ca7106..6fc05cba61a2 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -78,7 +78,7 @@
#endif
#define XCHAL_KIO_SIZE 0x10000000
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 764b54bef701..15051a8a1539 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -143,7 +143,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
void __init init_IRQ(void)
{
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
irqchip_init();
#else
#ifdef CONFIG_HAVE_SMP
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index ed184106e4cf..ee9082a142fe 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -63,7 +63,7 @@ extern unsigned long initrd_end;
extern int initrd_below_start_ok;
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void *dtb_start = __dtb_start;
#endif
@@ -125,7 +125,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
#endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
static int __init parse_tag_fdt(const bp_tag_t *tag)
{
@@ -135,7 +135,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
@@ -183,7 +183,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
}
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -232,7 +232,7 @@ void __init early_init_devtree(void *params)
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
/*
* Initialize architecture. (Early stage)
@@ -253,7 +253,7 @@ void __init init_arch(bp_tag_t *bp_start)
if (bp_start)
parse_bootparam(bp_start);
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
early_init_devtree(dtb_start);
#endif
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 7e4d97dc8bd8..38acda4f04e8 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -101,7 +101,7 @@ void init_mmu(void)
void init_kio(void)
{
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
*/
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 4f7d6142d41f..538e6748e85a 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -51,8 +51,12 @@ void platform_power_off(void)
void platform_restart(void)
{
- /* Flush and reset the mmu, simulate a processor reset, and
- * jump to the reset vector. */
+ /* Try software reset first. */
+ WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
+
+ /* If software reset did not work, flush and reset the mmu,
+ * simulate a processor reset, and jump to the reset vector.
+ */
cpu_reset();
/* control never gets here */
}
@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
static void __init xtfpga_clk_setup(struct device_node *np)
{
@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
*/
arch_initcall(xtavnet_init);
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
diff --git a/block/bdev.c b/block/bdev.c
index cf2780cb44a7..485a258b0ab3 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -490,7 +490,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
bdev = I_BDEV(inode);
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
- bdev->bd_disk = disk;
bdev->bd_partno = partno;
bdev->bd_inode = inode;
bdev->bd_stats = alloc_percpu(struct disk_stats);
@@ -498,6 +497,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
iput(inode);
return NULL;
}
+ bdev->bd_disk = disk;
return bdev;
}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4b66d2776eda..3b38d15723de 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+ QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME
diff --git a/block/genhd.c b/block/genhd.c
index 7b6e5e1cf956..496e8458c357 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1268,6 +1268,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
+ disk->part0->bd_disk = NULL;
iput(disk->part0->bd_inode);
out_free_bdi:
bdi_put(disk->bdi);
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 30d2db37cc87..0d399ddaa185 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -17,6 +17,8 @@ source "drivers/bus/Kconfig"
source "drivers/connector/Kconfig"
+source "drivers/firmware/Kconfig"
+
source "drivers/gnss/Kconfig"
source "drivers/mtd/Kconfig"
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 0a0a982f9c28..c0e77c1c8e09 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -36,7 +36,7 @@ struct acpi_gtdt_descriptor {
static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
-static inline void *next_platform_timer(void *platform_timer)
+static inline __init void *next_platform_timer(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 64b2f3d744d5..7f76fee6f989 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
-CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
+CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 0bebd5a62a9f..cda7d7162cbb 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -203,10 +203,7 @@ config INTEL_STRATIX10_RSU
Say Y here if you want Intel RSU support.
config QCOM_SCM
- tristate "Qcom SCM driver"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on HAVE_ARM_SMCCC
- select RESET_CONTROLLER
+ tristate
config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
bool "Qualcomm download mode enabled by default"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d356e329e6f8..269437b01328 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1087,6 +1087,7 @@ struct amdgpu_device {
bool no_hw_access;
struct pci_saved_state *pci_state;
+ pci_channel_state_t pci_channel_state;
struct amdgpu_reset_control *reset_cntl;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 2d6b2d77b738..054c1a224def 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -563,6 +563,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
+ kfree(ttm->sg);
ttm->sg = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ab3794c42d36..af9bdf16eefd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2394,10 +2394,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
- r = amdgpu_amdkfd_resume_iommu(adev);
- if (r)
- goto init_failed;
-
r = amdgpu_device_ip_hw_init_phase1(adev);
if (r)
goto init_failed;
@@ -2436,6 +2432,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.pending_reset)
amdgpu_amdkfd_device_init(adev);
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ goto init_failed;
+
amdgpu_fru_get_product_info(adev);
init_failed:
@@ -5399,6 +5399,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
return PCI_ERS_RESULT_DISCONNECT;
}
+ adev->pci_channel_state = state;
+
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
@@ -5541,6 +5543,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
DRM_INFO("PCI error: resume callback!!\n");
+ /* Only continue execution for the case of pci_channel_io_frozen */
+ if (adev->pci_channel_state != pci_channel_io_frozen)
+ return;
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e7f06bd0f0cd..1916ec84dd71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -31,6 +31,8 @@
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
+#define GFX_OFF_NO_DELAY 0
+
/*
* GPU GFX IP block helpers function.
*/
@@ -558,6 +560,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
+ unsigned long delay = GFX_OFF_DELAY_ENABLE;
+
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
@@ -573,8 +577,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_off_req_count--;
- if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+ /* If going to s2idle, no need to wait */
+ if (adev->in_s0ix)
+ delay = GFX_OFF_NO_DELAY;
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
+ }
} else {
if (adev->gfx.gfx_off_req_count == 0) {
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index c2a4d920da40..4a416231b24c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1085,18 +1085,12 @@ static int kfd_resume(struct kfd_dev *kfd)
int err = 0;
err = kfd->dqm->ops.start(kfd->dqm);
- if (err) {
+ if (err)
dev_err(kfd_device,
"Error starting queue manager for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
- goto dqm_start_error;
- }
return err;
-
-dqm_start_error:
- kfd_iommu_suspend(kfd);
- return err;
}
static inline void kfd_queue_work(struct workqueue_struct *wq,
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 7dffc04a557e..127667e549c1 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -25,6 +25,8 @@ config DRM_AMD_DC_HDCP
config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
+ depends on DRM_AMDGPU_SI
+ depends on DRM_AMD_DC
default n
help
Choose this option to enable new AMD DC support for SI asics
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 05eaec03d9f7..6d655e158267 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1306,12 +1306,6 @@ static void override_training_settings(
{
uint32_t lane;
- /* Override link settings */
- if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
- lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
- if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
- lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
-
/* Override link spread */
if (!link->dp_ss_off && overrides->downspread != NULL)
lt_settings->link_settings.link_spread = *overrides->downspread ?
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d8b22618b79e..c337588231ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -118,6 +118,7 @@ struct dcn10_link_enc_registers {
uint32_t RDPCSTX_PHY_CNTL4;
uint32_t RDPCSTX_PHY_CNTL5;
uint32_t RDPCSTX_PHY_CNTL6;
+ uint32_t RDPCSPIPE_PHY_CNTL6;
uint32_t RDPCSTX_PHY_CNTL7;
uint32_t RDPCSTX_PHY_CNTL8;
uint32_t RDPCSTX_PHY_CNTL9;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 90127c1f9e35..b0892443fbd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -37,6 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
+#include "dal_asic_id.h"
#define CTX \
enc10->base.ctx
@@ -62,6 +63,10 @@
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
void dcn31_link_encoder_set_dio_phy_mux(
struct link_encoder *enc,
enum encoder_type_select sel,
@@ -215,8 +220,8 @@ static const struct link_encoder_funcs dcn31_link_enc_funcs = {
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
.get_dig_mode = dcn10_get_dig_mode,
- .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
- .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+ .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
};
@@ -404,3 +409,60 @@ void dcn31_link_encoder_disable_output(
}
}
+bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable;
+ bool is_usb_c_alt_mode = false;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ // [Note] no need to check hw_internal_rev once phy mux selection is ready
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ } else {
+ /*
+ * B0 PHYs use a new set of registers to check whether alt mode is disabled:
+ * if the value is 1, alt mode is disabled; otherwise it is enabled.
+ */
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ } else {
+ // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ }
+ }
+
+ is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ }
+
+ return is_usb_c_alt_mode;
+}
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+ /* In USB-C DP2 mode, the max lane count is 2 */
+ if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ // [Note] no need to check hw_internal_rev once phy mux selection is ready
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ } else {
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ } else {
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ }
+ }
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
index 32d146312838..3454f1e7c1f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
@@ -69,6 +69,7 @@
SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \
SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
@@ -115,7 +116,9 @@
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
- LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
@@ -243,4 +246,13 @@ void dcn31_link_encoder_disable_output(
struct link_encoder *enc,
enum signal_type signal);
+/*
+ * Check whether USB-C DP Alt mode is disabled
+ */
+bool dcn31_link_encoder_is_in_alt_mode(
+ struct link_encoder *enc);
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index a7702d3c75cd..0006bbac466c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 7680,/*upto 8K*/
+ .max_downscale_src_width = 3840,/*up to 4K*/
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
@@ -1284,6 +1284,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
if (!enc1 || !vpg || !afmt)
return NULL;
+ if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
+ eng_id = eng_id + 3; // For B0 only. C->F, D->G.
+ }
+
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 381c17caace1..5adc471bef57 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -227,7 +227,7 @@ enum {
#define FAMILY_YELLOW_CARP 146
#define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02 // TODO: DCN31 - update with correct B0 ID
+#define YELLOW_CARP_B0 0x1A
#define YELLOW_CARP_UNKNOWN 0xFF
#ifndef ASICREV_IS_YELLOW_CARP
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
index 92caf8441d1e..01a56556cde1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
@@ -11932,5 +11932,32 @@
#define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_RX_OVRD_OUT_2 0xe0c7
#define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_TX_OVRD_IN_2 0xe0c8
+//RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+
+//RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+
+//[Note] Hack. RDPCSPIPE only has 2 instances.
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6 0x2d73
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6 0x2e4b
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6 0x2d73
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6 0x2e4b
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6 0x2d73
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 43ec7fcd3f5d..a3eae3f3eadc 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1577,8 +1577,14 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *intel_crtc;
+ enum pipe pipe;
+
+ if (!crtc_state)
+ return;
+
+ intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ pipe = intel_crtc->pipe;
/* wa verify 1409054076:icl,jsl,ehl */
if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 532237588511..4e0f96bf6158 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -1308,8 +1308,9 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
else
aud_freq = aud_freq_init;
- /* use BIOS provided value for TGL unless it is a known bad value */
- if (IS_TIGERLAKE(dev_priv) && aud_freq_init != AUD_FREQ_TGL_BROKEN)
+ /* use BIOS provided value for TGL and RKL unless it is a known bad value */
+ if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) &&
+ aud_freq_init != AUD_FREQ_TGL_BROKEN)
aud_freq = aud_freq_init;
drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index e86e6ed2d3bf..fd71346aac7b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -451,13 +451,23 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
- if (bdb->version >= 191 &&
- get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
- const struct lfp_backlight_control_method *method;
+ if (bdb->version >= 191) {
+ size_t exp_size;
- method = &backlight_data->backlight_control[panel_type];
- i915->vbt.backlight.type = method->type;
- i915->vbt.backlight.controller = method->controller;
+ if (bdb->version >= 236)
+ exp_size = sizeof(struct bdb_lfp_backlight_data);
+ else if (bdb->version >= 234)
+ exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+ else
+ exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+
+ if (get_blocksize(backlight_data) >= exp_size) {
+ const struct lfp_backlight_control_method *method;
+
+ method = &backlight_data->backlight_control[panel_type];
+ i915->vbt.backlight.type = method->type;
+ i915->vbt.backlight.controller = method->controller;
+ }
}
i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9903a78df896..bd184325d0c7 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3807,7 +3807,13 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
static void intel_ddi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- if (intel_crtc_has_dp_encoder(crtc_state))
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_sanitize(enc_to_dig_port(encoder));
+
+ if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
intel_dp_sync_state(encoder, crtc_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 134a6acbd8fb..17f44ffea586 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -13082,18 +13082,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
readout_plane_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
+ struct intel_crtc_state *crtc_state = NULL;
+
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- struct intel_crtc_state *crtc_state;
-
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
intel_encoder_get_config(encoder, crtc_state);
- if (encoder->sync_state)
- encoder->sync_state(encoder, crtc_state);
/* read out to slave crtc as well for bigjoiner */
if (crtc_state->bigjoiner) {
@@ -13108,6 +13106,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
+ if (encoder->sync_state)
+ encoder->sync_state(encoder, crtc_state);
+
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
@@ -13390,17 +13391,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
-
- /* Sanitize the TypeC port mode upfront, encoders depend on this */
- for_each_intel_encoder(dev, encoder) {
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- /* We need to sanitize only the MST primary port. */
- if (encoder->type != INTEL_OUTPUT_DP_MST &&
- intel_phy_is_tc(dev_priv, phy))
- intel_tc_port_sanitize(enc_to_dig_port(encoder));
- }
-
get_encoder_power_domains(dev_priv);
if (HAS_PCH_IBX(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 330077c2e588..a2108a8f544d 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -814,6 +814,11 @@ struct lfp_brightness_level {
u16 reserved;
} __packed;
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+ offsetof(struct bdb_lfp_backlight_data, brightness_level)
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+ offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index e382b7f2353b..5ab136ffdeb2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -118,7 +118,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
- int err;
+ int err = 0;
/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -242,12 +242,15 @@ skip:
list_splice_tail(&still_in_list, phase->list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
if (err)
- return err;
+ break;
}
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ if (err)
+ return err;
+
if (nr_scanned)
*nr_scanned += scanned;
return count;
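The i915_gem_shrink() change above is the usual "break instead of early return" rework so that the wakeref taken for I915_SHRINK_BOUND is dropped before the error is propagated. A generic sketch of the pattern; acquire_ref(), scan_phase() and release_ref() are hypothetical stand-ins:

static int shrink_phases_sketch(int nr_phases)
{
	int err = 0, i;

	acquire_ref();
	for (i = 0; i < nr_phases; i++) {
		err = scan_phase(i);
		if (err)
			break;	/* do not return while still holding the ref */
	}
	release_ref();	/* always runs, error or not */

	return err;
}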
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 664970f2bc62..4037030f0984 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8193,6 +8193,11 @@ enum {
#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
#define HSW_FBCQ_DIS (1 << 22)
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
+#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
+#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define _CHICKEN_TRANS_A 0x420c0
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 65bc3709f54c..a725792d5248 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -76,6 +76,8 @@ struct intel_wm_config {
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ enum pipe pipe;
+
if (HAS_LLC(dev_priv)) {
/*
* WaCompressedResourceDisplayNewHashMode:skl,kbl
@@ -89,6 +91,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
SKL_DE_COMPRESSED_HASH_MODE);
}
+ for_each_pipe(dev_priv, pipe) {
+ /*
+ * "Plane N strech max must be programmed to 11b (x1)
+ * when Async flips are enabled on that plane."
+ */
+ if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
+ intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+ SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
+ }
+
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 1c2f4799f421..12ce669650cc 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -172,10 +172,10 @@ static int kmb_setup_mode_config(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
- drm->mode_config.min_width = KMB_MIN_WIDTH;
- drm->mode_config.min_height = KMB_MIN_HEIGHT;
- drm->mode_config.max_width = KMB_MAX_WIDTH;
- drm->mode_config.max_height = KMB_MAX_HEIGHT;
+ drm->mode_config.min_width = KMB_FB_MIN_WIDTH;
+ drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
+ drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
+ drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
drm->mode_config.funcs = &kmb_mode_config_funcs;
ret = kmb_setup_crtc(drm);
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index ebbaa5f422d5..69a62e2d03ff 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -20,6 +20,11 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
+#define KMB_FB_MAX_WIDTH 1920
+#define KMB_FB_MAX_HEIGHT 1080
+#define KMB_FB_MIN_WIDTH 1
+#define KMB_FB_MIN_HEIGHT 1
+
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index ecee6782612d..06b0c42c9e91 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -94,9 +94,10 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
if (ret)
return ret;
- if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT)
- return -EINVAL;
- if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT)
+ if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH ||
+ new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT ||
+ new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
+ new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
return -EINVAL;
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
@@ -277,6 +278,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id)
kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]);
}
+static void kmb_plane_set_alpha(struct kmb_drm_private *kmb,
+ const struct drm_plane_state *state,
+ unsigned char plane_id,
+ unsigned int *val)
+{
+ u16 plane_alpha = state->alpha;
+ u16 pixel_blend_mode = state->pixel_blend_mode;
+ int has_alpha = state->fb->format->has_alpha;
+
+ if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)
+ *val |= LCD_LAYER_ALPHA_STATIC;
+
+ if (has_alpha) {
+ switch (pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT;
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ *val |= LCD_LAYER_ALPHA_EMBED;
+ break;
+ default:
+ DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n",
+ __stringify(pixel_blend_mode),
+ (long)pixel_blend_mode);
+ break;
+ }
+ }
+
+ if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) {
+ *val &= LCD_LAYER_ALPHA_DISABLED;
+ return;
+ }
+
+ kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha);
+}
+
static void kmb_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -303,11 +342,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
fb = new_plane_state->fb;
if (!fb)
return;
+
num_planes = fb->format->num_planes;
kmb_plane = to_kmb_plane(plane);
- plane_id = kmb_plane->id;
kmb = to_kmb(plane->dev);
+ plane_id = kmb_plane->id;
spin_lock_irq(&kmb->irq_lock);
if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
@@ -400,20 +440,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
config_csc(kmb, plane_id);
}
+ kmb_plane_set_alpha(kmb, plane->state, plane_id, &val);
+
kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
+ /* Configure LCD_CONTROL */
+ ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+
+ /* Set layer blending config */
+ ctrl &= ~LCD_CTRL_ALPHA_ALL;
+ ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 |
+ LCD_CTRL_ALPHA_BLEND_VL2;
+
+ ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE;
+
switch (plane_id) {
case LAYER_0:
- ctrl = LCD_CTRL_VL1_ENABLE;
+ ctrl |= LCD_CTRL_VL1_ENABLE;
break;
case LAYER_1:
- ctrl = LCD_CTRL_VL2_ENABLE;
+ ctrl |= LCD_CTRL_VL2_ENABLE;
break;
case LAYER_2:
- ctrl = LCD_CTRL_GL1_ENABLE;
+ ctrl |= LCD_CTRL_GL1_ENABLE;
break;
case LAYER_3:
- ctrl = LCD_CTRL_GL2_ENABLE;
+ ctrl |= LCD_CTRL_GL2_ENABLE;
break;
}
@@ -425,7 +477,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
*/
ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
- kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
+ kmb_write_lcd(kmb, LCD_CONTROL, ctrl);
/* Enable pipeline AXI read transactions for the DMA
* after setting graphics layers. This must be done
@@ -490,6 +542,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
enum drm_plane_type plane_type;
const u32 *plane_formats;
int num_plane_formats;
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
for (i = 0; i < KMB_MAX_PLANES; i++) {
plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
@@ -521,8 +576,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
drm_dbg(drm, "%s : %d i=%d type=%d",
__func__, __LINE__,
i, plane_type);
+ drm_plane_create_alpha_property(&plane->base_plane);
+
+ drm_plane_create_blend_mode_property(&plane->base_plane,
+ blend_caps);
+
+ drm_plane_create_zpos_immutable_property(&plane->base_plane, i);
+
drm_plane_helper_add(&plane->base_plane,
&kmb_plane_helper_funcs);
+
if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
primary = plane;
kmb->plane = plane;
diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h
index 486490f7a3ec..6e8d22cf8819 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.h
+++ b/drivers/gpu/drm/kmb/kmb_plane.h
@@ -35,6 +35,9 @@
#define POSSIBLE_CRTCS 1
#define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane)
+#define POSSIBLE_CRTCS 1
+#define KMB_MAX_PLANES 2
+
enum layer_id {
LAYER_0,
LAYER_1,
@@ -43,8 +46,6 @@ enum layer_id {
/* KMB_MAX_PLANES */
};
-#define KMB_MAX_PLANES 1
-
enum sub_plane_id {
Y_PLANE,
U_PLANE,
diff --git a/drivers/gpu/drm/kmb/kmb_regs.h b/drivers/gpu/drm/kmb/kmb_regs.h
index 48150569f702..9756101b0d32 100644
--- a/drivers/gpu/drm/kmb/kmb_regs.h
+++ b/drivers/gpu/drm/kmb/kmb_regs.h
@@ -43,8 +43,10 @@
#define LCD_CTRL_OUTPUT_ENABLED BIT(19)
#define LCD_CTRL_BPORCH_ENABLE BIT(21)
#define LCD_CTRL_FPORCH_ENABLE BIT(22)
+#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE BIT(23)
#define LCD_CTRL_PIPELINE_DMA BIT(28)
#define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31)
+#define LCD_CTRL_ALPHA_ALL (0xff << 6)
/* interrupts */
#define LCD_INT_STATUS (0x4 * 0x001)
@@ -115,6 +117,7 @@
#define LCD_LAYER_ALPHA_EMBED BIT(5)
#define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \
LCD_LAYER_ALPHA_EMBED)
+#define LCD_LAYER_ALPHA_DISABLED ~(LCD_LAYER_ALPHA_COMBI)
/* RGB multiplied with alpha */
#define LCD_LAYER_ALPHA_PREMULT BIT(6)
#define LCD_LAYER_INVERT_COL BIT(7)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index e9c6af78b1d7..3ddf739a6f9b 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -17,7 +17,7 @@ config DRM_MSM
select DRM_SCHED
select SHMEM
select TMPFS
- select QCOM_SCM if ARCH_QCOM
+ select QCOM_SCM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
@@ -55,7 +55,7 @@ config DRM_MSM_GPU_SUDO
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
- depends on DRM_MSM && QCOM_SCM
+ depends on DRM_MSM
default y
help
Choose this option to enable HDCP state machine
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index b8c31b697797..66f32d965c72 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -704,6 +704,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = {
.open = nv50_crc_debugfs_flip_threshold_open,
.read = seq_read,
.write = nv50_crc_debugfs_flip_threshold_set,
+ .release = single_release,
};
int nv50_head_crc_late_register(struct nv50_head *head)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d66f97280282..72099d1e4816 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -52,6 +52,7 @@ nv50_head_flush_clr(struct nv50_head *head,
void
nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
{
+ if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
asyh->olut.buffer,
@@ -67,7 +68,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
- if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.base ) head->func->base (head, asyh);
if (asyh->set.ovly ) head->func->ovly (head, asyh);
if (asyh->set.dither ) head->func->dither (head, asyh);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index c68cc957248e..a582c0cb0cb0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -71,6 +71,7 @@
#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
#define VOLTA_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c36f
#define TURING_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c46f
+#define AMPERE_CHANNEL_GPFIFO_B /* clc36f.h */ 0x0000c76f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
@@ -200,6 +201,7 @@
#define PASCAL_DMA_COPY_B 0x0000c1b5
#define VOLTA_DMA_COPY_A 0x0000c3b5
#define TURING_DMA_COPY_A 0x0000c5b5
+#define AMPERE_DMA_COPY_B 0x0000c7b5
#define FERMI_DECOMPRESS 0x000090b8
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 54fab7cc36c1..64ee82c7c1be 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -77,4 +77,5 @@ int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int ga102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6d07e653f82d..c58bcdba2c7a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -844,6 +844,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 80099ef75702..ea7769135b0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -250,7 +250,8 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u64 runlist, bool priv, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { AMPERE_CHANNEL_GPFIFO_B,
+ TURING_CHANNEL_GPFIFO_A,
VOLTA_CHANNEL_GPFIFO_A,
PASCAL_CHANNEL_GPFIFO_A,
MAXWELL_CHANNEL_GPFIFO_A,
@@ -386,7 +387,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
nvif_object_map(&chan->user, NULL, 0);
- if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
+ if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
+ chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {
ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
nouveau_channel_killed,
true, NV906F_V0_NTFY_KILLED,
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index c2bc05eb2e54..1cbe01048b93 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
.open = nouveau_debugfs_pstate_open,
.read = seq_read,
.write = nouveau_debugfs_pstate_set,
+ .release = single_release,
};
static struct drm_info_list nouveau_debugfs_list[] = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1f828c9f691c..6109cd9e3399 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -345,6 +345,9 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
u32 arg0, arg1;
int ret;
+ if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
+ return;
+
/* Allocate channel that has access to the graphics engine. */
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
@@ -469,6 +472,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
case PASCAL_CHANNEL_GPFIFO_A:
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
+ case AMPERE_CHANNEL_GPFIFO_B:
ret = nvc0_fence_create(drm);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5b27845075a1..8c2ecc282723 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -247,10 +247,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
}
ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
- if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
+ if (ret)
return ret;
- }
/* we restrict allowed domains on nv50+ to only the types
* that were requested at creation time. not possibly on
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 7c9c928c3196..c3526a8622e3 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -204,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
- priv->base.uevent = true;
+ priv->base.uevent = drm->client.device.info.family < NV_DEVICE_INFO_V0_AMPERE;
mutex_init(&priv->mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 93ddf63d1114..ca75c5f6ecaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2602,6 +2602,7 @@ nv172_chipset = {
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
};
static const struct nvkm_device_chip
@@ -2622,6 +2623,7 @@ nv174_chipset = {
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
};
static const struct nvkm_device_chip
@@ -2642,6 +2644,7 @@ nv177_chipset = {
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
};
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 3209eb7af65f..5e831d347a95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/gv100.o
nvkm-y += nvkm/engine/fifo/tu102.o
+nvkm-y += nvkm/engine/fifo/ga102.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
new file mode 100644
index 000000000000..c630dbd2911a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine)
+#define ga102_chan(p) container_of((p), struct ga102_chan, object)
+#include <engine/fifo.h>
+#include "user.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
+#include <nvif/cl0080.h>
+#include <nvif/clc36f.h>
+#include <nvif/class.h>
+
+struct ga102_fifo {
+ struct nvkm_fifo base;
+};
+
+struct ga102_chan {
+ struct nvkm_object object;
+
+ struct {
+ u32 runl;
+ u32 chan;
+ } ctrl;
+
+ struct nvkm_memory *mthd;
+ struct nvkm_memory *inst;
+ struct nvkm_memory *user;
+ struct nvkm_memory *runl;
+
+ struct nvkm_vmm *vmm;
+};
+
+static int
+ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+ if (index == 0) {
+ oclass->ctor = nvkm_object_new;
+ oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B };
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+ u64 bar2 = nvkm_memory_bar2(chan->user);
+
+ if (bar2 == ~0ULL)
+ return -EFAULT;
+
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = device->func->resource_addr(device, 3) + bar2;
+ *size = 0x1000;
+ return 0;
+}
+
+static int
+ga102_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+
+ nvkm_wr32(device, chan->ctrl.chan, 0x00000003);
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000))
+ break;
+ );
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x088, 0);
+
+ nvkm_wr32(device, chan->ctrl.chan, 0xffffffff);
+ return 0;
+}
+
+static int
+ga102_chan_init(struct nvkm_object *object)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+
+ nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000);
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl)));
+ nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl)));
+ nvkm_wr32(device, chan->ctrl.runl + 0x088, 2);
+
+ nvkm_wr32(device, chan->ctrl.chan, 0x00000002);
+ nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0);
+ return 0;
+}
+
+static void *
+ga102_chan_dtor(struct nvkm_object *object)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+
+ if (chan->vmm) {
+ nvkm_vmm_part(chan->vmm, chan->inst);
+ nvkm_vmm_unref(&chan->vmm);
+ }
+
+ nvkm_memory_unref(&chan->runl);
+ nvkm_memory_unref(&chan->user);
+ nvkm_memory_unref(&chan->inst);
+ nvkm_memory_unref(&chan->mthd);
+ return chan;
+}
+
+static const struct nvkm_object_func
+ga102_chan = {
+ .dtor = ga102_chan_dtor,
+ .init = ga102_chan_init,
+ .fini = ga102_chan_fini,
+ .map = ga102_chan_map,
+ .sclass = ga102_chan_sclass,
+};
+
+static int
+ga102_chan_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ struct volta_channel_gpfifo_a_v0 *args = argv;
+ struct nvkm_top_device *tdev;
+ struct nvkm_vmm *vmm;
+ struct ga102_chan *chan;
+ int ret;
+
+ if (argc != sizeof(*args))
+ return -ENOSYS;
+
+ vmm = nvkm_uvmm_search(oclass->client, args->vmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&ga102_chan, oclass, &chan->object);
+ *pobject = &chan->object;
+
+ list_for_each_entry(tdev, &device->top->device, head) {
+ if (tdev->type == NVKM_ENGINE_CE) {
+ chan->ctrl.runl = tdev->runlist;
+ break;
+ }
+ }
+
+ if (!chan->ctrl.runl)
+ return -ENODEV;
+
+ chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0;
+
+ args->chid = 0;
+ args->inst = 0;
+ args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x010, 0x0000face);
+ nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+ nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset));
+ nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) |
+ (order_base_2(args->ilength / 8) << 16));
+ nvkm_wo32(chan->inst, 0x084, 0x20400000);
+ nvkm_wo32(chan->inst, 0x094, 0x30000001);
+ nvkm_wo32(chan->inst, 0x0ac, 0x00020000);
+ nvkm_wo32(chan->inst, 0x0e4, 0x00000000);
+ nvkm_wo32(chan->inst, 0x0e8, 0);
+ nvkm_wo32(chan->inst, 0x0f4, 0x00001000);
+ nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
+ nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+ nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd)));
+ nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd)));
+ nvkm_done(chan->inst);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->runl);
+ nvkm_wo32(chan->runl, 0x00, 0x80030001);
+ nvkm_wo32(chan->runl, 0x04, 1);
+ nvkm_wo32(chan->runl, 0x08, 0);
+ nvkm_wo32(chan->runl, 0x0c, 0x00000000);
+ nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user)));
+ nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user)));
+ nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst)));
+ nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst)));
+ nvkm_done(chan->runl);
+
+ ret = nvkm_vmm_join(vmm, chan->inst);
+ if (ret)
+ return ret;
+
+ chan->vmm = nvkm_vmm_ref(vmm);
+ return 0;
+}
+
+static const struct nvkm_device_oclass
+ga102_chan_oclass = {
+ .ctor = ga102_chan_new,
+};
+
+static int
+ga102_user_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ return tu102_fifo_user_new(oclass, argv, argc, pobject);
+}
+
+static const struct nvkm_device_oclass
+ga102_user_oclass = {
+ .ctor = ga102_user_new,
+};
+
+static int
+ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
+{
+ if (index == 0) {
+ oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A };
+ *class = &ga102_user_oclass;
+ return 0;
+ } else
+ if (index == 1) {
+ oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B };
+ *class = &ga102_chan_oclass;
+ return 0;
+ }
+
+ return 2;
+}
+
+static int
+ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+ switch (mthd) {
+ case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0;
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+static void *
+ga102_fifo_dtor(struct nvkm_engine *engine)
+{
+ return ga102_fifo(engine);
+}
+
+static const struct nvkm_engine_func
+ga102_fifo = {
+ .dtor = ga102_fifo_dtor,
+ .info = ga102_fifo_info,
+ .base.sclass = ga102_fifo_sclass,
+};
+
+int
+ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ struct ga102_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine);
+ *pfifo = &fifo->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
index 31933f3e5a07..c982d834c8d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
@@ -54,7 +54,7 @@ ga100_top_oneinit(struct nvkm_top *top)
info->reset = (data & 0x0000001f);
break;
case 2:
- info->runlist = (data & 0x0000fc00) >> 10;
+ info->runlist = (data & 0x00fffc00);
info->engine = (data & 0x00000003);
break;
default:
@@ -85,9 +85,10 @@ ga100_top_oneinit(struct nvkm_top *top)
}
nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
- "runlist %2d engine %2d reset %2d\n", type, inst,
+ "runlist %6x engine %2d reset %2d\n", type, inst,
info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
- info->addr, info->fault, info->runlist, info->engine, info->reset);
+ info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist,
+ info->engine, info->reset);
info = NULL;
}
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 2d8794d495d0..3d8a9ab47cae 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -146,8 +146,8 @@ static const struct reg_sequence y030xx067a_init_sequence[] = {
{ 0x09, REG09_SUB_BRIGHT_R(0x20) },
{ 0x0a, REG0A_SUB_BRIGHT_B(0x20) },
{ 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN },
- { 0x0c, REG0C_CONTRAST_R(0x10) },
- { 0x0d, REG0D_CONTRAST_G(0x10) },
+ { 0x0c, REG0C_CONTRAST_R(0x00) },
+ { 0x0d, REG0D_CONTRAST_G(0x00) },
{ 0x0e, REG0E_CONTRAST_B(0x10) },
{ 0x0f, 0 },
{ 0x10, REG10_BRIGHT(0x7f) },
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ba9e14da41b4..a25b98b7f5bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1174,26 +1174,24 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
*
* Action plan:
*
- * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
- * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
- * make 60000 kHz then the clock framework will actually give us
- * the right clock.
+ * 1. Try to set the exact rate first, and confirm the clock framework
+ * can provide it.
*
- * NOTE: if the PLL (maybe through a divider) could actually make
- * a clock rate 999 Hz higher instead of the one we want then this
- * could be a problem. Unfortunately there's not much we can do
- * since it's baked into DRM to use kHz. It shouldn't matter in
- * practice since Rockchip PLLs are controlled by tables and
- * even if there is a divider in the middle I wouldn't expect PLL
- * rates in the table that are just a few kHz different.
+ * 2. If the clock framework cannot provide the exact rate, we should
+ * add 999 Hz to the requested rate. That way if the clock we need
+ * is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then
+ * the clock framework will actually give us the right clock.
*
- * 2. Get the clock framework to round the rate for us to tell us
+ * 3. Get the clock framework to round the rate for us to tell us
* what it will actually make.
*
- * 3. Store the rounded up rate so that we don't need to worry about
+ * 4. Store the rounded up rate so that we don't need to worry about
* this in the actual clk_set_rate().
*/
- rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+ rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000);
+ if (rate / 1000 != adjusted_mode->clock)
+ rate = clk_round_rate(vop->dclk,
+ adjusted_mode->clock * 1000 + 999);
adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
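
The rewritten comment above describes a two-step approach: ask the clock framework for the exact dot clock first, and only add the 999 Hz fudge when the exact rate cannot be provided. A standalone sketch of that fallback, with round_rate() as a stand-in for clk_round_rate() and an invented table of achievable PLL rates:

#include <stdio.h>

/* Stand-in for clk_round_rate(): pretend the PLL can only produce the
 * rates in this table and always rounds the request down. */
static long round_rate(long requested_hz)
{
	static const long table[] = { 59000000, 60000001, 61000000 };
	long best = table[0];

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i] <= requested_hz)
			best = table[i];
	return best;
}

/* mode_clock_khz is what DRM hands over (kHz granularity). */
static int fixup_clock(int mode_clock_khz)
{
	long rate = round_rate(mode_clock_khz * 1000L);

	/* Only apply the 999 Hz fudge when the exact rate is unavailable. */
	if (rate / 1000 != mode_clock_khz)
		rate = round_rate(mode_clock_khz * 1000L + 999);

	return (int)((rate + 999) / 1000);      /* DIV_ROUND_UP(rate, 1000) */
}

int main(void)
{
	/* The PLL can really make 60000001 Hz; DRM asks for 60000 kHz. */
	printf("requested 60000 kHz -> stored %d kHz\n", fixup_clock(60000));
	return 0;
}
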
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index f75fb157f2ff..016b877051da 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -216,11 +216,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_clk_tmds;
}
+ ret = sun8i_hdmi_phy_init(hdmi->phy);
+ if (ret)
+ goto err_disable_clk_tmds;
+
drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
- sun8i_hdmi_phy_init(hdmi->phy);
-
plat_data->mode_valid = hdmi->quirks->mode_valid;
plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
@@ -262,6 +264,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
+ sun8i_hdmi_phy_deinit(hdmi->phy);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
gpiod_set_value(hdmi->ddc_en, 0);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 74f6ed0e2570..bffe1b9cd3dc 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -169,6 +169,7 @@ struct sun8i_hdmi_phy {
struct clk *clk_phy;
struct clk *clk_pll0;
struct clk *clk_pll1;
+ struct device *dev;
unsigned int rcal;
struct regmap *regs;
struct reset_control *rst_phy;
@@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy);
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
struct dw_hdmi_plat_data *plat_data);
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index c9239708d398..b64d93da651d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
}
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
{
+ int ret;
+
+ ret = reset_control_deassert(phy->rst_phy);
+ if (ret) {
+ dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(phy->clk_bus);
+ if (ret) {
+ dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret);
+ goto err_assert_rst_phy;
+ }
+
+ ret = clk_prepare_enable(phy->clk_mod);
+ if (ret) {
+ dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret);
+ goto err_disable_clk_bus;
+ }
+
+ if (phy->variant->has_phy_clk) {
+ ret = sun8i_phy_clk_create(phy, phy->dev,
+ phy->variant->has_second_pll);
+ if (ret) {
+ dev_err(phy->dev, "Couldn't create the PHY clock\n");
+ goto err_disable_clk_mod;
+ }
+
+ clk_prepare_enable(phy->clk_phy);
+ }
+
phy->variant->phy_init(phy);
+
+ return 0;
+
+err_disable_clk_mod:
+ clk_disable_unprepare(phy->clk_mod);
+err_disable_clk_bus:
+ clk_disable_unprepare(phy->clk_bus);
+err_assert_rst_phy:
+ reset_control_assert(phy->rst_phy);
+
+ return ret;
+}
+
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
+{
+ clk_disable_unprepare(phy->clk_mod);
+ clk_disable_unprepare(phy->clk_bus);
+ clk_disable_unprepare(phy->clk_phy);
+
+ reset_control_assert(phy->rst_phy);
}
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
@@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
return -ENOMEM;
phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
+ phy->dev = dev;
ret = of_address_to_resource(node, 0, &res);
if (ret) {
@@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
goto err_put_clk_pll1;
}
- ret = reset_control_deassert(phy->rst_phy);
- if (ret) {
- dev_err(dev, "Cannot deassert phy reset control: %d\n", ret);
- goto err_put_rst_phy;
- }
-
- ret = clk_prepare_enable(phy->clk_bus);
- if (ret) {
- dev_err(dev, "Cannot enable bus clock: %d\n", ret);
- goto err_deassert_rst_phy;
- }
-
- ret = clk_prepare_enable(phy->clk_mod);
- if (ret) {
- dev_err(dev, "Cannot enable mod clock: %d\n", ret);
- goto err_disable_clk_bus;
- }
-
- if (phy->variant->has_phy_clk) {
- ret = sun8i_phy_clk_create(phy, dev,
- phy->variant->has_second_pll);
- if (ret) {
- dev_err(dev, "Couldn't create the PHY clock\n");
- goto err_disable_clk_mod;
- }
-
- clk_prepare_enable(phy->clk_phy);
- }
-
platform_set_drvdata(pdev, phy);
return 0;
-err_disable_clk_mod:
- clk_disable_unprepare(phy->clk_mod);
-err_disable_clk_bus:
- clk_disable_unprepare(phy->clk_bus);
-err_deassert_rst_phy:
- reset_control_assert(phy->rst_phy);
-err_put_rst_phy:
- reset_control_put(phy->rst_phy);
err_put_clk_pll1:
clk_put(phy->clk_pll1);
err_put_clk_pll0:
@@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
{
struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
- clk_disable_unprepare(phy->clk_mod);
- clk_disable_unprepare(phy->clk_bus);
- clk_disable_unprepare(phy->clk_phy);
-
- reset_control_assert(phy->rst_phy);
-
reset_control_put(phy->rst_phy);
clk_put(phy->clk_pll0);
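
sun8i_hdmi_phy_init() above now owns the reset/clock bring-up and unwinds it with the usual goto ladder on failure, while sun8i_hdmi_phy_deinit() releases everything in reverse order at unbind time. A standalone sketch of that acquire/unwind shape, with grab()/drop() standing in for the reset-control and clock calls:

#include <stdio.h>

/* Stubs standing in for reset_control_deassert()/clk_prepare_enable(). */
static int grab(const char *what, int fail)
{
	if (fail) {
		printf("failed to grab %s\n", what);
		return -1;
	}
	printf("grabbed %s\n", what);
	return 0;
}

static void drop(const char *what) { printf("dropped %s\n", what); }

/* Acquire a reset and two clocks; on failure release only what was
 * taken, in reverse order -- the same unwind shape as the PHY init. */
static int phy_init(int fail_at)
{
	int ret;

	ret = grab("phy reset", fail_at == 1);
	if (ret)
		return ret;

	ret = grab("bus clock", fail_at == 2);
	if (ret)
		goto err_assert_rst;

	ret = grab("mod clock", fail_at == 3);
	if (ret)
		goto err_disable_bus;

	return 0;

err_disable_bus:
	drop("bus clock");
err_assert_rst:
	drop("phy reset");
	return ret;
}

int main(void)
{
	return phy_init(3) ? 1 : 0;   /* simulate the mod clock failing */
}
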
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b4b4653fe301..ed8a4b7f8b6e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1395,14 +1395,6 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
return 0;
}
-static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = {
- SND_SOC_DAPM_OUTPUT("TX"),
-};
-
-static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
- { "TX", NULL, "Playback" },
-};
-
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
.name = "vc4-hdmi-cpu-dai-component",
};
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 4e0b7c2882ce..015e11c4663f 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -49,7 +49,7 @@
#define MLXCPLD_LPCI2C_NACK_IND 2
#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0f
+#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c
#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
enum mlxcpld_i2c_frequency {
@@ -495,7 +495,7 @@ mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
return err;
/* Set frequency only if it is not 100KHz, which is default. */
- switch ((data->reg & data->mask) >> data->bit) {
+ switch ((regval & data->mask) >> data->bit) {
case MLXCPLD_I2C_FREQ_1000KHZ:
freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
break;
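
The i2c-mlxcpld fix switches on the value just read back from the CPLD (regval) rather than on the register description, then extracts the frequency-capability field with the usual mask-and-shift. A small standalone illustration of that extraction, with the field layout invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: frequency capability in bits [5:4]. */
#define FREQ_MASK  0x30u
#define FREQ_SHIFT 4u

enum freq_cap { FREQ_100KHZ = 0, FREQ_400KHZ = 1, FREQ_1000KHZ = 2 };

static enum freq_cap decode_freq(uint8_t regval)
{
	return (enum freq_cap)((regval & FREQ_MASK) >> FREQ_SHIFT);
}

int main(void)
{
	uint8_t regval = 0x25;  /* pretend this came back from the CPLD */

	switch (decode_freq(regval)) {
	case FREQ_1000KHZ: puts("capability: 1 MHz");   break;
	case FREQ_400KHZ:  puts("capability: 400 kHz"); break;
	default:           puts("capability: 100 kHz"); break;
	}
	return 0;
}
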
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 477480d1de6b..7d4b3eb7077a 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -41,6 +41,8 @@
#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
+#define I2C_ST_START_CON 0x8001
+#define I2C_FS_START_CON 0x1800
#define I2C_TIME_CLR_VALUE 0x0000
#define I2C_TIME_DEFAULT_VALUE 0x0003
#define I2C_WRRD_TRANAC_VALUE 0x0002
@@ -480,6 +482,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
u16 intr_stat_reg;
+ u16 ext_conf_val;
mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
@@ -518,8 +521,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
if (i2c->dev_comp->ltiming_adjust)
mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
+ if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
+ ext_conf_val = I2C_ST_START_CON;
+ else
+ ext_conf_val = I2C_FS_START_CON;
+
if (i2c->dev_comp->timing_adjust) {
- mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
+ ext_conf_val = i2c->ac_timing.ext;
mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
OFFSET_CLOCK_DIV);
mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,
@@ -544,6 +552,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
OFFSET_HS_STA_STO_AC_TIMING);
}
}
+ mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
/* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
if (i2c->have_pmic)
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index aaeeacc12121..546cc935e035 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -454,6 +454,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
break;
i2c_acpi_register_device(adapter, adev, &info);
+ put_device(&adapter->dev);
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index f1099b495301..467519a2027e 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -5,3 +5,4 @@
# Keep in alphabetical order
obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
+CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 124c41adeca1..c5c71b7ab7e8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -308,7 +308,6 @@ config APPLE_DART
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -438,7 +437,7 @@ config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on QCOM_SCM=y
+ select QCOM_SCM
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
index e240a7bcf310..b0cc01aa20c9 100644
--- a/drivers/iommu/arm/arm-smmu/Makefile
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 9f465e146799..2c25cce38060 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -215,7 +215,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
of_device_is_compatible(np, "nvidia,tegra186-smmu"))
return nvidia_smmu_impl_init(smmu);
- smmu = qcom_smmu_impl_init(smmu);
+ if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
+ smmu = qcom_smmu_impl_init(smmu);
if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
smmu->impl = &mrvl_mmu500_impl;
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index cb0afe897162..7313454e403a 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -480,6 +480,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
ctr_down(ctr, CAPI_CTR_DETACHED);
+ if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
+
if (capi_controller[ctr->cnr - 1] != ctr) {
err = -EINVAL;
goto unlock_out;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 2a1ddd47a096..a52f275f8263 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
nj_disable_hwirq(card);
mode_tiger(&card->bc[0], ISDN_P_NONE);
mode_tiger(&card->bc[1], ISDN_P_NONE);
- card->isac.release(&card->isac);
spin_unlock_irqrestore(&card->lock, flags);
+ card->isac.release(&card->isac);
release_region(card->base, card->base_s);
card->base_s = 0;
}
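
The netjet change drops the card spinlock before invoking card->isac.release(). A standalone sketch of why that ordering matters, using a pthread mutex in place of the spinlock and assuming, purely for illustration, that the release path needs the same lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t card_lock = PTHREAD_MUTEX_INITIALIZER;

/* A release callback that itself takes the lock (or might sleep):
 * calling it while card_lock is still held would deadlock. */
static void isac_release(void)
{
	pthread_mutex_lock(&card_lock);
	puts("isac released");
	pthread_mutex_unlock(&card_lock);
}

static void nj_release(void)
{
	pthread_mutex_lock(&card_lock);
	puts("channels shut down");        /* work that needs the lock  */
	pthread_mutex_unlock(&card_lock);  /* drop the lock first ...   */

	isac_release();                    /* ... then call the callback */
}

int main(void)
{
	nj_release();
	return 0;
}
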
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 157c924686e4..80321e03809a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -565,7 +565,7 @@ config VIDEO_QCOM_VENUS
depends on VIDEO_DEV && VIDEO_V4L2 && QCOM_SMEM
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
select QCOM_MDT_LOADER if ARCH_QCOM
- select QCOM_SCM if ARCH_QCOM
+ select QCOM_SCM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 71313961cc54..95b3511b0560 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -547,7 +547,7 @@ config MMC_SDHCI_MSM
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
- select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
+ select QCOM_SCM if MMC_CRYPTO
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 3f28eb4d17fe..8f36536cb1b6 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -746,7 +746,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
writel(start, host->regs + SD_EMMC_START);
}
-/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+/* local sg copy for dram_access_quirk */
static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
size_t buflen, bool to_buffer)
{
@@ -764,21 +764,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
sg_miter_start(&miter, sgl, nents, sg_flags);
while ((offset < buflen) && sg_miter_next(&miter)) {
- unsigned int len;
+ unsigned int buf_offset = 0;
+ unsigned int len, left;
+ u32 *buf = miter.addr;
len = min(miter.length, buflen - offset);
+ left = len;
- /* When dram_access_quirk, the bounce buffer is a iomem mapping */
- if (host->dram_access_quirk) {
- if (to_buffer)
- memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
- else
- memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+ if (to_buffer) {
+ do {
+ writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
+
+ buf_offset += 4;
+ left -= 4;
+ } while (left);
} else {
- if (to_buffer)
- memcpy(host->bounce_buf + offset, miter.addr, len);
- else
- memcpy(miter.addr, host->bounce_buf + offset, len);
+ do {
+ *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
+
+ buf_offset += 4;
+ left -= 4;
+ } while (left);
}
offset += len;
@@ -830,7 +836,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
if (data->flags & MMC_DATA_WRITE) {
cmd_cfg |= CMD_CFG_DATA_WR;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+ if (host->dram_access_quirk)
+ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+ else
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
dma_wmb();
}
@@ -849,12 +859,43 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}
+static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* Reject request if any element offset or size is not 32bit aligned */
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length, sizeof(u32))) {
+ dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct meson_host *host = mmc_priv(mmc);
bool needs_pre_post_req = mrq->data &&
!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
+ /*
+ * The memory at the end of the controller used as bounce buffer for
+ * the dram_access_quirk only accepts 32bit read/write access,
+ * check the aligment and length of the data before starting the request.
+ */
+ if (host->dram_access_quirk && mrq->data) {
+ mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
+ if (mrq->cmd->error) {
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
if (needs_pre_post_req) {
meson_mmc_get_transfer_mode(mmc, mrq);
if (!meson_mmc_desc_chain_mode(mrq->data))
@@ -999,7 +1040,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
if (meson_mmc_bounce_buf_read(data)) {
xfer_bytes = data->blksz * data->blocks;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+ if (host->dram_access_quirk)
+ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+ else
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
}
next_cmd = meson_mmc_get_next_command(cmd);
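
The meson-gx hunks above restrict the dram_access_quirk bounce buffer to whole 32-bit accesses: requests with unaligned scatterlist elements are rejected up front, and the copy loop moves data one word at a time with readl()/writel(). A standalone sketch of the same idea, with a plain array standing in for the iomem region:

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

/* Word-only "bounce buffer": every access must be a whole 32-bit word,
 * mimicking the iomem region behind dram_access_quirk. */
static uint32_t bounce[16];

static int copy_to_bounce(const void *src, size_t offset, size_t len)
{
	const uint32_t *buf = src;

	/* Reject anything that is not 32-bit aligned, as the new
	 * meson_mmc_validate_dram_access() does for each sg element. */
	if (!IS_ALIGNED(offset, sizeof(uint32_t)) ||
	    !IS_ALIGNED(len, sizeof(uint32_t)))
		return -1;

	for (size_t i = 0; i < len / 4; i++)
		bounce[offset / 4 + i] = buf[i];   /* stands in for writel() */
	return 0;
}

int main(void)
{
	uint32_t data[4] = { 1, 2, 3, 4 };

	printf("aligned copy:   %d\n", copy_to_bounce(data, 0, sizeof(data)));
	printf("unaligned copy: %d\n", copy_to_bounce(data, 0, 6));
	return 0;
}
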
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 5564d7b23e7c..d1a1c548c515 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
@@ -61,7 +62,6 @@ static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
- unsigned long timeout;
host->mmc->actual_clock = 0;
@@ -86,16 +86,11 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
- timeout = 20;
- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (timeout == 0) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- return;
- }
- timeout--;
- mdelay(1);
+ if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
+ 1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
}
clk |= SDHCI_CLOCK_CARD_EN;
@@ -114,6 +109,7 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ unsigned int tmp;
sdhci_reset(host, mask);
@@ -126,6 +122,10 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
SDMMC_CALCR);
+
+ if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
+ 10, 20000, false, host, SDMMC_CALCR))
+ dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
}
}
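
The sdhci-of-at91 change replaces the open-coded mdelay() loops with read_poll_timeout(). A standalone sketch of what such a helper does, with a toy register that becomes stable after a few reads; the 1 ms poll interval and 20 ms budget mirror the values used in the driver:

#include <stdio.h>
#include <time.h>

/* Toy register that becomes "stable" after a few reads. */
static unsigned int reads;
static unsigned int read_clock_reg(void)
{
	return ++reads >= 5 ? 0x2 : 0x0;   /* bit 1 = "internal clock stable" */
}

/* Minimal stand-in for read_poll_timeout(): poll op() until the stable
 * bit is set or timeout_us elapses, sleeping sleep_us between reads. */
static int poll_timeout(unsigned int (*op)(void), unsigned int stable_bit,
			long sleep_us, long timeout_us)
{
	struct timespec ts = { 0, sleep_us * 1000L };
	long waited = 0;

	for (;;) {
		if (op() & stable_bit)
			return 0;
		if (waited >= timeout_us)
			return -1;              /* -ETIMEDOUT in the kernel */
		nanosleep(&ts, NULL);
		waited += sleep_us;
	}
}

int main(void)
{
	if (poll_timeout(read_clock_reg, 0x2, 1000, 20000))
		puts("Internal clock never stabilised.");
	else
		puts("clock stable");
	return 0;
}
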
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 1542bfb8b5e5..7c2968a639eb 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -449,8 +449,10 @@ EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
/* timer started */
- if (dev->mib_read_interval)
+ if (dev->mib_read_interval) {
+ dev->mib_read_interval = 0;
cancel_delayed_work_sync(&dev->mib_read);
+ }
dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
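
The ksz_common change zeroes mib_read_interval before cancelling the delayed work, so a worker instance racing with the teardown cannot re-arm itself. A much-simplified, single-threaded sketch of why that ordering matters (the flag and worker here are stand-ins, not the real ksz structures):

#include <stdbool.h>
#include <stdio.h>

static unsigned int mib_read_interval;   /* 0 means "do not re-arm" */
static bool work_pending;

/* The worker re-queues itself only while the interval is non-zero,
 * mirroring the self-arming mib_read delayed work. */
static void mib_read_work(void)
{
	/* ... read counters ... */
	work_pending = (mib_read_interval != 0);
}

static void switch_remove(bool clear_interval_first)
{
	if (clear_interval_first)
		mib_read_interval = 0;   /* stop re-arming before the cancel */

	work_pending = false;            /* cancel_delayed_work_sync() stand-in */
	mib_read_work();                 /* a last run racing with the cancel  */

	if (!clear_interval_first)
		mib_read_interval = 0;   /* too late: the work re-armed above  */
}

int main(void)
{
	mib_read_interval = 1000;
	switch_remove(true);
	printf("clear first:  pending=%d\n", work_pending);

	mib_read_interval = 1000;
	switch_remove(false);
	printf("cancel first: pending=%d\n", work_pending);
	return 0;
}
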
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 03744d1c43fc..8dadcae93c9b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -12,6 +12,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
@@ -749,7 +750,11 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+ /* Internal PHYs propagate their configuration directly to the MAC.
+ * External PHYs depend on whether the PPU is enabled for this port.
+ */
+ if (((!mv88e6xxx_phy_is_internal(ds, port) &&
+ !mv88e6xxx_port_ppu_updates(chip, port)) ||
mode == MLO_AN_FIXED) && ops->port_sync_link)
err = ops->port_sync_link(chip, port, mode, false);
mv88e6xxx_reg_unlock(chip);
@@ -772,7 +777,12 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) {
+ /* Internal PHYs propagate their configuration directly to the MAC.
+ * External PHYs depend on whether the PPU is enabled for this port.
+ */
+ if ((!mv88e6xxx_phy_is_internal(ds, port) &&
+ !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ mode == MLO_AN_FIXED) {
/* FIXME: for an automedia port, should we force the link
* down here - what if the link comes up due to "other" media
* while we're bringing the port up, how is the exclusivity
@@ -1677,6 +1687,30 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
return 0;
}
+static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct mv88e6xxx_port *p = &chip->ports[port];
+ u16 pvid = MV88E6XXX_VID_STANDALONE;
+ bool drop_untagged = false;
+ int err;
+
+ if (dp->bridge_dev) {
+ if (br_vlan_enabled(dp->bridge_dev)) {
+ pvid = p->bridge_pvid.vid;
+ drop_untagged = !p->bridge_pvid.valid;
+ } else {
+ pvid = MV88E6XXX_VID_BRIDGED;
+ }
+ }
+
+ err = mv88e6xxx_port_set_pvid(chip, port, pvid);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_drop_untagged(chip, port, drop_untagged);
+}
+
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
@@ -1690,7 +1724,16 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
+
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto unlock;
+
+unlock:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -1725,11 +1768,15 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
u16 fid;
int err;
- /* Null VLAN ID corresponds to the port private database */
+ /* Ports have two private address databases: one for when the port is
+ * standalone and one for when the port is under a bridge and the
+ * 802.1Q mode is disabled. When the port is standalone, DSA wants its
+ * address database to remain 100% empty, so we never load an ATU entry
+ * into a standalone port's database. Therefore, translate the null
+ * VLAN ID into the port's database used for VLAN-unaware bridging.
+ */
if (vid == 0) {
- err = mv88e6xxx_port_get_fid(chip, port, &fid);
- if (err)
- return err;
+ fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2123,6 +2170,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mv88e6xxx_port *p = &chip->ports[port];
bool warn;
u8 member;
int err;
@@ -2156,13 +2204,21 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
}
if (pvid) {
- err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid);
- if (err) {
- dev_err(ds->dev, "p%d: failed to set PVID %d\n",
- port, vlan->vid);
+ p->bridge_pvid.vid = vlan->vid;
+ p->bridge_pvid.valid = true;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto out;
+ } else if (vlan->vid && p->bridge_pvid.vid == vlan->vid) {
+ /* The old pvid was reinstalled as a non-pvid VLAN */
+ p->bridge_pvid.valid = false;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
goto out;
- }
}
+
out:
mv88e6xxx_reg_unlock(chip);
@@ -2212,6 +2268,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port *p = &chip->ports[port];
int err = 0;
u16 pvid;
@@ -2229,7 +2286,9 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
goto unlock;
if (vlan->vid == pvid) {
- err = mv88e6xxx_port_set_pvid(chip, port, 0);
+ p->bridge_pvid.valid = false;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
}
@@ -2393,7 +2452,16 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
int err;
mv88e6xxx_reg_lock(chip);
+
err = mv88e6xxx_bridge_map(chip, br);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto unlock;
+
+unlock:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -2403,11 +2471,20 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
+
if (mv88e6xxx_bridge_map(chip, br) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore standalone pvid: %pe\n",
+ port, ERR_PTR(err));
+
mv88e6xxx_reg_unlock(chip);
}
@@ -2853,6 +2930,20 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
+ /* Associate MV88E6XXX_VID_BRIDGED with MV88E6XXX_FID_BRIDGED in the
+ * ATU by virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID after MV88E6XXX_FID_STANDALONE. This will be used
+ * as the private PVID on ports under a VLAN-unaware bridge.
+ * Shared (DSA and CPU) ports must also be members of it, to translate
+ * the VID from the DSA tag into MV88E6XXX_FID_BRIDGED, instead of
+ * relying on their port default FID.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+ false);
+ if (err)
+ return err;
+
if (chip->info->ops->port_set_jumbo_size) {
err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
if (err)
@@ -2925,7 +3016,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- err = mv88e6xxx_port_set_fid(chip, port, 0);
+ err = mv88e6xxx_port_set_fid(chip, port, MV88E6XXX_FID_STANDALONE);
if (err)
return err;
@@ -3115,6 +3206,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
}
}
+ err = mv88e6xxx_vtu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -3144,10 +3239,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
- err = mv88e6xxx_vtu_setup(chip);
- if (err)
- goto unlock;
-
err = mv88e6xxx_pvt_setup(chip);
if (err)
goto unlock;
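
mv88e6xxx_port_commit_pvid() above centralises the choice of which PVID a port gets and whether untagged frames are dropped, based on whether the port is bridged and whether that bridge is VLAN-aware. A standalone sketch of just that decision table (the VID values are illustrative, not the driver's constants):

#include <stdbool.h>
#include <stdio.h>

#define VID_STANDALONE 0
#define VID_BRIDGED    4095   /* illustrative value only */

struct port_state {
	bool under_bridge;
	bool bridge_vlan_aware;
	bool bridge_pvid_valid;
	unsigned short bridge_pvid;
};

/* Mirrors the decision in mv88e6xxx_port_commit_pvid(): which pvid to
 * program and whether untagged traffic should be dropped. */
static void commit_pvid(const struct port_state *p,
			unsigned short *pvid, bool *drop_untagged)
{
	*pvid = VID_STANDALONE;
	*drop_untagged = false;

	if (p->under_bridge) {
		if (p->bridge_vlan_aware) {
			*pvid = p->bridge_pvid;
			*drop_untagged = !p->bridge_pvid_valid;
		} else {
			*pvid = VID_BRIDGED;
		}
	}
}

int main(void)
{
	struct port_state p = { true, true, false, 0 };
	unsigned short pvid;
	bool drop;

	commit_pvid(&p, &pvid, &drop);
	printf("pvid=%u drop_untagged=%d\n", pvid, drop);
	return 0;
}
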
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 59f316cc8583..8271b8aa7b71 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -21,6 +21,9 @@
#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_FID_STANDALONE 0
+#define MV88E6XXX_FID_BRIDGED 1
+
/* PVT limits for 4-bit port and 5-bit switch */
#define MV88E6XXX_MAX_PVT_SWITCHES 32
#define MV88E6XXX_MAX_PVT_PORTS 16
@@ -246,9 +249,15 @@ struct mv88e6xxx_policy {
u16 vid;
};
+struct mv88e6xxx_vlan {
+ u16 vid;
+ bool valid;
+};
+
struct mv88e6xxx_port {
struct mv88e6xxx_chip *chip;
int port;
+ struct mv88e6xxx_vlan bridge_pvid;
u64 serdes_stats[2];
u64 atu_member_violation;
u64 atu_miss_violation;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 451028c57af8..d9817b20ea64 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -1257,6 +1257,27 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+ bool drop_untagged)
+{
+ u16 old, new;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &old);
+ if (err)
+ return err;
+
+ if (drop_untagged)
+ new = old | MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+ else
+ new = old & ~MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+
+ if (new == old)
+ return 0;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
+}
+
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
{
u16 reg;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index b10e5aebacf6..03382b66f800 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -423,6 +423,8 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+ bool drop_untagged);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 01c01bd0c854..83808e7dbdda 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -266,12 +266,12 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
*/
static int felix_setup_mmio_filtering(struct felix *felix)
{
- unsigned long user_ports = 0, cpu_ports = 0;
+ unsigned long user_ports = dsa_user_ports(felix->ds);
struct ocelot_vcap_filter *redirect_rule;
struct ocelot_vcap_filter *tagging_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
- int port, ret;
+ int cpu = -1, port, ret;
tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!tagging_rule)
@@ -284,12 +284,15 @@ static int felix_setup_mmio_filtering(struct felix *felix)
}
for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_user_port(ds, port))
- user_ports |= BIT(port);
- if (dsa_is_cpu_port(ds, port))
- cpu_ports |= BIT(port);
+ if (dsa_is_cpu_port(ds, port)) {
+ cpu = port;
+ break;
+ }
}
+ if (cpu < 0)
+ return -EINVAL;
+
tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
@@ -325,7 +328,7 @@ static int felix_setup_mmio_filtering(struct felix *felix)
* the CPU port module
*/
redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = cpu_ports;
+ redirect_rule->action.port_mask = BIT(cpu);
} else {
/* Trap PTP packets only to the CPU port module (which is
* redirected to the NPI port)
@@ -1076,6 +1079,101 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
+static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *skb)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ struct sk_buff *skb_match = NULL, *skb_tmp;
+ unsigned long flags;
+
+ if (!clone)
+ return;
+
+ spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (skb != clone)
+ continue;
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
+
+ WARN_ONCE(!skb_match,
+ "Could not find skb clone in TX timestamping list\n");
+}
+
+#define work_to_xmit_work(w) \
+ container_of((w), struct felix_deferred_xmit_work, work)
+
+static void felix_port_deferred_xmit(struct kthread_work *work)
+{
+ struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sk_buff *skb = xmit_work->skb;
+ u32 rew_op = ocelot_ptp_rew_op(skb);
+ struct ocelot *ocelot = ds->priv;
+ int port = xmit_work->dp->index;
+ int retries = 10;
+
+ do {
+ if (ocelot_can_inject(ocelot, 0))
+ break;
+
+ cpu_relax();
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(ocelot->dev, "port %d failed to inject skb\n",
+ port);
+ ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
+ kfree_skb(skb);
+ return;
+ }
+
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+
+ consume_skb(skb);
+ kfree(xmit_work);
+}
+
+static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct felix_port *felix_port;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
+ if (!felix_port)
+ return -ENOMEM;
+
+ felix_port->xmit_worker = felix->xmit_worker;
+ felix_port->xmit_work_fn = felix_port_deferred_xmit;
+
+ dp->priv = felix_port;
+
+ return 0;
+}
+
+static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct felix_port *felix_port = dp->priv;
+
+ if (!felix_port)
+ return;
+
+ dp->priv = NULL;
+ kfree(felix_port);
+}
+
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
* us to allocate structures twice (leak memory) and map PCI memory twice
@@ -1104,6 +1202,12 @@ static int felix_setup(struct dsa_switch *ds)
}
}
+ felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ if (IS_ERR(felix->xmit_worker)) {
+ err = PTR_ERR(felix->xmit_worker);
+ goto out_deinit_timestamp;
+ }
+
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
@@ -1114,6 +1218,14 @@ static int felix_setup(struct dsa_switch *ds)
* bits of vlan tag.
*/
felix_port_qos_map_init(ocelot, port);
+
+ err = felix_port_setup_tagger_data(ds, port);
+ if (err) {
+ dev_err(ds->dev,
+ "port %d failed to set up tagger data: %pe\n",
+ port, ERR_PTR(err));
+ goto out_deinit_ports;
+ }
}
err = ocelot_devlink_sb_register(ocelot);
@@ -1128,6 +1240,7 @@ static int felix_setup(struct dsa_switch *ds)
* there's no real point in checking for errors.
*/
felix_set_tag_protocol(ds, port, felix->tag_proto);
+ break;
}
ds->mtu_enforcement_ingress = true;
@@ -1140,9 +1253,13 @@ out_deinit_ports:
if (dsa_is_unused_port(ds, port))
continue;
+ felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
+ kthread_destroy_worker(felix->xmit_worker);
+
+out_deinit_timestamp:
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1164,19 +1281,23 @@ static void felix_teardown(struct dsa_switch *ds)
continue;
felix_del_tag_protocol(ds, port, felix->tag_proto);
+ break;
}
- ocelot_devlink_sb_unregister(ocelot);
- ocelot_deinit_timestamp(ocelot);
- ocelot_deinit(ocelot);
-
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
+ felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
+ kthread_destroy_worker(felix->xmit_worker);
+
+ ocelot_devlink_sb_unregister(ocelot);
+ ocelot_deinit_timestamp(ocelot);
+ ocelot_deinit(ocelot);
+
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
}
@@ -1293,8 +1414,12 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
if (!ocelot->ptp)
return;
- if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
+ if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
+ dev_err_ratelimited(ds->dev,
+ "port %d delivering skb without TX timestamp\n",
+ port);
return;
+ }
if (clone)
OCELOT_SKB_CB(skb)->clone = clone;
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 54024b6f9498..be3e42e135c0 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -62,6 +62,7 @@ struct felix {
resource_size_t switch_base;
resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
+ struct kthread_worker *xmit_worker;
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d796684ec9ca..412ae3e43ffb 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -100,6 +100,7 @@ config JME
config KORINA
tristate "Korina (IDT RC32434) Ethernet support"
depends on MIKROTIK_RB532 || COMPILE_TEST
+ select CRC32
select MII
help
If you have a Mikrotik RouterBoard 500 or IDT RC32434
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 840a9ce7ba1c..0a67612af228 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -21,6 +21,7 @@ config ARC_EMAC_CORE
depends on ARC || ARCH_ROCKCHIP || COMPILE_TEST
select MII
select PHYLIB
+ select CRC32
config ARC_EMAC
tristate "ARC EMAC support"
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 77298f6cef2f..f3884dc5524e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1671,22 +1671,21 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
u8 idx;
- spin_lock(&tx->lock);
-
for (idx = 0; idx < tx->len; idx++) {
u8 phy_idx = idx + tx->quad_offset;
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
-
+ spin_lock(&tx->lock);
if (tx->tstamps[idx].skb) {
dev_kfree_skb_any(tx->tstamps[idx].skb);
tx->tstamps[idx].skb = NULL;
}
- }
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
- spin_unlock(&tx->lock);
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ }
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index cf97985628ab..02e77ffe5c3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -155,6 +155,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
int err;
+ mlx5_debug_cq_remove(dev, cq);
+
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
mlx5_eq_del_cq(&cq->eq->core, cq);
@@ -162,16 +164,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec_in(dev, destroy_cq, in);
- if (err)
- return err;
synchronize_irq(cq->irqn);
- mlx5_debug_cq_remove(dev, cq);
mlx5_cq_put(cq);
wait_for_completion(&cq->free);
- return 0;
+ return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index b5ddaa82755f..c6d2f8c78db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -475,9 +475,6 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
goto err_alloc_wq;
}
- INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
- queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
- msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
err = register_switchdev_notifier(&br_offloads->nb);
@@ -500,6 +497,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
err);
goto err_register_netdev;
}
+ INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
return;
err_register_netdev:
@@ -523,10 +523,10 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
if (!br_offloads)
return;
+ cancel_delayed_work_sync(&br_offloads->update_work);
unregister_netdevice_notifier(&br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
- cancel_delayed_work(&br_offloads->update_work);
destroy_workqueue(br_offloads->wq);
rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e81e5505207c..6d8fde1314e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3032,8 +3032,8 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
agg_count += mqprio->qopt.count[i];
}
- if (priv->channels.params.num_channels < agg_count) {
- netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n",
+ if (priv->channels.params.num_channels != agg_count) {
+ netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
agg_count, priv->channels.params.num_channels);
return -EINVAL;
}
@@ -3406,20 +3406,67 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
+static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
+ bool supported, curr_state;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, ports_check))
+ return 0;
+
+ err = mlx5_query_ports_check(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
+ curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
+
+ if (!supported || enable == curr_state)
+ return 0;
+
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
+
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_channels *chs = &priv->channels;
+ struct mlx5_core_dev *mdev = priv->mdev;
int err;
mutex_lock(&priv->state_lock);
- priv->channels.params.scatter_fcs_en = enable;
- err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
- if (err)
- priv->channels.params.scatter_fcs_en = !enable;
+ if (enable) {
+ err = mlx5e_set_rx_port_ts(mdev, false);
+ if (err)
+ goto out;
- mutex_unlock(&priv->state_lock);
+ chs->params.scatter_fcs_en = true;
+ err = mlx5e_modify_channels_scatter_fcs(chs, true);
+ if (err) {
+ chs->params.scatter_fcs_en = false;
+ mlx5e_set_rx_port_ts(mdev, true);
+ }
+ } else {
+ chs->params.scatter_fcs_en = false;
+ err = mlx5e_modify_channels_scatter_fcs(chs, false);
+ if (err) {
+ chs->params.scatter_fcs_en = true;
+ goto out;
+ }
+ err = mlx5e_set_rx_port_ts(mdev, true);
+ if (err) {
+ mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
+ err = 0;
+ }
+ }
+out:
+ mutex_unlock(&priv->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3dd1101cc693..0684ac6699b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -618,6 +618,11 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ /* Set an initial non-zero value, so that mlx5e_select_queue won't
+ * divide by zero if called before first activating channels.
+ */
+ priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
+
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -643,7 +648,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
- netdev->features |= NETIF_F_VLAN_CHALLENGED;
netdev->features |= NETIF_F_NETNS_LOCAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 0998dcc9cac0..b29824448aa8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -24,16 +24,8 @@
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
+#define MLXSW_THERMAL_MIN_STATE 2
#define MLXSW_THERMAL_MAX_DUTY 255
-/* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values
- * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10 are used for
- * setting fan speed dynamic minimum. For example, if value is set to 14 (40%)
- * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to
- * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100.
- */
-#define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2)
-#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2)
-#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */
/* External cooling devices, allowed for binding to mlxsw thermal zones. */
static char * const mlxsw_thermal_external_allowed_cdev[] = {
@@ -646,49 +638,16 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
struct mlxsw_thermal *thermal = cdev->devdata;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- unsigned long cur_state, i;
int idx;
- u8 duty;
int err;
+ if (state > MLXSW_THERMAL_MAX_STATE)
+ return -EINVAL;
+
idx = mlxsw_get_cooling_device_idx(thermal, cdev);
if (idx < 0)
return idx;
- /* Verify if this request is for changing allowed fan dynamical
- * minimum. If it is - update cooling levels accordingly and update
- * state, if current state is below the newly requested minimum state.
- * For example, if current state is 5, and minimal state is to be
- * changed from 4 to 6, thermal->cooling_levels[0 to 5] will be changed
- * all from 4 to 6. And state 5 (thermal->cooling_levels[4]) should be
- * overwritten.
- */
- if (state >= MLXSW_THERMAL_SPEED_MIN &&
- state <= MLXSW_THERMAL_SPEED_MAX) {
- state -= MLXSW_THERMAL_MAX_STATE;
- for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(state, i);
-
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
- err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
- if (err)
- return err;
-
- duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
- cur_state = mlxsw_duty_to_state(duty);
-
- /* If current fan state is lower than requested dynamical
- * minimum, increase fan speed up to dynamical minimum.
- */
- if (state < cur_state)
- return 0;
-
- state = cur_state;
- }
-
- if (state > MLXSW_THERMAL_MAX_STATE)
- return -EINVAL;
-
/* Normalize the state to the valid speed range. */
state = thermal->cooling_levels[state];
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
@@ -998,8 +957,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
/* Initialize cooling levels per PWM state. */
for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
- i);
+ thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
thermal->polling_delay = bus_info->low_frequency ?
MLXSW_THERMAL_SLOW_POLL_INT :
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 796e46a53926..81a8ccca7e5e 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -497,13 +497,19 @@ static struct regmap_bus phymap_encx24j600 = {
.reg_read = regmap_encx24j600_phy_reg_read,
};
-void devm_regmap_init_encx24j600(struct device *dev,
- struct encx24j600_context *ctx)
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx)
{
mutex_init(&ctx->mutex);
regcfg.lock_arg = ctx;
ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+ if (IS_ERR(ctx->phymap))
+ return PTR_ERR(ctx->phymap);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index d4680113e249..79167c3a3179 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1023,10 +1023,13 @@ static int encx24j600_spi_probe(struct spi_device *spi)
priv->speed = SPEED_100;
priv->ctx.spi = spi;
- devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
ndev->irq = spi->irq;
ndev->netdev_ops = &encx24j600_netdev_ops;
+ ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+ if (ret)
+ goto out_free;
+
mutex_init(&priv->lock);
/* Reset device and check if it is connected */
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index fac61a8fbd02..34c5a289898c 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -15,8 +15,8 @@ struct encx24j600_context {
int bank;
};
-void devm_regmap_init_encx24j600(struct device *dev,
- struct encx24j600_context *ctx);
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx);
/* Single-byte instructions */
#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 9a871192ca96..d65697c239c8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1477,8 +1477,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
if (err)
goto out;
- if (cq->gdma_id >= gc->max_num_cqs)
+ if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+ err = -EINVAL;
goto out;
+ }
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 05c456dbdd72..520a75b57866 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -472,9 +472,9 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
!(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
ocelot_port_rmwl(ocelot_port,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
@@ -569,49 +569,44 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
-static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ocelot->ts_id_lock, flags);
- spin_lock(&ocelot_port->ts_id_lock);
+ if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+ ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ return -EBUSY;
+ }
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
- ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
- spin_unlock(&ocelot_port->ts_id_lock);
-}
+ ocelot_port->ts_id++;
+ if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+ ocelot_port->ts_id = 0;
-u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
- struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
- u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
- u32 rew_op = 0;
+ ocelot_port->ptp_skbs_in_flight++;
+ ocelot->ptp_skbs_in_flight++;
- if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
- rew_op = ptp_cmd;
- rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
- } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- rew_op = ptp_cmd;
- }
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
- return rew_op;
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+
+ return 0;
}
-EXPORT_SYMBOL(ocelot_ptp_rew_op);
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb)
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+ unsigned int ptp_class)
{
struct ptp_header *hdr;
- unsigned int ptp_class;
u8 msgtype, twostep;
- ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return false;
-
hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
return false;
@@ -631,10 +626,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 ptp_cmd = ocelot_port->ptp_cmd;
+ unsigned int ptp_class;
+ int err;
+
+ /* Don't do anything if PTP timestamping not enabled */
+ if (!ptp_cmd)
+ return 0;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- if (ocelot_ptp_is_onestep_sync(skb)) {
+ if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
return 0;
}
@@ -648,8 +653,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (!(*clone))
return -ENOMEM;
- ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ if (err)
+ return err;
+
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
@@ -683,6 +692,17 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+ if (WARN_ON(!hdr))
+ return false;
+
+ return seqid == ntohs(hdr->sequence_id);
+}
+
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
@@ -690,10 +710,10 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
while (budget--) {
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
+ u32 val, id, seqid, txport;
struct ocelot_port *port;
struct timespec64 ts;
unsigned long flags;
- u32 val, id, txport;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -706,10 +726,17 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Retrieve the ts ID and Tx port */
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
- /* Retrieve its associated skb */
port = ocelot->ports[txport];
+ spin_lock(&ocelot->ts_id_lock);
+ port->ptp_skbs_in_flight--;
+ ocelot->ptp_skbs_in_flight--;
+ spin_unlock(&ocelot->ts_id_lock);
+
+ /* Retrieve its associated skb */
+try_again:
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
@@ -722,12 +749,20 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+ if (WARN_ON(!skb_match))
+ continue;
+
+ if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+ dev_err_ratelimited(ocelot->dev,
+ "port %d received stale TX timestamp for seqid %d, discarding\n",
+ txport, seqid);
+ dev_kfree_skb_any(skb);
+ goto try_again;
+ }
+
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
- if (unlikely(!skb_match))
- continue;
-
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -1948,7 +1983,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_head_init(&ocelot_port->tx_skbs);
- spin_lock_init(&ocelot_port->ts_id_lock);
/* Basic L2 initialization */
@@ -2081,6 +2115,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
+ spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2f2312715439..9992bf06311d 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -8,6 +8,7 @@
* Copyright 2020-2021 NXP
*/
+#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
@@ -1625,7 +1626,7 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
ocelot_port_rmwl(ocelot_port, 0,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
ocelot_port->phy_mode = phy_mode;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index ea0b2f3fd9c6..d1c32c65db05 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8566,7 +8566,7 @@ static void s2io_io_resume(struct pci_dev *pdev)
return;
}
- if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
+ if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
s2io_card_down(sp);
pr_err("Can't restore mac addr after reset.\n");
return;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index c029950a81e2..ac1dcfa1d179 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -830,10 +830,6 @@ static int nfp_flower_init(struct nfp_app *app)
if (err)
goto err_cleanup;
- err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
- if (err)
- goto err_cleanup;
-
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_init(app);
@@ -942,7 +938,20 @@ static int nfp_flower_start(struct nfp_app *app)
return err;
}
- return nfp_tunnel_config_start(app);
+ err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
+ if (err)
+ return err;
+
+ err = nfp_tunnel_config_start(app);
+ if (err)
+ goto err_tunnel_config;
+
+ return 0;
+
+err_tunnel_config:
+ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+ nfp_flower_setup_indr_tc_release);
+ return err;
}
static void nfp_flower_stop(struct nfp_app *app)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 893a80e36632..63f8a8163b5f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1249,6 +1249,10 @@ static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
+ /* Don't delete our own address from the uc list */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 29f9711bf438..7673b3e07736 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1295,6 +1295,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
} else {
DP_NOTICE(cdev,
"Failed to acquire PTT for aRFS\n");
+ rc = -EINVAL;
goto err;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index fbfda55b4c52..5e731a72cce8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -71,6 +71,7 @@ err_remove_config_dt:
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.40a"},
{ .compatible = "snps,dwmac-3.50a"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 90383abafa66..f5581db0ba9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -218,11 +218,18 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
readl(ioaddr + DMA_BUS_MODE + i * 4);
}
-static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+ if (!hw_cap) {
+ /* 0x00000000 is the value read on old hardware that does not
+ * implement this register
+ */
+ return -EOPNOTSUPP;
+ }
+
dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
@@ -252,6 +259,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
/* Alternate (enhanced) DESC mode */
dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+
+ return 0;
}
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 5be8e6a631d9..d99fa028c646 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -347,8 +347,8 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}
-static void dwmac4_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwmac4_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
@@ -437,6 +437,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
+
+ return 0;
}
/* Enable/disable TSO feature and set MSS */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 906e985441a9..5e98355f422b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -371,8 +371,8 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap;
@@ -445,6 +445,8 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
+
+ return 0;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 6cf2c2107197..f7dc447f05a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -203,8 +203,8 @@ struct stmmac_dma_ops {
int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir);
/* If supported then get the optional core features */
- void (*get_hw_feature)(void __iomem *ioaddr,
- struct dma_features *dma_cap);
+ int (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
@@ -255,7 +255,7 @@ struct stmmac_dma_ops {
#define stmmac_dma_interrupt_status(__priv, __args...) \
stmmac_do_callback(__priv, dma, dma_interrupt, __args)
#define stmmac_get_hw_feature(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, get_hw_feature, __args)
+ stmmac_do_callback(__priv, dma, get_hw_feature, __args)
#define stmmac_rx_watchdog(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
#define stmmac_set_tx_ring_len(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 62cec9bfcd33..232ac98943cd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -508,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
}
+ if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
+ plat->has_gmac = 1;
+ plat->enh_desc = 1;
+ plat->tx_coe = 1;
+ plat->bugged_jumbo = 1;
+ plat->pmt = 1;
+ }
+
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
of_device_is_compatible(np, "snps,dwmac-4.20a") ||
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 8f99cfa14680..d037682fb7ad 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -4,6 +4,7 @@ config QCOM_IPA
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM
select QCOM_QMI_HELPERS
help
Choose Y or M here to include support for the Qualcomm
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ba295b8ad8fe..74d8e1dc125f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3125,6 +3125,9 @@ static void phy_shutdown(struct device *dev)
{
struct phy_device *phydev = to_phy_device(dev);
+ if (phydev->state == PHY_READY || !phydev->attached_dev)
+ return;
+
phy_disable_interrupts(phydev);
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 4c5d69732a7e..f87f17503373 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -99,6 +99,10 @@ config USB_RTL8150
config USB_RTL8152
tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
+ select CRC32
+ select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_SHA256
help
This option adds support for Realtek RTL8152 based USB 2.0
10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 74d2be438180..c501b5974aee 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -408,7 +408,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
* add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
truesize = headroom ? PAGE_SIZE : truesize;
- tailroom = truesize - len - headroom;
+ tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
buf = p - headroom;
len -= hdr_len;
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 741289e385d5..ca007b800f75 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -44,7 +44,7 @@ config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
- depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
+ select QCOM_SCM
select QCOM_QMI_HELPERS
help
This module adds support for integrated WCN3990 chip connected
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f720c0d246f2..0ac17256258d 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -36,6 +36,7 @@ LIST_HEAD(aliases_lookup);
struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
+EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 014868752cd4..dcefdb42ac46 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -62,14 +62,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
- switch (zdev->state) {
- case ZPCI_FN_STATE_STANDBY:
- *value = 0;
- break;
- default:
- *value = 1;
- break;
- }
+ *value = zpci_is_device_configured(zdev) ? 1 : 0;
return 0;
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0f40943a9a18..260a06fb78a6 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1249,6 +1249,9 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
bool check_children;
u64 addr;
+ if (!dev->parent)
+ return NULL;
+
down_read(&pci_acpi_companion_lookup_sem);
adev = pci_acpi_find_companion_hook ?
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 32ea2a8ec02b..5ff4207df66e 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -3,7 +3,8 @@ if (ARCH_QCOM || COMPILE_TEST)
config PINCTRL_MSM
tristate "Qualcomm core pin controller driver"
- depends on GPIOLIB && (QCOM_SCM || !QCOM_SCM) #if QCOM_SCM=m this can't be =y
+ depends on GPIOLIB
+ select QCOM_SCM
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 7646708d57e4..a916cd89cbbe 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -98,7 +98,7 @@ mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
if (ret)
goto access_error;
- *regval |= rol32(val, regsize * i);
+ *regval |= rol32(val, regsize * i * 8);
}
}
@@ -141,7 +141,7 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
/* Convert buffer to input value. */
- ret = kstrtou32(buf, len, &input_val);
+ ret = kstrtou32(buf, 0, &input_val);
if (ret)
return ret;
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index d6a7c896ac86..fc95620101e8 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -476,6 +476,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0006", 0},
{"AMDI0007", 0},
{"AMD0004", 0},
+ {"AMD0005", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 42513eab1d06..2fffa57e596e 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -167,6 +167,7 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
+ depends on DELL_WMI
help
This option adds integration with the "Dell Hardware Privacy"
feature of Dell laptops to the dell-wmi driver.
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index d53634c8a6e0..658bab4b7964 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c
index 379560fe5df9..e03943e6380a 100644
--- a/drivers/platform/x86/intel/int1092/intel_sar.c
+++ b/drivers/platform/x86/intel/int1092/intel_sar.c
@@ -42,12 +42,20 @@ static void update_sar_data(struct wwan_sar_context *context)
if (config->device_mode_info &&
context->sar_data.device_mode < config->total_dev_mode) {
- struct wwan_device_mode_info *dev_mode =
- &config->device_mode_info[context->sar_data.device_mode];
-
- context->sar_data.antennatable_index = dev_mode->antennatable_index;
- context->sar_data.bandtable_index = dev_mode->bandtable_index;
- context->sar_data.sartable_index = dev_mode->sartable_index;
+ int itr = 0;
+
+ for (itr = 0; itr < config->total_dev_mode; itr++) {
+ if (context->sar_data.device_mode ==
+ config->device_mode_info[itr].device_mode) {
+ struct wwan_device_mode_info *dev_mode =
+ &config->device_mode_info[itr];
+
+ context->sar_data.antennatable_index = dev_mode->antennatable_index;
+ context->sar_data.bandtable_index = dev_mode->bandtable_index;
+ context->sar_data.sartable_index = dev_mode->sartable_index;
+ break;
+ }
+ }
}
}
@@ -305,7 +313,6 @@ static struct platform_driver sar_driver = {
.remove = sar_remove,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(sar_device_ids)
}
};
@@ -313,4 +320,4 @@ module_platform_driver(sar_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
-MODULE_AUTHOR("Shravan S <s.shravan@intel.com>");
+MODULE_AUTHOR("Shravan Sudhakar <s.shravan@intel.com>");
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
index 9fe0a2527e1c..e59d79c7e82f 100644
--- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
+++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
@@ -401,7 +401,7 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)
gpiod_remove_lookup_table(&int3472->gpios);
- if (int3472->clock.ena_gpio)
+ if (int3472->clock.cl)
skl_int3472_unregister_clock(int3472);
gpiod_put(int3472->clock.ena_gpio);
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index bfa0cc20750d..7cc9089d1e14 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
#define IPC_READ_BUFFER 0x90
/* Timeout in jiffies */
-#define IPC_TIMEOUT (5 * HZ)
+#define IPC_TIMEOUT (10 * HZ)
static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -232,7 +232,7 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
/* Wait till scu status is busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
+ unsigned long end = jiffies + IPC_TIMEOUT;
do {
u32 status;
@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
return -ETIMEDOUT;
}
-/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
+/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index b4cb5fb19998..0cc62c1b0825 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1776,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
host->scsi.disconnectable = 0;
if (host->SCpnt->device->id == host->scsi.reconnected.target &&
host->SCpnt->device->lun == host->scsi.reconnected.lun &&
- scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) {
+ scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
host->host->host_no, acornscsi_target(host)));
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
index 40fb3a724c76..cf2e41dd354c 100644
--- a/drivers/scsi/elx/efct/efct_scsi.c
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
struct efct *efct;
struct efct_xport *xport;
struct efct_io *io;
- unsigned long flags = 0;
+ unsigned long flags;
efct = node->efct;
@@ -44,7 +44,6 @@ efct_scsi_io_alloc(struct efct_node *node)
if (!io) {
efc_log_err(efct, "IO alloc Failed\n");
atomic_add_return(1, &xport->io_alloc_failed_count);
- spin_unlock_irqrestore(&node->active_ios_lock, flags);
return NULL;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4683c183e9d4..5bc91d34df63 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED;
}
- conn = session->leadconn;
- iscsi_get_conn(conn->cls_conn);
- conn->eh_abort_cnt++;
- age = session->age;
-
spin_lock(&session->back_lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task || !task->sc) {
@@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
spin_unlock(&session->back_lock);
- goto success;
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
}
+
+ conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
+ conn->eh_abort_cnt++;
+ age = session->age;
+
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
__iscsi_get_task(task);
spin_unlock(&session->back_lock);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 78ce38d7251c..026a1196a54d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -12292,12 +12292,12 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = NULL;
IOCB_t *irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0139 Ignoring ELS cmd tag x%x completion Data: "
+ "0139 Ignoring ELS cmd code x%x completion Data: "
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -12305,10 +12305,13 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy.
*/
- if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+ if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+ ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
- else
+ } else {
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
lpfc_els_free_iocb(phba, cmdiocb);
+ }
lpfc_nlp_put(ndlp);
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 188de6f91050..95be7ecdfe10 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6377,27 +6377,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
return retval;
}
-struct ctm_info {
- struct ufs_hba *hba;
- unsigned long pending;
- unsigned int ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
- struct ctm_info *const ci = priv;
- struct completion *c;
-
- WARN_ON_ONCE(reserved);
- if (test_bit(req->tag, &ci->pending))
- return true;
- ci->ncpl++;
- c = req->end_io_data;
- if (c)
- complete(c);
- return true;
-}
-
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
@@ -6408,18 +6387,24 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
- unsigned long flags;
- struct request_queue *q = hba->tmf_queue;
- struct ctm_info ci = {
- .hba = hba,
- };
+ unsigned long flags, pending, issued;
+ irqreturn_t ret = IRQ_NONE;
+ int tag;
+
+ pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
spin_lock_irqsave(hba->host->host_lock, flags);
- ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ issued = hba->outstanding_tasks & ~pending;
+ for_each_set_bit(tag, &issued, hba->nutmrs) {
+ struct request *req = hba->tmf_rqs[tag];
+ struct completion *c = req->end_io_data;
+
+ complete(c);
+ ret = IRQ_HANDLED;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
- return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+ return ret;
}
/**
@@ -6542,9 +6527,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
- blk_mq_start_request(req);
task_tag = req->tag;
+ hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@ -6585,6 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
}
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->tmf_rqs[req->tag] = NULL;
__clear_bit(task_tag, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -9635,6 +9621,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = PTR_ERR(hba->tmf_queue);
goto free_tmf_tag_set;
}
+ hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+ sizeof(*hba->tmf_rqs), GFP_KERNEL);
+ if (!hba->tmf_rqs) {
+ err = -ENOMEM;
+ goto free_tmf_queue;
+ }
/* Reset the attached device */
ufshcd_device_reset(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f0da5d3db1fa..41f6e06f9185 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -828,6 +828,7 @@ struct ufs_hba {
struct blk_mq_tag_set tmf_tag_set;
struct request_queue *tmf_queue;
+ struct request **tmf_rqs;
struct uic_command *active_uic_cmd;
struct mutex uic_cmd_mutex;
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index da19d7987d00..78fd365893c1 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -7,6 +7,7 @@ thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
+CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 8f143c09a169..f0bf01ea069a 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -618,10 +618,8 @@ static int __init xenboot_console_setup(struct console *console, char *string)
{
static struct xencons_info xenboot;
- if (xen_initial_domain())
+ if (xen_initial_domain() || !xen_pv_domain())
return 0;
- if (!xen_pv_domain())
- return -ENODEV;
return xencons_info_pv_init(&xenboot, 0);
}
@@ -632,17 +630,16 @@ static void xenboot_write_console(struct console *console, const char *string,
unsigned int linelen, off = 0;
const char *pos;
+ if (dom0_write_console(0, string, len) >= 0)
+ return;
+
if (!xen_pv_domain()) {
xen_hvm_early_write(0, string, len);
return;
}
- dom0_write_console(0, string, len);
-
- if (xen_initial_domain())
+ if (domU_write_console(0, "(early) ", 8) < 0)
return;
-
- domU_write_console(0, "(early) ", 8);
while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
linelen = pos-string+off;
if (off + linelen > len)
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 8b7bc10b6e8b..f1d100671ee6 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -420,11 +420,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- /* Return -EINVAL if no usbphy is available */
- if (ret == -ENODEV)
- data->phy = NULL;
- else
- goto err_clk;
+ if (ret == -ENODEV) {
+ data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ if (IS_ERR(data->phy)) {
+ ret = PTR_ERR(data->phy);
+ if (ret == -ENODEV)
+ data->phy = NULL;
+ else
+ goto err_clk;
+ }
+ }
}
pdata.usb_phy = data->phy;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 4e2f1552f4b7..7b2e2420ecae 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -340,6 +340,9 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
acm->iocount.overrun++;
spin_unlock_irqrestore(&acm->read_lock, flags);
+ if (newctrl & ACM_CTRL_BRK)
+ tty_flip_buffer_push(&acm->port);
+
if (difference)
wake_up_all(&acm->wioctl);
@@ -475,11 +478,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
static void acm_process_read_urb(struct acm *acm, struct urb *urb)
{
+ unsigned long flags;
+
if (!urb->actual_length)
return;
+ spin_lock_irqsave(&acm->read_lock, flags);
tty_insert_flip_string(&acm->port, urb->transfer_buffer,
urb->actual_length);
+ spin_unlock_irqrestore(&acm->read_lock, flags);
+
tty_flip_buffer_push(&acm->port);
}
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 35d5908b5478..fdf79bcf7eb0 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
};
/* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN_CORE
+#ifdef CONFIG_WWAN
static int wdm_wwan_port_start(struct wwan_port *port)
{
struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
/* inbuf has been copied, it is safe to check for outstanding data */
schedule_work(&desc->service_outs_intr);
}
-#else /* CONFIG_WWAN_CORE */
+#else /* CONFIG_WWAN */
static void wdm_wwan_init(struct wdm_device *desc) {}
static void wdm_wwan_deinit(struct wdm_device *desc) {}
static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN_CORE */
+#endif /* CONFIG_WWAN */
/* --- error handling --- */
static void wdm_rxwork(struct work_struct *work)
diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig
index 5e8a04e3dd3c..b856622431a7 100644
--- a/drivers/usb/common/Kconfig
+++ b/drivers/usb/common/Kconfig
@@ -6,8 +6,7 @@ config USB_COMMON
config USB_LED_TRIG
bool "USB LED Triggers"
- depends on LEDS_CLASS && LEDS_TRIGGERS
- select USB_COMMON
+ depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
help
This option adds LED triggers for USB host and/or gadget activity.
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 804b50548163..4519d06c9ca2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4243,7 +4243,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
}
- usb_initialize_gadget(dwc->sysdev, dwc->gadget, dwc_gadget_release);
+ usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
dev = &dwc->gadget->dev;
dev->platform_data = dwc;
dwc->gadget->ops = &dwc3_gadget_ops;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index be864560bfea..ef55b8bb5870 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -674,11 +674,17 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC))
+ if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
+ // Win10 requires max packet size + 1 frame
srate = srate * (1000 + uac2_opts->fb_max) / 1000;
-
- max_size_bw = num_channels(chmask) * ssize *
- DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
+ // updated srate is always bigger, therefore DIV_ROUND_UP always yields +1
+ max_size_bw = num_channels(chmask) * ssize *
+ (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))));
+ } else {
+ // adding 1 frame provision for Win10
+ max_size_bw = num_channels(chmask) * ssize *
+ (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))) + 1);
+ }
ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
max_size_ep));
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 575fa89a783f..1bf494b649bd 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1787,7 +1787,6 @@ static int tegra_xusb_remove(struct platform_device *pdev)
return 0;
}
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
{
struct device *dev = hub->hcd->self.controller;
@@ -2102,7 +2101,7 @@ out:
return err;
}
-static int tegra_xusb_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
@@ -2144,7 +2143,7 @@ out:
return err;
}
-static int tegra_xusb_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
@@ -2174,10 +2173,8 @@ static int tegra_xusb_resume(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM
-static int tegra_xusb_runtime_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int ret;
@@ -2190,7 +2187,7 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
return ret;
}
-static int tegra_xusb_runtime_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
@@ -2201,7 +2198,6 @@ static int tegra_xusb_runtime_resume(struct device *dev)
return err;
}
-#endif
static const struct dev_pm_ops tegra_xusb_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 9858716698df..c15eec9cc460 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -696,7 +696,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
tcpm_pd_receive(tcpci->port, &msg);
}
- if (status & TCPC_ALERT_EXTENDED_STATUS) {
+ if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
tcpm_vbus_change(tcpci->port);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a4d37205df54..7f2f3ff1b391 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4876,6 +4876,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
break;
case SRC_ATTACHED:
+ case SRC_STARTUP:
case SRC_SEND_CAPABILITIES:
case SRC_READY:
if (tcpm_port_is_disconnected(port) ||
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 21b3ae25c76d..ea4cc0a6e40c 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -625,10 +625,6 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- fwnode = device_get_named_child_node(&client->dev, "connector");
- if (!fwnode)
- return -ENODEV;
-
/*
* This fwnode has a "compatible" property, but is never populated as a
* struct device. Instead we simply parse it to read the properties.
@@ -636,7 +632,9 @@ static int tps6598x_probe(struct i2c_client *client)
* with existing DT files, we work around this by deleting any
* fwnode_links to/from this fwnode.
*/
- fw_devlink_purge_absent_suppliers(fwnode);
+ fwnode = device_get_named_child_node(&client->dev, "connector");
+ if (fwnode)
+ fw_devlink_purge_absent_suppliers(fwnode);
tps->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(tps->role_sw)) {
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index b26b79dfcac9..6ed5e608dd04 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2193,8 +2193,9 @@ config FB_HYPERV
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
config FB_SIMPLE
- bool "Simple framebuffer support"
- depends on (FB = y) && !DRM_SIMPLEDRM
+ tristate "Simple framebuffer support"
+ depends on FB
+ depends on !DRM_SIMPLEDRM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index c5b99a4861e8..6b4d5a7f3e15 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1267,7 +1267,7 @@ static struct platform_device *gbefb_device;
static int __init gbefb_init(void)
{
int ret = platform_driver_register(&gbefb_driver);
- if (!ret) {
+ if (IS_ENABLED(CONFIG_SGI_IP32) && !ret) {
gbefb_device = platform_device_alloc("gbefb", 0);
if (gbefb_device) {
ret = platform_device_add(gbefb_device);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 22f5aff0c136..1b2c3aca6887 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -241,7 +241,7 @@ config XEN_PRIVCMD
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
@@ -259,7 +259,7 @@ config XEN_ACPI_PROCESSOR
config XEN_MCE_LOG
bool "Xen platform mcelog"
- depends on XEN_DOM0 && X86_MCE
+ depends on XEN_PV_DOM0 && X86_MCE
help
Allow kernel fetching MCE error from Xen platform and
converting it into Linux mcelog format for mcelog tools
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 43ebfe36ac27..3a50f097ed3e 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -491,12 +491,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
/*
- * Stop waiting if either state is not BP_EAGAIN and ballooning action is
- * needed, or if the credit has changed while state is BP_EAGAIN.
+ * Stop waiting if either state is BP_DONE and ballooning action is
+ * needed, or if the credit has changed while state is not BP_DONE.
*/
static bool balloon_thread_cond(enum bp_state state, long credit)
{
- if (state != BP_EAGAIN)
+ if (state == BP_DONE)
credit = 0;
return current_credit() != credit || kthread_should_stop();
@@ -516,10 +516,19 @@ static int balloon_thread(void *unused)
set_freezable();
for (;;) {
- if (state == BP_EAGAIN)
- timeout = balloon_stats.schedule_delay * HZ;
- else
+ switch (state) {
+ case BP_DONE:
+ case BP_ECANCELED:
timeout = 3600 * HZ;
+ break;
+ case BP_EAGAIN:
+ timeout = balloon_stats.schedule_delay * HZ;
+ break;
+ case BP_WAIT:
+ timeout = HZ;
+ break;
+ }
+
credit = current_credit();
wait_event_freezable_timeout(balloon_thread_wq,
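As a rough standalone sketch of the new per-state timeout selection above (plain C with a stand-in HZ value and enum, not the kernel's definitions), the mapping can be exercised like this:

#include <assert.h>

#define HZ 250 /* stand-in tick rate, assumed for this sketch only */

enum bp_state { BP_DONE, BP_WAIT, BP_EAGAIN, BP_ECANCELED };

/* Mirrors the new switch in balloon_thread(): sleep for an hour when done
 * or cancelled, use the configurable retry delay on EAGAIN, and poll about
 * once per second while waiting on the hypervisor. */
static long balloon_timeout(enum bp_state state, long schedule_delay)
{
	switch (state) {
	case BP_DONE:
	case BP_ECANCELED:
		return 3600L * HZ;
	case BP_EAGAIN:
		return schedule_delay * HZ;
	case BP_WAIT:
		return HZ;
	}
	return HZ;
}

int main(void)
{
	assert(balloon_timeout(BP_DONE, 4) == 3600L * HZ);
	assert(balloon_timeout(BP_EAGAIN, 4) == 4L * HZ);
	assert(balloon_timeout(BP_WAIT, 4) == HZ);
	return 0;
}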
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 720a7b7abd46..3369734108af 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -257,7 +257,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
LIST_HEAD(pagelist);
struct mmap_gfn_state state;
- /* We only support privcmd_ioctl_mmap_batch for auto translated. */
+ /* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
if (xen_feature(XENFEAT_auto_translated_physmap))
return -ENOSYS;
@@ -420,7 +420,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
int rc;
struct page **pages;
- pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+ pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
if (pages == NULL)
return -ENOMEM;
@@ -428,7 +428,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
if (rc != 0) {
pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
numpgs, rc);
- kfree(pages);
+ kvfree(pages);
return -ENOMEM;
}
BUG_ON(vma->vm_private_data != NULL);
@@ -803,21 +803,21 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
unsigned int domid =
(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
DOMID_SELF : kdata.dom;
- int num;
+ int num, *errs = (int *)pfns;
+ BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
num = xen_remap_domain_mfn_array(vma,
kdata.addr & PAGE_MASK,
- pfns, kdata.num, (int *)pfns,
+ pfns, kdata.num, errs,
vma->vm_page_prot,
- domid,
- vma->vm_private_data);
+ domid);
if (num < 0)
rc = num;
else if (num != kdata.num) {
unsigned int i;
for (i = 0; i < num; i++) {
- rc = pfns[i];
+ rc = errs[i];
if (rc < 0)
break;
}
@@ -912,7 +912,7 @@ static void privcmd_close(struct vm_area_struct *vma)
else
pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
numpgs, rc);
- kfree(pages);
+ kvfree(pages);
}
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
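The mmap_resource hunk above reuses the pfn buffer to carry per-frame error codes, with a BUILD_BUG_ON guarding the size assumption; it also switches the page-array allocation to kvcalloc()/kvfree() so large requests can fall back to vmalloc. A minimal userspace sketch of the aliasing idea (the xen_pfn_t typedef here is an assumption for the sketch, not Xen's header):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t xen_pfn_t;	/* assumed width for the sketch */

int main(void)
{
	/* Compile-time equivalent of the BUILD_BUG_ON in the hunk: the reuse
	 * is only valid if an error slot fits inside a pfn slot. */
	_Static_assert(sizeof(int) <= sizeof(xen_pfn_t), "errs must fit in pfns");

	xen_pfn_t *pfns = calloc(4, sizeof(*pfns));
	int *errs = (int *)pfns;	/* same storage, reinterpreted as errors */

	if (!pfns)
		return 1;

	errs[1] = -12;			/* e.g. one frame failed with -ENOMEM */
	assert(errs[0] == 0 && errs[1] < 0);

	free(pfns);
	return 0;
}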
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index dff2c8a3e059..c0cebcf745ce 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3030,7 +3030,7 @@ struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- u64 objectid, const char *name, int name_len,
+ u64 index, const char *name, int name_len,
int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index f1274d5c3805..7721ce0c0604 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -190,9 +190,20 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
}
/*
- * lookup a directory item based on name. 'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory item by name.
+ *
+ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
+ * @root: The root of the target tree.
+ * @path: Path to use for the search.
+ * @dir: The inode number (objectid) of the directory.
+ * @name: The name associated to the directory entry we are looking for.
+ * @name_len: The length of the name.
+ * @mod: Used to indicate if the tree search is meant for a read only
+ * lookup, for a modification lookup or for a deletion lookup, so
+ * its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir item does not exist, an error pointer if an error
+ * happened, or a pointer to a dir item if a dir item exists for the given name.
*/
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -273,27 +284,42 @@ out:
}
/*
- * lookup a directory item based on index. 'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory index item by name and index number.
*
- * The name is used to make sure the index really points to the name you were
- * looking for.
+ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
+ * @root: The root of the target tree.
+ * @path: Path to use for the search.
+ * @dir: The inode number (objectid) of the directory.
+ * @index: The index number.
+ * @name: The name associated to the directory entry we are looking for.
+ * @name_len: The length of the name.
+ * @mod: Used to indicate if the tree search is meant for a read only
+ * lookup, for a modification lookup or for a deletion lookup, so
+ * its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir index item does not exist, an error pointer if an
+ * error happened, or a pointer to a dir item if the dir index item exists and
+ * matches the criteria (name and index number).
*/
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- u64 objectid, const char *name, int name_len,
+ u64 index, const char *name, int name_len,
int mod)
{
+ struct btrfs_dir_item *di;
struct btrfs_key key;
key.objectid = dir;
key.type = BTRFS_DIR_INDEX_KEY;
- key.offset = objectid;
+ key.offset = index;
- return btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ if (di == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return di;
}
struct btrfs_dir_item *
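With this change btrfs_lookup_dir_index_item() follows the usual kernel convention of three distinct outcomes: a valid pointer, NULL for "not found" (ENOENT is folded into NULL here), or an ERR_PTR() for real failures. A minimal sketch of that convention, using stand-in helpers rather than the kernel's err.h:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

/* Toy lookup: returns the item, NULL when absent, or an error pointer. */
static void *lookup(int fail, int present, void *item)
{
	if (fail)
		return ERR_PTR(-5);	/* e.g. -EIO */
	return present ? item : NULL;
}

int main(void)
{
	int item = 42;
	void *di;

	di = lookup(0, 1, &item);
	assert(!IS_ERR(di) && di == &item);		/* found */

	di = lookup(0, 0, &item);
	assert(di == NULL);				/* absent, not an error */

	di = lookup(1, 0, &item);
	assert(IS_ERR(di) && PTR_ERR(di) == -5);	/* hard failure */
	return 0;
}

Callers then check IS_ERR() before the NULL test, which is exactly the shape the tree-log.c hunks below adopt.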
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index fc3da7585fb7..0ab456cb4bf8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4859,6 +4859,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
out_free_delayed:
btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
+ btrfs_tree_unlock(buf);
free_extent_buffer(buf);
out_free_reserved:
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7ff577005d0f..a1762363f61f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -734,8 +734,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
if (args->start >= inode->disk_i_size && !args->replace_extent)
modify_tree = 0;
- update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
- root == fs_info->tree_root);
+ update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -2704,14 +2703,16 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
drop_args.bytes_found);
if (ret != -ENOSPC) {
/*
- * When cloning we want to avoid transaction aborts when
- * nothing was done and we are attempting to clone parts
- * of inline extents, in such cases -EOPNOTSUPP is
- * returned by __btrfs_drop_extents() without having
- * changed anything in the file.
+ * The only time we don't want to abort is if we are
+ * attempting to clone a partial inline extent, in which
+ * case we'll get EOPNOTSUPP. However, if we aren't
+ * cloning we need to abort no matter what, because if we
+ * got EOPNOTSUPP via prealloc then we messed up and
+ * need to abort.
*/
- if (extent_info && !extent_info->is_new_extent &&
- ret && ret != -EOPNOTSUPP)
+ if (ret &&
+ (ret != -EOPNOTSUPP ||
+ (extent_info && extent_info->is_new_extent)))
btrfs_abort_transaction(trans, ret);
break;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f7efc26aa82a..b415c5ec03ea 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -939,9 +939,11 @@ out:
}
/*
- * helper function to see if a given name and sequence number found
- * in an inode back reference are already in a directory and correctly
- * point to this inode
+ * See if a given name and sequence number found in an inode back reference are
+ * already in a directory and correctly point to this inode.
+ *
+ * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
+ * exists.
*/
static noinline int inode_in_dir(struct btrfs_root *root,
struct btrfs_path *path,
@@ -950,29 +952,34 @@ static noinline int inode_in_dir(struct btrfs_root *root,
{
struct btrfs_dir_item *di;
struct btrfs_key location;
- int match = 0;
+ int ret = 0;
di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
index, name, name_len, 0);
- if (di && !IS_ERR(di)) {
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ } else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
- } else
+ } else {
goto out;
- btrfs_release_path(path);
+ }
+ btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
- if (di && !IS_ERR(di)) {
- btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
- if (location.objectid != objectid)
- goto out;
- } else
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
goto out;
- match = 1;
+ } else if (di) {
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
+ if (location.objectid == objectid)
+ ret = 1;
+ }
out:
btrfs_release_path(path);
- return match;
+ return ret;
}
/*
@@ -1182,7 +1189,9 @@ next:
/* look for a conflicting sequence number */
di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
ref_index, name, namelen, 0);
- if (di && !IS_ERR(di)) {
+ if (IS_ERR(di)) {
+ return PTR_ERR(di);
+ } else if (di) {
ret = drop_one_dir_item(trans, root, path, dir, di);
if (ret)
return ret;
@@ -1192,7 +1201,9 @@ next:
/* look for a conflicting name */
di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
name, namelen, 0);
- if (di && !IS_ERR(di)) {
+ if (IS_ERR(di)) {
+ return PTR_ERR(di);
+ } else if (di) {
ret = drop_one_dir_item(trans, root, path, dir, di);
if (ret)
return ret;
@@ -1517,10 +1528,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- /* if we already have a perfect match, we're done */
- if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
- btrfs_ino(BTRFS_I(inode)), ref_index,
- name, namelen)) {
+ ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
+ btrfs_ino(BTRFS_I(inode)), ref_index,
+ name, namelen);
+ if (ret < 0) {
+ goto out;
+ } else if (ret == 0) {
/*
* look for a conflicting back reference in the
* metadata. if we find one we have to unlink that name
@@ -1580,6 +1593,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
}
+ /* Else, ret == 1, we already have a perfect match, we're done. */
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
kfree(name);
@@ -1936,8 +1950,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
struct btrfs_key log_key;
struct inode *dir;
u8 log_type;
- int exists;
- int ret = 0;
+ bool exists;
+ int ret;
bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
bool name_added = false;
@@ -1957,12 +1971,12 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
name_len);
btrfs_dir_item_key_to_cpu(eb, di, &log_key);
- exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
- if (exists == 0)
- exists = 1;
- else
- exists = 0;
+ ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
btrfs_release_path(path);
+ if (ret < 0)
+ goto out;
+ exists = (ret == 0);
+ ret = 0;
if (key->type == BTRFS_DIR_ITEM_KEY) {
dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1977,7 +1991,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
ret = -EINVAL;
goto out;
}
- if (IS_ERR_OR_NULL(dst_di)) {
+
+ if (IS_ERR(dst_di)) {
+ ret = PTR_ERR(dst_di);
+ goto out;
+ } else if (!dst_di) {
/* we need a sequence number to insert, so we only
* do inserts for the BTRFS_DIR_INDEX_KEY types
*/
@@ -2281,7 +2299,7 @@ again:
dir_key->offset,
name, name_len, 0);
}
- if (!log_di || log_di == ERR_PTR(-ENOENT)) {
+ if (!log_di) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);
@@ -3540,8 +3558,7 @@ out_unlock:
if (err == -ENOSPC) {
btrfs_set_log_full_commit(trans);
err = 0;
- } else if (err < 0 && err != -ENOENT) {
- /* ENOENT can be returned if the entry hasn't been fsynced yet */
+ } else if (err < 0) {
btrfs_abort_transaction(trans, err);
}
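The reworked inode_in_dir() above no longer collapses lookup failures into "no match"; it returns a negative errno, 0 or 1, and the caller in add_inode_ref() now propagates the error case. A tiny sketch of that caller shape (the helper names here are invented for illustration):

#include <stdio.h>

/* Hypothetical lookup: < 0 on error, 0 if absent, 1 if present. */
static int entry_in_dir(int simulated_result)
{
	return simulated_result;
}

static int replay_ref(int simulated_result)
{
	int ret = entry_in_dir(simulated_result);

	if (ret < 0)
		return ret;	/* propagate instead of treating it as "absent" */
	if (ret == 0)
		printf("no match: resolve conflicts, then add the link\n");
	/* ret == 1: perfect match already in place, nothing to do */
	return 0;
}

int main(void)
{
	replay_ref(-5);
	replay_ref(0);
	replay_ref(1);
	return 0;
}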
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index af086d35398a..48b18b4ec117 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -296,10 +296,12 @@ int ksmbd_conn_handler_loop(void *p)
pdu_size = get_rfc1002_len(hdr_buf);
ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
- /* make sure we have enough to get to SMB header end */
- if (!ksmbd_pdu_size_has_room(pdu_size)) {
- ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
- pdu_size);
+ /*
+ * Check if pdu size is valid (min : smb header size,
+ * max : 0x00FFFFFF).
+ */
+ if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+ pdu_size > MAX_STREAM_PROT_LEN) {
continue;
}
diff --git a/fs/ksmbd/glob.h b/fs/ksmbd/glob.h
index 49a5a3afa118..5b8f3e0ebdb3 100644
--- a/fs/ksmbd/glob.h
+++ b/fs/ksmbd/glob.h
@@ -12,7 +12,7 @@
#include "unicode.h"
#include "vfs_cache.h"
-#define KSMBD_VERSION "3.1.9"
+#define KSMBD_VERSION "3.4.2"
extern int ksmbd_debug_types;
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index 9aa46bb3e10d..9edd9c161b27 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -80,18 +80,21 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
};
/*
- * Returns the pointer to the beginning of the data area. Length of the data
- * area and the offset to it (from the beginning of the smb are also returned.
+ * Set the length of the data area and the offset to it via the arguments.
+ * If they are invalid, return an error.
*/
-static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ struct smb2_hdr *hdr)
{
+ int ret = 0;
+
*off = 0;
*len = 0;
/* error requests do not have a data area */
if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
(((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
- return NULL;
+ return ret;
/*
* Following commands have data areas so we have to get the location
@@ -165,69 +168,60 @@ static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
case SMB2_IOCTL:
*off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
*len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
-
break;
default:
ksmbd_debug(SMB, "no length check for command\n");
break;
}
- /*
- * Invalid length or offset probably means data area is invalid, but
- * we have little choice but to ignore the data area in this case.
- */
if (*off > 4096) {
- ksmbd_debug(SMB, "offset %d too large, data area ignored\n",
- *off);
- *len = 0;
- *off = 0;
- } else if (*off < 0) {
- ksmbd_debug(SMB,
- "negative offset %d to data invalid ignore data area\n",
- *off);
- *off = 0;
- *len = 0;
- } else if (*len < 0) {
- ksmbd_debug(SMB,
- "negative data length %d invalid, data area ignored\n",
- *len);
- *len = 0;
- } else if (*len > 128 * 1024) {
- ksmbd_debug(SMB, "data area larger than 128K: %d\n", *len);
- *len = 0;
+ ksmbd_debug(SMB, "offset %d too large\n", *off);
+ ret = -EINVAL;
+ } else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
+ ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
+ MAX_STREAM_PROT_LEN, (u64)*off + *len);
+ ret = -EINVAL;
}
- /* return pointer to beginning of data area, ie offset from SMB start */
- if ((*off != 0) && (*len != 0))
- return (char *)hdr + *off;
- else
- return NULL;
+ return ret;
}
/*
* Calculate the size of the SMB message based on the fixed header
* portion, the number of word parameters and the data portion of the message.
*/
-static unsigned int smb2_calc_size(void *buf)
+static int smb2_calc_size(void *buf, unsigned int *len)
{
struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
struct smb2_hdr *hdr = &pdu->hdr;
- int offset; /* the offset from the beginning of SMB to data area */
- int data_length; /* the length of the variable length data area */
+ unsigned int offset; /* the offset from the beginning of SMB to data area */
+ unsigned int data_length; /* the length of the variable length data area */
+ int ret;
+
/* Structure Size has already been checked to make sure it is 64 */
- int len = le16_to_cpu(hdr->StructureSize);
+ *len = le16_to_cpu(hdr->StructureSize);
/*
* StructureSize2, ie length of fixed parameter area has already
* been checked to make sure it is the correct length.
*/
- len += le16_to_cpu(pdu->StructureSize2);
+ *len += le16_to_cpu(pdu->StructureSize2);
+ /*
+ * StructureSize2 of smb2_lock pdu is set to 48, indicating
+ * the size of smb2 lock request with single smb2_lock_element
+ * regardless of number of locks. Subtract single
+ * smb2_lock_element for correct buffer size check.
+ */
+ if (hdr->Command == SMB2_LOCK)
+ *len -= sizeof(struct smb2_lock_element);
if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
goto calc_size_exit;
- smb2_get_data_area_len(&offset, &data_length, hdr);
- ksmbd_debug(SMB, "SMB2 data length %d offset %d\n", data_length,
+ ret = smb2_get_data_area_len(&offset, &data_length, hdr);
+ if (ret)
+ return ret;
+ ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
offset);
if (data_length > 0) {
@@ -237,16 +231,19 @@ static unsigned int smb2_calc_size(void *buf)
* for some commands, typically those with odd StructureSize,
* so we must add one to the calculation.
*/
- if (offset + 1 < len)
+ if (offset + 1 < *len) {
ksmbd_debug(SMB,
- "data area offset %d overlaps SMB2 header %d\n",
- offset + 1, len);
- else
- len = offset + data_length;
+ "data area offset %d overlaps SMB2 header %u\n",
+ offset + 1, *len);
+ return -EINVAL;
+ }
+
+ *len = offset + data_length;
}
+
calc_size_exit:
- ksmbd_debug(SMB, "SMB2 len %d\n", len);
- return len;
+ ksmbd_debug(SMB, "SMB2 len %u\n", *len);
+ return 0;
}
static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
@@ -391,9 +388,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
return 1;
}
- clc_len = smb2_calc_size(hdr);
+ if (smb2_calc_size(hdr, &clc_len))
+ return 1;
+
if (len != clc_len) {
- /* server can return one byte more due to implied bcc[0] */
+ /* client can return one byte more due to implied bcc[0] */
if (clc_len == len + 1)
return 0;
@@ -418,9 +417,6 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
return 0;
}
- if (command == SMB2_LOCK_HE && len == 88)
- return 0;
-
ksmbd_debug(SMB,
"cli req too short, len %d not %d. cmd:%d mid:%llu\n",
len, clc_len, command,
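Two details in the smb2misc.c rework above are easy to miss: the offset/length pair is now validated against MAX_STREAM_PROT_LEN with the sum widened to u64 so it cannot wrap, and smb2_calc_size() reports failure through its return value instead of silently zeroing the data area. A standalone sketch of the overflow-safe bound check (plain C, reusing only the constant defined later in smb_common.h):

#include <assert.h>
#include <stdint.h>

#define MAX_STREAM_PROT_LEN 0x00FFFFFFu

/* Widen before adding, as the new smb2_get_data_area_len() does with
 * (u64)*off + *len; a 32-bit sum could wrap and sneak past the limit. */
static int data_area_ok(uint32_t off, uint32_t len)
{
	if (off > 4096)
		return 0;
	return (uint64_t)off + len <= MAX_STREAM_PROT_LEN;
}

int main(void)
{
	assert(data_area_ok(80, 1024));
	assert(!data_area_ok(8192, 16));		/* offset out of range */
	assert(!data_area_ok(4096, 0xFFFFFFFFu));	/* 32-bit sum would wrap to 4095 */
	return 0;
}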
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
index 197473871aa4..b06456eb587b 100644
--- a/fs/ksmbd/smb2ops.c
+++ b/fs/ksmbd/smb2ops.c
@@ -187,11 +187,6 @@ static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
[SMB2_CHANGE_NOTIFY_HE] = { .proc = smb2_notify},
};
-int init_smb2_0_server(struct ksmbd_conn *conn)
-{
- return -EOPNOTSUPP;
-}
-
/**
* init_smb2_1_server() - initialize a smb server connection with smb2.1
* command dispatcher
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index dcf907738610..005aa93a49d6 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -236,9 +236,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
if (conn->need_neg == false)
return -EINVAL;
- if (!(conn->dialect >= SMB20_PROT_ID &&
- conn->dialect <= SMB311_PROT_ID))
- return -EINVAL;
rsp_hdr = work->response_buf;
@@ -1166,13 +1163,6 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
case SMB21_PROT_ID:
init_smb2_1_server(conn);
break;
- case SMB20_PROT_ID:
- rc = init_smb2_0_server(conn);
- if (rc) {
- rsp->hdr.Status = STATUS_NOT_SUPPORTED;
- goto err_out;
- }
- break;
case SMB2X_PROT_ID:
case BAD_PROT_ID:
default:
@@ -1191,11 +1181,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
- if (conn->dialect > SMB20_PROT_ID) {
- memcpy(conn->ClientGUID, req->ClientGUID,
- SMB2_CLIENT_GUID_SIZE);
- conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
- }
+ memcpy(conn->ClientGUID, req->ClientGUID,
+ SMB2_CLIENT_GUID_SIZE);
+ conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
rsp->StructureSize = cpu_to_le16(65);
rsp->DialectRevision = cpu_to_le16(conn->dialect);
@@ -1537,11 +1525,9 @@ binding_session:
}
}
- if (conn->dialect > SMB20_PROT_ID) {
- if (!ksmbd_conn_lookup_dialect(conn)) {
- pr_err("fail to verify the dialect\n");
- return -ENOENT;
- }
+ if (!ksmbd_conn_lookup_dialect(conn)) {
+ pr_err("fail to verify the dialect\n");
+ return -ENOENT;
}
return 0;
}
@@ -1623,11 +1609,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
}
}
- if (conn->dialect > SMB20_PROT_ID) {
- if (!ksmbd_conn_lookup_dialect(conn)) {
- pr_err("fail to verify the dialect\n");
- return -ENOENT;
- }
+ if (!ksmbd_conn_lookup_dialect(conn)) {
+ pr_err("fail to verify the dialect\n");
+ return -ENOENT;
}
return 0;
}
@@ -5499,7 +5483,6 @@ static int set_file_basic_info(struct ksmbd_file *fp,
struct ksmbd_share_config *share)
{
struct iattr attrs;
- struct timespec64 ctime;
struct file *filp;
struct inode *inode;
struct user_namespace *user_ns;
@@ -5521,13 +5504,11 @@ static int set_file_basic_info(struct ksmbd_file *fp,
attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
}
- if (file_info->ChangeTime) {
+ attrs.ia_valid |= ATTR_CTIME;
+ if (file_info->ChangeTime)
attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
- ctime = attrs.ia_ctime;
- attrs.ia_valid |= ATTR_CTIME;
- } else {
- ctime = inode->i_ctime;
- }
+ else
+ attrs.ia_ctime = inode->i_ctime;
if (file_info->LastWriteTime) {
attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
@@ -5573,11 +5554,9 @@ static int set_file_basic_info(struct ksmbd_file *fp,
return -EACCES;
inode_lock(inode);
+ inode->i_ctime = attrs.ia_ctime;
+ attrs.ia_valid &= ~ATTR_CTIME;
rc = notify_change(user_ns, dentry, &attrs, NULL);
- if (!rc) {
- inode->i_ctime = ctime;
- mark_inode_dirty(inode);
- }
inode_unlock(inode);
}
return rc;
@@ -8411,20 +8390,18 @@ int smb3_decrypt_req(struct ksmbd_work *work)
struct smb2_hdr *hdr;
unsigned int pdu_length = get_rfc1002_len(buf);
struct kvec iov[2];
- unsigned int buf_data_size = pdu_length + 4 -
+ int buf_data_size = pdu_length + 4 -
sizeof(struct smb2_transform_hdr);
struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
int rc = 0;
- if (pdu_length + 4 <
- sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
+ if (buf_data_size < sizeof(struct smb2_hdr)) {
pr_err("Transform message is too small (%u)\n",
pdu_length);
return -ECONNABORTED;
}
- if (pdu_length + 4 <
- le32_to_cpu(tr_hdr->OriginalMessageSize) + sizeof(struct smb2_transform_hdr)) {
+ if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
pr_err("Transform message is broken\n");
return -ECONNABORTED;
}
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index 261825d06391..a6dec5ec6a54 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -1637,7 +1637,6 @@ struct smb2_posix_info {
} __packed;
/* functions */
-int init_smb2_0_server(struct ksmbd_conn *conn);
void init_smb2_1_server(struct ksmbd_conn *conn);
void init_smb3_0_server(struct ksmbd_conn *conn);
void init_smb3_02_server(struct ksmbd_conn *conn);
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index db8042a173d0..707490ab1f4c 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -21,7 +21,6 @@ static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
#define MAGIC_CHAR '~'
#define PERIOD '.'
#define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
-#define KSMBD_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr))
struct smb_protocol {
int index;
@@ -89,7 +88,7 @@ unsigned int ksmbd_server_side_copy_max_total_size(void)
inline int ksmbd_min_protocol(void)
{
- return SMB2_PROT;
+ return SMB21_PROT;
}
inline int ksmbd_max_protocol(void)
@@ -294,11 +293,6 @@ int ksmbd_init_smb_server(struct ksmbd_work *work)
return 0;
}
-bool ksmbd_pdu_size_has_room(unsigned int pdu)
-{
- return (pdu >= KSMBD_MIN_SUPPORTED_HEADER_SIZE - 4);
-}
-
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
struct ksmbd_file *dir,
struct ksmbd_dir_info *d_info,
@@ -433,7 +427,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
static int __smb2_negotiate(struct ksmbd_conn *conn)
{
- return (conn->dialect >= SMB20_PROT_ID &&
+ return (conn->dialect >= SMB21_PROT_ID &&
conn->dialect <= SMB311_PROT_ID);
}
@@ -463,7 +457,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
}
}
- if (command == SMB2_NEGOTIATE_HE) {
+ if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
ret = smb2_handle_negotiate(work);
init_smb2_neg_rsp(work);
return ret;
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index 994abede27e9..6e79e7577f6b 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -48,6 +48,8 @@
#define CIFS_DEFAULT_IOSIZE (64 * 1024)
#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
+#define MAX_STREAM_PROT_LEN 0x00FFFFFF
+
/* Responses when opening a file. */
#define F_SUPERSEDED 0
#define F_OPENED 1
@@ -493,8 +495,6 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
int ksmbd_init_smb_server(struct ksmbd_work *work);
-bool ksmbd_pdu_size_has_room(unsigned int pdu);
-
struct ksmbd_kstat;
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
int info_level,
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index cc7338f9e0d1..7ce93aaf69f8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -957,7 +957,7 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 24b40e5c160b..018e776a34b9 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -613,7 +613,7 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
* and is automatically cleaned up after the test case concludes. See &struct
* kunit_resource for more information.
*/
-void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
/**
* kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -657,9 +657,9 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
*
* See kcalloc() and kunit_kmalloc_array() for more information.
*/
-static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
- return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
+ return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
}
void kunit_cleanup(struct kunit *test);
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 7d1cabe15262..63ccb5252190 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -321,10 +321,20 @@ asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
+#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+ *res = (struct arm_smccc_res){};
+}
+#endif
/**
* __arm_smccc_hvc() - make HVC calls
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..8c3d45eca46b
--- /dev/null
+++ b/include/linux/dsa/mv88e6xxx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE 0
+#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
+
+#endif
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index 0fe101e8e190..d42010cf5468 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -5,7 +5,28 @@
#ifndef _NET_DSA_TAG_OCELOT_H
#define _NET_DSA_TAG_OCELOT_H
+#include <linux/kthread.h>
#include <linux/packing.h>
+#include <linux/skbuff.h>
+
+struct ocelot_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_class; /* valid only for clones */
+ u8 ptp_cmd;
+ u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+ ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
#define OCELOT_TAG_LEN 16
#define OCELOT_SHORT_PREFIX_LEN 4
@@ -140,6 +161,17 @@
* +------+------+------+------+------+------+------+------+
*/
+struct felix_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct felix_port {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ struct kthread_worker *xmit_worker;
+};
+
static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
{
packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
@@ -215,4 +247,21 @@ static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
}
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
+}
+
#endif
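The newly inlined ocelot_ptp_rew_op() packs the rewriter opcode as the PTP command in bits 2:0 with the two-step timestamp ID starting at bit 3. A small sketch of that composition, using only the IFH_REW_OP_* values defined in the header above:

#include <assert.h>
#include <stdint.h>

#define IFH_REW_OP_TWO_STEP_PTP	0x3
#define IFH_REW_OP_ORIGIN_PTP	0x5

/* Same layout as ocelot_ptp_rew_op(): command in bits 2:0, timestamp ID
 * shifted in at bit 3, and 0 when there is no clone to match against. */
static uint32_t rew_op(uint8_t ptp_cmd, int has_clone, uint8_t ts_id)
{
	if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && has_clone)
		return ptp_cmd | (uint32_t)ts_id << 3;
	if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP)
		return ptp_cmd;
	return 0;
}

int main(void)
{
	assert(rew_op(IFH_REW_OP_TWO_STEP_PTP, 1, 5) == ((5u << 3) | 0x3));
	assert(rew_op(IFH_REW_OP_ORIGIN_PTP, 0, 0) == IFH_REW_OP_ORIGIN_PTP);
	assert(rew_op(IFH_REW_OP_TWO_STEP_PTP, 0, 5) == 0);	/* no clone yet */
	return 0;
}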
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f3638d09ba77..993204a6c1a1 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -9475,16 +9475,22 @@ struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x10];
+
u8 entropy_force_cap[0x1];
u8 entropy_calc_cap[0x1];
u8 entropy_gre_calc_cap[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xf];
+ u8 rx_ts_over_crc_cap[0x1];
+ u8 reserved_at_33[0xb];
u8 fcs_cap[0x1];
u8 reserved_at_3f[0x1];
+
u8 entropy_force[0x1];
u8 entropy_calc[0x1];
u8 entropy_gre_calc[0x1];
- u8 reserved_at_43[0x1b];
+ u8 reserved_at_43[0xf];
+ u8 rx_ts_over_crc[0x1];
+ u8 reserved_at_53[0xb];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
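A quick arithmetic check on the pcmr_reg layout change above: each 32-bit half of the register must still add up after the 0x1b-bit reserved gap is split around the new rx_ts_over_crc bits. Throwaway compile-time checks (not mlx5 code):

int main(void)
{
	/* Old rows: 3 entropy bits + 0x1b reserved + fcs bit + 1 reserved. */
	_Static_assert(3 + 0x1b + 1 + 1 == 0x20, "old 32-bit row");
	/* New rows: the 0x1b gap becomes 0xf + rx_ts_over_crc(_cap) + 0xb. */
	_Static_assert(3 + 0xf + 1 + 0xb + 1 + 1 == 0x20, "new 32-bit row");
	return 0;
}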
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index c0475d1c9885..81cad9e1e412 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -61,7 +61,6 @@ enum qcom_scm_ice_cipher {
#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
-#if IS_ENABLED(CONFIG_QCOM_SCM)
extern bool qcom_scm_is_available(void);
extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
@@ -115,74 +114,4 @@ extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
extern int qcom_scm_lmh_profile_change(u32 profile_id);
extern bool qcom_scm_lmh_dcvsh_available(void);
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
- { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
- { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
- { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
- { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
- { return -ENODEV; }
-extern inline int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
- u32 cp_nonpixel_start,
- u32 cp_nonpixel_size)
- { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src, const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
- u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_ice_available(void) { return false; }
-static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
-static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
- { return -ENODEV; }
-
-static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
- u64 limit_node, u32 node_id, u64 version)
- { return -ENODEV; }
-
-static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; }
-
-static inline bool qcom_scm_lmh_dcvsh_available(void) { return -ENODEV; }
-#endif
#endif
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 2ebef6b1a3d6..74d3c1efd9bb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -399,9 +399,8 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
-struct workqueue_struct *alloc_workqueue(const char *fmt,
- unsigned int flags,
- int max_active, ...);
+__printf(1, 4) struct workqueue_struct *
+alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
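The workqueue.h hunk only adds a __printf(1, 4) annotation so callers of alloc_workqueue() get format-string checking; the declaration is otherwise unchanged. A userspace sketch of the same GCC attribute (the function here is invented for illustration):

#include <stdarg.h>
#include <stdio.h>

/* format(printf, 1, 4): argument 1 is the format string and the variadic
 * arguments start at position 4, so -Wformat can check every caller. */
__attribute__((format(printf, 1, 4)))
static void report(const char *fmt, unsigned int flags, int max_active, ...)
{
	va_list ap;

	va_start(ap, max_active);
	printf("flags=%u max_active=%d name=", flags, max_active);
	vprintf(fmt, ap);
	putchar('\n');
	va_end(ap);
}

int main(void)
{
	report("events_%d", 0, 1, 7);
	/* report("events_%d", 0, 1, "oops"); -- would now warn at compile time */
	return 0;
}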
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 06706a9fd5b1..d7055b41982d 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -89,15 +89,6 @@
/* Source PGIDs, one per physical port */
#define PGID_SRC 80
-#define IFH_TAG_TYPE_C 0
-#define IFH_TAG_TYPE_S 1
-
-#define IFH_REW_OP_NOOP 0x0
-#define IFH_REW_OP_DSCP 0x1
-#define IFH_REW_OP_ONE_STEP_PTP 0x2
-#define IFH_REW_OP_TWO_STEP_PTP 0x3
-#define IFH_REW_OP_ORIGIN_PTP 0x5
-
#define OCELOT_NUM_TC 8
#define OCELOT_SPEED_2500 0
@@ -603,10 +594,10 @@ struct ocelot_port {
/* The VLAN ID that will be transmitted as untagged, on egress */
struct ocelot_vlan native_vlan;
+ unsigned int ptp_skbs_in_flight;
u8 ptp_cmd;
struct sk_buff_head tx_skbs;
u8 ts_id;
- spinlock_t ts_id_lock;
phy_interface_t phy_mode;
@@ -680,6 +671,9 @@ struct ocelot {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
struct hwtstamp_config hwtstamp_config;
+ unsigned int ptp_skbs_in_flight;
+ /* Protects the 2-step TX timestamp ID logic */
+ spinlock_t ts_id_lock;
/* Protects the PTP interface state */
struct mutex ptp_lock;
/* Protects the PTP clock */
@@ -692,15 +686,6 @@ struct ocelot_policer {
u32 burst; /* bytes */
};
-struct ocelot_skb_cb {
- struct sk_buff *clone;
- u8 ptp_cmd;
- u8 ts_id;
-};
-
-#define OCELOT_SKB_CB(skb) \
- ((struct ocelot_skb_cb *)((skb)->cb))
-
#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
@@ -752,8 +737,6 @@ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 val, u32 reg, u32 offset);
-#if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB)
-
/* Packet I/O */
bool ocelot_can_inject(struct ocelot *ocelot, int grp);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
@@ -761,36 +744,6 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
-u32 ocelot_ptp_rew_op(struct sk_buff *skb);
-#else
-
-static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp)
-{
- return false;
-}
-
-static inline void ocelot_port_inject_frame(struct ocelot *ocelot, int port,
- int grp, u32 rew_op,
- struct sk_buff *skb)
-{
-}
-
-static inline int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp,
- struct sk_buff **skb)
-{
- return -EIO;
-}
-
-static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
-{
-}
-
-static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
- return 0;
-}
-#endif
-
/* Hardware initialization */
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h
index ded497d72bdb..f085884b1fa2 100644
--- a/include/soc/mscc/ocelot_ptp.h
+++ b/include/soc/mscc/ocelot_ptp.h
@@ -13,6 +13,9 @@
#include <linux/ptp_clock_kernel.h>
#include <soc/mscc/ocelot.h>
+#define OCELOT_MAX_PTP_ID 63
+#define OCELOT_PTP_FIFO_SIZE 128
+
#define PTP_PIN_CFG_RSZ 0x20
#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 01570dbda503..0e45963bb767 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -224,6 +224,7 @@ struct hda_codec {
#endif
/* misc flags */
+ unsigned int configured:1; /* codec was configured */
unsigned int in_freeing:1; /* being released */
unsigned int registered:1; /* codec was registered */
unsigned int display_power_control:1; /* needs display power */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index db28e79b77ee..a3584a357f35 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -52,12 +52,12 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
+ unsigned int domid, bool no_translate);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
- bool no_translate, struct page **pages)
+ bool no_translate)
{
BUG();
return 0;
@@ -134,7 +134,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
*/
BUG_ON(err_ptr == NULL);
return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
- false, pages);
+ false);
}
/*
@@ -146,7 +146,6 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* @err_ptr: Returns per-MFN error status.
* @prot: page protection mask
* @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
*
* @mfn and @err_ptr may point to the same buffer, the MFNs will be
* overwritten by the error codes after they are mapped.
@@ -157,14 +156,13 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
unsigned long addr, xen_pfn_t *mfn,
int nr, int *err_ptr,
- pgprot_t prot, unsigned int domid,
- struct page **pages)
+ pgprot_t prot, unsigned int domid)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
- true, pages);
+ true);
}
/* xen_remap_domain_gfn_range() - map a range of foreign frames
@@ -188,8 +186,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
- return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
- pages);
+ return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
}
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index df1ccf4558f8..2a9695ccb65f 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -311,17 +311,19 @@ static struct cpuset top_cpuset = {
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
/*
- * There are two global locks guarding cpuset structures - cpuset_mutex and
+ * There are two global locks guarding cpuset structures - cpuset_rwsem and
* callback_lock. We also require taking task_lock() when dereferencing a
* task's cpuset pointer. See "The task_lock() exception", at the end of this
- * comment.
+ * comment. The cpuset code uses only the cpuset_rwsem write lock. Other
+ * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
+ * prevent change to cpuset structures.
*
* A task must hold both locks to modify cpusets. If a task holds
- * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
+ * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
* is the only task able to also acquire callback_lock and be able to
* modify cpusets. It can perform various checks on the cpuset structure
* first, knowing nothing will change. It can also allocate memory while
- * just holding cpuset_mutex. While it is performing these checks, various
+ * just holding cpuset_rwsem. While it is performing these checks, various
* callback routines can briefly acquire callback_lock to query cpusets.
* Once it is ready to make the changes, it takes callback_lock, blocking
* everyone else.
@@ -393,7 +395,7 @@ static inline bool is_in_v2_mode(void)
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
*/
static void guarantee_online_cpus(struct task_struct *tsk,
struct cpumask *pmask)
@@ -435,7 +437,7 @@ out_unlock:
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
@@ -447,7 +449,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
*/
static void cpuset_update_task_spread_flag(struct cpuset *cs,
struct task_struct *tsk)
@@ -468,7 +470,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set. Call holding cpuset_mutex.
+ * are only set if the other's are set. Call holding cpuset_rwsem.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -577,7 +579,7 @@ static inline void free_cpuset(struct cpuset *cs)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
- * cpuset_mutex held.
+ * cpuset_rwsem held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
@@ -700,7 +702,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_unlock();
}
-/* Must be called with cpuset_mutex held. */
+/* Must be called with cpuset_rwsem held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
@@ -726,7 +728,7 @@ static inline int nr_cpusets(void)
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
- * Must be called with cpuset_mutex held.
+ * Must be called with cpuset_rwsem held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a
@@ -1005,7 +1007,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
- * Call with cpuset_mutex held. Takes cpus_read_lock().
+ * Call with cpuset_rwsem held. Takes cpus_read_lock().
*/
static void rebuild_sched_domains_locked(void)
{
@@ -1078,7 +1080,7 @@ void rebuild_sched_domains(void)
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its cpus_allowed to the
- * effective cpuset's. As this function is called with cpuset_mutex held,
+ * effective cpuset's. As this function is called with cpuset_rwsem held,
* cpuset membership stays stable.
*/
static void update_tasks_cpumask(struct cpuset *cs)
@@ -1347,7 +1349,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
*
* On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
*
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
@@ -1704,12 +1706,12 @@ static void *cpuset_being_rebound;
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
- * effective cpuset's. As this function is called with cpuset_mutex held,
+ * effective cpuset's. As this function is called with cpuset_rwsem held,
* cpuset membership stays stable.
*/
static void update_tasks_nodemask(struct cpuset *cs)
{
- static nodemask_t newmems; /* protected by cpuset_mutex */
+ static nodemask_t newmems; /* protected by cpuset_rwsem */
struct css_task_iter it;
struct task_struct *task;
@@ -1722,7 +1724,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
- * the global cpuset_mutex, we know that no other rebind effort
+ * the global cpuset_rwsem, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -1768,7 +1770,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
*
* On legacy hierarchy, effective_mems will be the same with mems_allowed.
*
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
@@ -1821,7 +1823,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
- * Call with cpuset_mutex held. May take callback_lock during call.
+ * Call with cpuset_rwsem held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -1911,7 +1913,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
* @cs: the cpuset in which each task's spread flags needs to be changed
*
* Iterate through each task of @cs updating its spread flags. As this
- * function is called with cpuset_mutex held, cpuset membership stays
+ * function is called with cpuset_rwsem held, cpuset membership stays
* stable.
*/
static void update_tasks_flags(struct cpuset *cs)
@@ -1931,7 +1933,7 @@ static void update_tasks_flags(struct cpuset *cs)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1980,7 +1982,7 @@ out:
* cs: the cpuset to update
* new_prs: new partition root state
*
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{
@@ -2167,7 +2169,7 @@ static int fmeter_getrate(struct fmeter *fmp)
static struct cpuset *cpuset_attach_old_cs;
-/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
+/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
@@ -2219,7 +2221,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
}
/*
- * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
+ * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach()
* but we can't allocate it dynamically there. Define it global and
* allocate from cpuset_init().
*/
@@ -2227,7 +2229,7 @@ static cpumask_var_t cpus_attach;
static void cpuset_attach(struct cgroup_taskset *tset)
{
- /* static buf protected by cpuset_mutex */
+ /* static buf protected by cpuset_rwsem */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;
@@ -2417,7 +2419,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
- * grabbing cpuset_mutex anyway. This only happens on the legacy
+ * grabbing cpuset_rwsem anyway. This only happens on the legacy
* hierarchies.
*/
css_get(&cs->css);
@@ -3672,7 +3674,7 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
- * and we take cpuset_mutex, keeping cpuset_attach() from changing it
+ * and we take cpuset_rwsem, keeping cpuset_attach() from changing it
* anyway.
*/
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
diff --git a/kernel/module.c b/kernel/module.c
index 40ec9a030eec..5c26a76e800b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4489,8 +4489,10 @@ static void cfi_init(struct module *mod)
/* Fix init/exit functions to point to the CFI jump table */
if (init)
mod->init = *init;
+#ifdef CONFIG_MODULE_UNLOAD
if (exit)
mod->exit = *exit;
+#endif
cfi_module_add(mod, module_addr_min);
#endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33a6b4a2443d..1b3eb1e9531f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4830,8 +4830,16 @@ void show_workqueue_state(void)
for_each_pwq(pwq, wq) {
raw_spin_lock_irqsave(&pwq->pool->lock, flags);
- if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+ if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+ /*
+ * Defer printing to avoid deadlocks in console
+ * drivers that queue work while holding locks
+ * also taken in their write paths.
+ */
+ printk_deferred_enter();
show_pwq(pwq);
+ printk_deferred_exit();
+ }
raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
@@ -4849,7 +4857,12 @@ void show_workqueue_state(void)
raw_spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
-
+ /*
+ * Defer printing to avoid deadlocks in console drivers that
+ * queue work while holding locks also taken in their write
+ * paths.
+ */
+ printk_deferred_enter();
pr_info("pool %d:", pool->id);
pr_cont_pool_info(pool);
pr_cont(" hung=%us workers=%d",
@@ -4864,6 +4877,7 @@ void show_workqueue_state(void)
first = false;
}
pr_cont("\n");
+ printk_deferred_exit();
next_pool:
raw_spin_unlock_irqrestore(&pool->lock, flags);
/*
diff --git a/lib/Makefile b/lib/Makefile
index 5efd1b435a37..a841be5244ac 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
-CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index cdbe54b16501..e14a18af573d 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
/* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
if (IS_ERR_OR_NULL(to_free))
return;
- kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
- (void *)to_free);
+ kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+ (void *)to_free);
}
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index eab5fc88a002..d8b9dbabd4a4 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -77,8 +77,8 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
- seq_printf(seq, "%9s: %16llu %12llu %4llu %6llu %4llu %5llu %10llu %9llu "
- "%16llu %12llu %4llu %6llu %4llu %5llu %7llu %10llu\n",
+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors,
@@ -103,11 +103,11 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
static int dev_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Interface| Receive "
- " | Transmit\n"
- " | bytes packets errs drop fifo frame "
- "compressed multicast| bytes packets errs "
- " drop fifo colls carrier compressed\n");
+ seq_puts(seq, "Inter-| Receive "
+ " | Transmit\n"
+ " face |bytes packets errs drop fifo frame "
+ "compressed multicast|bytes packets errs "
+ "drop fifo colls carrier compressed\n");
else
dev_seq_printf_stats(seq, v);
return 0;
@@ -259,14 +259,14 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
struct packet_type *pt = v;
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Type Device Function\n");
+ seq_puts(seq, "Type Device Function\n");
else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
if (pt->type == htons(ETH_P_ALL))
seq_puts(seq, "ALL ");
else
seq_printf(seq, "%04x", ntohs(pt->type));
- seq_printf(seq, " %-9s %ps\n",
+ seq_printf(seq, " %-8s %ps\n",
pt->dev ? pt->dev->name : "", pt->func);
}
@@ -327,14 +327,12 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
struct netdev_hw_addr *ha;
struct net_device *dev = v;
- if (v == SEQ_START_TOKEN) {
- seq_puts(seq, "Ifindex Interface Refcount Global_use Address\n");
+ if (v == SEQ_START_TOKEN)
return 0;
- }
netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev) {
- seq_printf(seq, "%-7d %-9s %-8d %-10d %*phN\n",
+ seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
dev->ifindex, dev->name,
ha->refcount, ha->global_use,
(int)dev->addr_len, ha->addr);
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index bca1b5d66df2..d8ee15f1c7a9 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -101,8 +101,6 @@ config NET_DSA_TAG_RTL4_A
config NET_DSA_TAG_OCELOT
tristate "Tag driver for Ocelot family of switches, using NPI port"
- depends on MSCC_OCELOT_SWITCH_LIB || \
- (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
select PACKING
help
Say Y or M if you want to enable NPI tagging for the Ocelot switches
@@ -114,8 +112,6 @@ config NET_DSA_TAG_OCELOT
config NET_DSA_TAG_OCELOT_8021Q
tristate "Tag driver for Ocelot family of switches, using VLAN"
- depends on MSCC_OCELOT_SWITCH_LIB || \
- (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
help
Say Y or M if you want to enable support for tagging frames with a
custom VLAN-based header. Frames that require timestamping, such as
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 42d9fcd3c31b..691d27498b24 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -170,7 +170,7 @@ void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
/* Check if the bridge is still in use, otherwise it is time
* to clean it up so we can reuse this bridge_num later.
*/
- if (!dsa_bridge_num_find(bridge_dev))
+ if (dsa_bridge_num_find(bridge_dev) < 0)
clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
@@ -811,7 +811,9 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
if (!dsa_is_cpu_port(ds, port))
continue;
+ rtnl_lock();
err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+ rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 1c797ec8e2c2..6466d0539af9 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -168,7 +168,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
if (extack._msg)
dev_err(ds->dev, "port %d: %s\n", info->port,
extack._msg);
- if (err && err != EOPNOTSUPP)
+ if (err && err != -EOPNOTSUPP)
return err;
}
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index e5127b7d1c6a..b3da4b2ea11c 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -45,6 +45,7 @@
* 6 6 2 2 4 2 N
*/
+#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -129,12 +130,9 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 tag_dev, tag_port;
enum dsa_cmd cmd;
u8 *dsa_header;
- u16 pvid = 0;
- int err;
if (skb->offload_fwd_mark) {
struct dsa_switch_tree *dst = dp->ds->dst;
- struct net_device *br = dp->bridge_dev;
cmd = DSA_CMD_FORWARD;
@@ -144,19 +142,6 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
*/
tag_dev = dst->last_switch + 1 + dp->bridge_num;
tag_port = 0;
-
- /* If we are offloading forwarding for a VLAN-unaware bridge,
- * inject packets to hardware using the bridge's pvid, since
- * that's where the packets ingressed from.
- */
- if (!br_vlan_enabled(br)) {
- /* Safe because __dev_queue_xmit() runs under
- * rcu_read_lock_bh()
- */
- err = br_vlan_get_pvid_rcu(br, &pvid);
- if (err)
- return NULL;
- }
} else {
cmd = DSA_CMD_FROM_CPU;
tag_dev = dp->ds->index;
@@ -180,16 +165,21 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
dsa_header[2] &= ~0x10;
}
} else {
+ struct net_device *br = dp->bridge_dev;
+ u16 vid;
+
+ vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
+
skb_push(skb, DSA_HLEN + extra);
dsa_alloc_etype_header(skb, DSA_HLEN + extra);
- /* Construct untagged DSA tag. */
+ /* Construct DSA header from untagged frame. */
dsa_header = dsa_etype_header_pos_tx(skb) + extra;
dsa_header[0] = (cmd << 6) | tag_dev;
dsa_header[1] = tag_port << 3;
- dsa_header[2] = pvid >> 8;
- dsa_header[3] = pvid & 0xff;
+ dsa_header[2] = vid >> 8;
+ dsa_header[3] = vid & 0xff;
}
return skb;
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index d1d070523ea3..cd60b94fc175 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -2,7 +2,6 @@
/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
-#include <soc/mscc/ocelot.h>
#include "dsa_priv.h"
/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 59072930cb02..3412051981d7 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -9,10 +9,32 @@
* that on egress
*/
#include <linux/dsa/8021q.h>
-#include <soc/mscc/ocelot.h>
-#include <soc/mscc/ocelot_ptp.h>
+#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
+static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
+ struct sk_buff *skb)
+{
+ struct felix_deferred_xmit_work *xmit_work;
+ struct felix_port *felix_port = dp->priv;
+
+ xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+ if (!xmit_work)
+ return NULL;
+
+ /* Calls felix_port_deferred_xmit in felix.c */
+ kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
+ /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ * won't really free the packet.
+ */
+ xmit_work->dp = dp;
+ xmit_work->skb = skb_get(skb);
+
+ kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
+
+ return NULL;
+}
+
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -20,18 +42,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
- struct ocelot *ocelot = dp->ds->priv;
- int port = dp->index;
- u32 rew_op = 0;
+ struct ethhdr *hdr = eth_hdr(skb);
- rew_op = ocelot_ptp_rew_op(skb);
- if (rew_op) {
- if (!ocelot_can_inject(ocelot, 0))
- return NULL;
-
- ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
- return NULL;
- }
+ if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
+ return ocelot_defer_xmit(dp, skb);
return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
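
The other half of this scheme, the kthread worker that actually injects the deferred frame, lives in felix.c and is not part of this hunk. Purely as an orientation sketch, built only from the helpers visible in the removed inline path (ocelot_can_inject(), ocelot_port_inject_frame(), ocelot_ptp_rew_op()), such a consumer could be shaped roughly like this; the real felix_port_deferred_xmit() may differ:

static void example_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work =
		container_of(work, struct felix_deferred_xmit_work, work);
	struct dsa_port *dp = xmit_work->dp;
	struct sk_buff *skb = xmit_work->skb;
	struct ocelot *ocelot = dp->ds->priv;
	u32 rew_op = ocelot_ptp_rew_op(skb);

	if (ocelot_can_inject(ocelot, 0)) {
		ocelot_port_inject_frame(ocelot, dp->index, 0, rew_op, skb);
		consume_skb(skb);
	} else {
		/* Injection FIFO busy: drop the deferred copy. */
		kfree_skb(skb);
	}

	kfree(xmit_work);
}
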
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 8b30cadff708..b7e277d8a84d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1054,14 +1054,19 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio);
if (!ext_hdr || !iio)
goto send_mal_query;
- if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr))
+ if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) ||
+ ntohs(iio->extobj_hdr.length) > sizeof(_iio))
goto send_mal_query;
ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr);
+ iio = skb_header_pointer(skb, sizeof(_ext_hdr),
+ sizeof(iio->extobj_hdr) + ident_len, &_iio);
+ if (!iio)
+ goto send_mal_query;
+
status = 0;
dev = NULL;
switch (iio->extobj_hdr.class_type) {
case ICMP_EXT_ECHO_CTYPE_NAME:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
if (ident_len >= IFNAMSIZ)
goto send_mal_query;
memset(buff, 0, sizeof(buff));
@@ -1069,30 +1074,24 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
dev = dev_get_by_name(net, buff);
break;
case ICMP_EXT_ECHO_CTYPE_INDEX:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
- sizeof(iio->ident.ifindex), &_iio);
if (ident_len != sizeof(iio->ident.ifindex))
goto send_mal_query;
dev = dev_get_by_index(net, ntohl(iio->ident.ifindex));
break;
case ICMP_EXT_ECHO_CTYPE_ADDR:
- if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
+ if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) ||
+ ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
iio->ident.addr.ctype3_hdr.addrlen)
goto send_mal_query;
switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) {
case ICMP_AFI_IP:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
- sizeof(struct in_addr), &_iio);
- if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
- sizeof(struct in_addr))
+ if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr))
goto send_mal_query;
dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case ICMP_AFI_IP6:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
- if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
- sizeof(struct in6_addr))
+ if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr))
goto send_mal_query;
dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev);
dev_hold(dev);
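
The shape of the fix is worth spelling out: read only the fixed extension-object header, check the advertised length against both the header size and the size of the local scratch buffer, then call skb_header_pointer() once more for the full object before any variable-length field is dereferenced. A generic sketch of that two-step parse (the struct names and the 64-byte cap are placeholders, not the ICMP ones):

#include <linux/skbuff.h>
#include <linux/types.h>

struct ex_hdr {
	__be16 length;
	__u8 class_num;
	__u8 class_type;
};

struct ex_obj {
	struct ex_hdr hdr;
	__u8 ident[60];
};

static bool parse_ext_obj(struct sk_buff *skb, unsigned int off,
			  struct ex_obj *buf)
{
	unsigned int len;
	struct ex_obj *obj;

	/* Step 1: fetch just the fixed header and validate the length. */
	obj = skb_header_pointer(skb, off, sizeof(obj->hdr), buf);
	if (!obj)
		return false;

	len = ntohs(obj->hdr.length);
	if (len <= sizeof(obj->hdr) || len > sizeof(*buf))
		return false;

	/* Step 2: re-fetch header plus payload before touching ident[]. */
	obj = skb_header_pointer(skb, off, len, buf);
	return obj != NULL;
}
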
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index 4e5583dbadac..122a3d47424c 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -770,6 +770,66 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
data += sizeof(__be32);
}
+ /* bit12 undefined: filled with empty value */
+ if (trace->type.bit12) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit13 undefined: filled with empty value */
+ if (trace->type.bit13) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit14 undefined: filled with empty value */
+ if (trace->type.bit14) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit15 undefined: filled with empty value */
+ if (trace->type.bit15) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit16 undefined: filled with empty value */
+ if (trace->type.bit16) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit17 undefined: filled with empty value */
+ if (trace->type.bit17) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit18 undefined: filled with empty value */
+ if (trace->type.bit18) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit19 undefined: filled with empty value */
+ if (trace->type.bit19) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit20 undefined: filled with empty value */
+ if (trace->type.bit20) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit21 undefined: filled with empty value */
+ if (trace->type.bit21) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
/* opaque state snapshot */
if (trace->type.bit22) {
if (!sc) {
@@ -792,16 +852,10 @@ void ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_schema *sc;
u8 sclen = 0;
- /* Skip if Overflow flag is set OR
- * if an unknown type (bit 12-21) is set
+ /* Skip if Overflow flag is set
*/
- if (trace->overflow ||
- trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
- trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
- trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
- trace->type.bit21) {
+ if (trace->overflow)
return;
- }
/* NodeLen does not include Opaque State Snapshot length. We need to
* take it into account if the corresponding bit is set (bit 22) and
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index 392c183076ce..f90a87389fcc 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -67,7 +67,11 @@ static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
u32 fields;
if (!trace->type_be32 || !trace->remlen ||
- trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4)
+ trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 ||
+ trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
+ trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
+ trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
+ trace->type.bit21)
return false;
trace->nodelen = 0;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index fda47011b19c..cd6b11c9b54d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -528,7 +528,6 @@ static bool mptcp_check_data_fin(struct sock *sk)
sk->sk_shutdown |= RCV_SHUTDOWN;
smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
- set_bit(MPTCP_DATA_READY, &msk->flags);
switch (sk->sk_state) {
case TCP_ESTABLISHED:
@@ -742,10 +741,9 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
- if (move_skbs_to_msk(msk, ssk)) {
- set_bit(MPTCP_DATA_READY, &msk->flags);
+ if (move_skbs_to_msk(msk, ssk))
sk->sk_data_ready(sk);
- }
+
mptcp_data_unlock(sk);
}
@@ -847,7 +845,6 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
sk->sk_shutdown |= RCV_SHUTDOWN;
smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
- set_bit(MPTCP_DATA_READY, &msk->flags);
sk->sk_data_ready(sk);
}
@@ -1801,21 +1798,6 @@ out:
return copied ? : ret;
}
-static void mptcp_wait_data(struct sock *sk, long *timeo)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct mptcp_sock *msk = mptcp_sk(sk);
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- sk_wait_event(sk, timeo,
- test_bit(MPTCP_DATA_READY, &msk->flags), &wait);
-
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
-}
-
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
size_t len, int flags,
@@ -2119,19 +2101,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
pr_debug("block timeout %ld", timeo);
- mptcp_wait_data(sk, &timeo);
- }
-
- if (skb_queue_empty_lockless(&sk->sk_receive_queue) &&
- skb_queue_empty(&msk->receive_queue)) {
- /* entire backlog drained, clear DATA_READY. */
- clear_bit(MPTCP_DATA_READY, &msk->flags);
-
- /* .. race-breaker: ssk might have gotten new data
- * after last __mptcp_move_skbs() returned false.
- */
- if (unlikely(__mptcp_move_skbs(msk)))
- set_bit(MPTCP_DATA_READY, &msk->flags);
+ sk_wait_data(sk, &timeo, NULL);
}
out_err:
@@ -2140,9 +2110,9 @@ out_err:
tcp_recv_timestamp(msg, sk, &tss);
}
- pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
- msk, test_bit(MPTCP_DATA_READY, &msk->flags),
- skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
+ pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+ msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+ skb_queue_empty(&msk->receive_queue), copied);
if (!(flags & MSG_PEEK))
mptcp_rcv_space_adjust(msk, copied);
@@ -2406,7 +2376,6 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
inet_sk_state_store(sk, TCP_CLOSE);
sk->sk_shutdown = SHUTDOWN_MASK;
smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
- set_bit(MPTCP_DATA_READY, &msk->flags);
set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
mptcp_close_wake_up(sk);
@@ -3429,8 +3398,14 @@ unlock_fail:
static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
{
- return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
- 0;
+ /* Concurrent splices from sk_receive_queue into receive_queue will
+ * always show at least one non-empty queue when checked in this order.
+ */
+ if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
+ skb_queue_empty_lockless(&msk->receive_queue))
+ return 0;
+
+ return EPOLLIN | EPOLLRDNORM;
}
static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
@@ -3465,7 +3440,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
state = inet_sk_state_load(sk);
pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
if (state == TCP_LISTEN)
- return mptcp_check_readable(msk);
+ return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 0;
if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
mask |= mptcp_check_readable(msk);
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index 6024fad905ff..dda323e0a473 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -60,6 +60,9 @@ int nfc_proto_register(const struct nfc_protocol *nfc_proto)
proto_tab[nfc_proto->id] = nfc_proto;
write_unlock(&proto_tab_lock);
+ if (rc)
+ proto_unregister(nfc_proto->proto);
+
return rc;
}
EXPORT_SYMBOL(nfc_proto_register);
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index fefc03674f4f..d63d2e5dc60c 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -277,6 +277,7 @@ int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
{
struct digital_tg_mdaa_params *params;
+ int rc;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
@@ -291,8 +292,12 @@ static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
params->sc = DIGITAL_SENSF_FELICA_SC;
- return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
- 500, digital_tg_recv_atr_req, NULL);
+ rc = digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
+ 500, digital_tg_recv_atr_req, NULL);
+ if (rc)
+ kfree(params);
+
+ return rc;
}
static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech)
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
index 84d2345c75a3..3adf4589852a 100644
--- a/net/nfc/digital_technology.c
+++ b/net/nfc/digital_technology.c
@@ -465,8 +465,12 @@ static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
skb_put_u8(skb, sel_cmd);
skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR);
- return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
- target);
+ rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
+ target);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
}
static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index a2e72c003805..b911ab78bed9 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -334,6 +334,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
ndev->cur_conn_id);
if (conn_info) {
list_del(&conn_info->list);
+ if (conn_info == ndev->rf_conn_info)
+ ndev->rf_conn_info = NULL;
devm_kfree(&ndev->nfc_dev->dev, conn_info);
}
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 0bc10234e306..e1904e62425e 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -529,22 +529,28 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
for (i = tc.offset; i < tc.offset + tc.count; i++) {
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
- struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
- struct gnet_stats_queue __percpu *cpu_qstats = NULL;
spin_lock_bh(qdisc_lock(qdisc));
+
if (qdisc_is_percpu_stats(qdisc)) {
- cpu_bstats = qdisc->cpu_bstats;
- cpu_qstats = qdisc->cpu_qstats;
+ qlen = qdisc_qlen_sum(qdisc);
+
+ __gnet_stats_copy_basic(NULL, &bstats,
+ qdisc->cpu_bstats,
+ &qdisc->bstats);
+ __gnet_stats_copy_queue(&qstats,
+ qdisc->cpu_qstats,
+ &qdisc->qstats,
+ qlen);
+ } else {
+ qlen += qdisc->q.qlen;
+ bstats.bytes += qdisc->bstats.bytes;
+ bstats.packets += qdisc->bstats.packets;
+ qstats.backlog += qdisc->qstats.backlog;
+ qstats.drops += qdisc->qstats.drops;
+ qstats.requeues += qdisc->qstats.requeues;
+ qstats.overlimits += qdisc->qstats.overlimits;
}
-
- qlen = qdisc_qlen_sum(qdisc);
- __gnet_stats_copy_basic(NULL, &sch->bstats,
- cpu_bstats, &qdisc->bstats);
- __gnet_stats_copy_queue(&sch->qstats,
- cpu_qstats,
- &qdisc->qstats,
- qlen);
spin_unlock_bh(qdisc_lock(qdisc));
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b8fa8f1a7277..c7503fd64915 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3697,7 +3697,7 @@ struct sctp_chunk *sctp_make_strreset_req(
outlen = (sizeof(outreq) + stream_len) * out;
inlen = (sizeof(inreq) + stream_len) * in;
- retval = sctp_make_reconf(asoc, outlen + inlen);
+ retval = sctp_make_reconf(asoc, SCTP_PAD4(outlen) + SCTP_PAD4(inlen));
if (!retval)
return NULL;
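
The underlying rule is that every parameter inside a RECONF chunk is padded to a 4-byte boundary, so the allocation must be based on the padded parameter lengths rather than the raw ones. A small worked example, taking the outgoing and incoming request headers as 16 and 8 bytes purely for illustration:

/* Same rounding effect as the kernel's SCTP_PAD4(); the exact macro
 * definition is assumed here rather than copied.
 */
#define PAD4(len)	(((len) + 3) & ~3U)

/* Resetting 3 streams (stream_len = 3 * 2 = 6 bytes of stream numbers):
 *
 *   outlen = 16 + 6 = 22   -> PAD4(outlen) = 24
 *   inlen  =  8 + 6 = 14   -> PAD4(inlen)  = 16
 *
 * outlen + inlen             = 36 bytes (old allocation)
 * PAD4(outlen) + PAD4(inlen) = 40 bytes (what the padded parameters use)
 *
 * so the unpadded sum under-allocates by 4 bytes in this case.
 */
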
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index f23f558054a7..99acd337ba90 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -150,9 +150,11 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
again:
link = conn->lnk;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
if (rc)
- return rc;
+ goto put_out;
spin_lock_bh(&conn->send_lock);
if (link != conn->lnk) {
@@ -160,6 +162,7 @@ again:
spin_unlock_bh(&conn->send_lock);
smc_wr_tx_put_slot(link,
(struct smc_wr_tx_pend_priv *)pend);
+ smc_wr_tx_link_put(link);
if (again)
return -ENOLINK;
again = true;
@@ -167,6 +170,8 @@ again:
}
rc = smc_cdc_msg_send(conn, wr_buf, pend);
spin_unlock_bh(&conn->send_lock);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f57449089a16..4d463701a759 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -943,7 +943,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
to_lnk = &lgr->lnk[i];
break;
}
- if (!to_lnk) {
+ if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
smc_lgr_terminate_sched(lgr);
return NULL;
}
@@ -975,24 +975,26 @@ again:
read_unlock_bh(&lgr->conns_lock);
/* pre-fetch buffer outside of send_lock, might sleep */
rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
- if (rc) {
- smcr_link_down_cond_sched(to_lnk);
- return NULL;
- }
+ if (rc)
+ goto err_out;
/* avoid race with smcr_tx_sndbuf_nonempty() */
spin_lock_bh(&conn->send_lock);
smc_switch_link_and_count(conn, to_lnk);
rc = smc_switch_cursor(smc, pend, wr_buf);
spin_unlock_bh(&conn->send_lock);
sock_put(&smc->sk);
- if (rc) {
- smcr_link_down_cond_sched(to_lnk);
- return NULL;
- }
+ if (rc)
+ goto err_out;
goto again;
}
read_unlock_bh(&lgr->conns_lock);
+ smc_wr_tx_link_put(to_lnk);
return to_lnk;
+
+err_out:
+ smcr_link_down_cond_sched(to_lnk);
+ smc_wr_tx_link_put(to_lnk);
+ return NULL;
}
static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 2e7560eba981..72f4b72eb175 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -383,9 +383,11 @@ int smc_llc_send_confirm_link(struct smc_link *link,
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
memset(confllc, 0, sizeof(*confllc));
confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
@@ -402,6 +404,8 @@ int smc_llc_send_confirm_link(struct smc_link *link,
confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -415,9 +419,11 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
struct smc_link *link;
int i, rc, rtok_ix;
+ if (!smc_wr_tx_link_hold(send_link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
memset(rkeyllc, 0, sizeof(*rkeyllc));
rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
@@ -444,6 +450,8 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
/* send llc message */
rc = smc_wr_tx_send(send_link, pend);
+put_out:
+ smc_wr_tx_link_put(send_link);
return rc;
}
@@ -456,9 +464,11 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
memset(rkeyllc, 0, sizeof(*rkeyllc));
rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
@@ -467,6 +477,8 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -480,9 +492,11 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
addllc = (struct smc_llc_msg_add_link *)wr_buf;
memset(addllc, 0, sizeof(*addllc));
@@ -504,6 +518,8 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
}
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -517,9 +533,11 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
delllc = (struct smc_llc_msg_del_link *)wr_buf;
memset(delllc, 0, sizeof(*delllc));
@@ -536,6 +554,8 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
delllc->reason = htonl(reason);
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -547,9 +567,11 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
testllc = (struct smc_llc_msg_test_link *)wr_buf;
memset(testllc, 0, sizeof(*testllc));
testllc->hd.common.type = SMC_LLC_TEST_LINK;
@@ -557,6 +579,8 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -567,13 +591,16 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
struct smc_wr_buf *wr_buf;
int rc;
- if (!smc_link_usable(link))
+ if (!smc_wr_tx_link_hold(link))
return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
- return smc_wr_tx_send(link, pend);
+ rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
+ return rc;
}
/* schedule an llc send on link, may wait for buffers,
@@ -586,13 +613,16 @@ static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
struct smc_wr_buf *wr_buf;
int rc;
- if (!smc_link_usable(link))
+ if (!smc_wr_tx_link_hold(link))
return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
- return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+ rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+put_out:
+ smc_wr_tx_link_put(link);
+ return rc;
}
/********************************* receive ***********************************/
@@ -672,9 +702,11 @@ static int smc_llc_add_link_cont(struct smc_link *link,
struct smc_buf_desc *rmb;
u8 n;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
memset(addc_llc, 0, sizeof(*addc_llc));
@@ -706,7 +738,10 @@ static int smc_llc_add_link_cont(struct smc_link *link,
addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
if (lgr->role == SMC_CLNT)
addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
- return smc_wr_tx_send(link, pend);
+ rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
+ return rc;
}
static int smc_llc_cli_rkey_exchange(struct smc_link *link,
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index c79361dfcdfb..738a4a99c827 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
/* Wakeup sndbuf consumers from any context (IRQ or process)
* since there is more data to transmit; usable snd_wnd as max transmit
*/
-static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
struct smc_link *link = conn->lnk;
@@ -505,8 +505,11 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
struct smc_wr_buf *wr_buf;
int rc;
+ if (!link || !smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
if (rc < 0) {
+ smc_wr_tx_link_put(link);
if (rc == -EBUSY) {
struct smc_sock *smc =
container_of(conn, struct smc_sock, conn);
@@ -547,22 +550,7 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
out_unlock:
spin_unlock_bh(&conn->send_lock);
- return rc;
-}
-
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
-{
- struct smc_link *link = conn->lnk;
- int rc = -ENOLINK;
-
- if (!link)
- return rc;
-
- atomic_inc(&link->wr_tx_refcnt);
- if (smc_link_usable(link))
- rc = _smcr_tx_sndbuf_nonempty(conn);
- if (atomic_dec_and_test(&link->wr_tx_refcnt))
- wake_up_all(&link->wr_tx_wait);
+ smc_wr_tx_link_put(link);
return rc;
}
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 423b8709f1c9..2bc626f230a5 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -60,6 +60,20 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
atomic_long_set(wr_tx_id, val);
}
+static inline bool smc_wr_tx_link_hold(struct smc_link *link)
+{
+ if (!smc_link_usable(link))
+ return false;
+ atomic_inc(&link->wr_tx_refcnt);
+ return true;
+}
+
+static inline void smc_wr_tx_link_put(struct smc_link *link)
+{
+ if (atomic_dec_and_test(&link->wr_tx_refcnt))
+ wake_up_all(&link->wr_tx_wait);
+}
+
static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
{
wake_up_all(&lnk->wr_tx_wait);
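
The counterpart of these helpers is not in this hunk: whatever tears a link down has to wait until every in-flight sender has dropped its reference, which is what the wake_up_all() on the final put is for. A sketch of what such a waiter looks like, assuming it sleeps on the same wr_tx_wait/wr_tx_refcnt pair (the actual smc_wr.c code may be arranged differently):

#include <linux/atomic.h>
#include <linux/wait.h>

static void example_quiesce_link_tx(struct smc_link *link)
{
	/* Sleep until the last smc_wr_tx_link_put() wakes wr_tx_wait. */
	wait_event(link->wr_tx_wait, !atomic_read(&link->wr_tx_refcnt));
}
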
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0878ab86597b..89f9e85ae970 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -828,7 +828,7 @@ static void unix_unhash(struct sock *sk)
}
struct proto unix_dgram_proto = {
- .name = "UNIX-DGRAM",
+ .name = "UNIX",
.owner = THIS_MODULE,
.obj_size = sizeof(struct unix_sock),
.close = unix_close,
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 952e46876329..4aad28480035 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
+= -fplugin-arg-structleak_plugin-byref
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
+= -fplugin-arg-structleak_plugin-byref-all
+ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
+ DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
+endif
+export DISABLE_STRUCTLEAK_PLUGIN
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
+= -DSTRUCTLEAK_PLUGIN
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index fd9777f63f14..9dbab13329fa 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -82,10 +82,8 @@ cat << EOF
#define __IGNORE_truncate64
#define __IGNORE_stat64
#define __IGNORE_lstat64
-#define __IGNORE_fstat64
#define __IGNORE_fcntl64
#define __IGNORE_fadvise64_64
-#define __IGNORE_fstatat64
#define __IGNORE_fstatfs64
#define __IGNORE_statfs64
#define __IGNORE_llseek
@@ -253,6 +251,10 @@ cat << EOF
#define __IGNORE_getpmsg
#define __IGNORE_putpmsg
#define __IGNORE_vserver
+
+/* 64-bit ports never needed these, and new 32-bit ports can use statx */
+#define __IGNORE_fstat64
+#define __IGNORE_fstatat64
EOF
}
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index a59de24695ec..dfe5a64e19d2 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -468,6 +468,76 @@ static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
}
#endif /* CONFIG_X86_X32 */
+#ifdef __BIG_ENDIAN
+typedef char __pad_before_u32[4];
+typedef char __pad_after_u32[0];
+#else
+typedef char __pad_before_u32[0];
+typedef char __pad_after_u32[4];
+#endif
+
+/* PCM 2.0.15 API definition had a bug in mmap control; it puts the avail_min
+ * at the wrong offset due to a typo in padding type.
+ * The bug hits only 32bit.
+ * A workaround for incorrect read/write is needed only in 32bit compat mode.
+ */
+struct __snd_pcm_mmap_control64_buggy {
+ __pad_before_u32 __pad1;
+ __u32 appl_ptr;
+ __pad_before_u32 __pad2; /* SiC! here is the bug */
+ __pad_before_u32 __pad3;
+ __u32 avail_min;
+ __pad_after_uframe __pad4;
+};
+
+static int snd_pcm_ioctl_sync_ptr_buggy(struct snd_pcm_substream *substream,
+ struct snd_pcm_sync_ptr __user *_sync_ptr)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_sync_ptr sync_ptr;
+ struct __snd_pcm_mmap_control64_buggy *sync_cp;
+ volatile struct snd_pcm_mmap_status *status;
+ volatile struct snd_pcm_mmap_control *control;
+ int err;
+
+ memset(&sync_ptr, 0, sizeof(sync_ptr));
+ sync_cp = (struct __snd_pcm_mmap_control64_buggy *)&sync_ptr.c.control;
+ if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
+ return -EFAULT;
+ if (copy_from_user(sync_cp, &(_sync_ptr->c.control), sizeof(*sync_cp)))
+ return -EFAULT;
+ status = runtime->status;
+ control = runtime->control;
+ if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+ err = snd_pcm_hwsync(substream);
+ if (err < 0)
+ return err;
+ }
+ snd_pcm_stream_lock_irq(substream);
+ if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
+ err = pcm_lib_apply_appl_ptr(substream, sync_cp->appl_ptr);
+ if (err < 0) {
+ snd_pcm_stream_unlock_irq(substream);
+ return err;
+ }
+ } else {
+ sync_cp->appl_ptr = control->appl_ptr;
+ }
+ if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+ control->avail_min = sync_cp->avail_min;
+ else
+ sync_cp->avail_min = control->avail_min;
+ sync_ptr.s.status.state = status->state;
+ sync_ptr.s.status.hw_ptr = status->hw_ptr;
+ sync_ptr.s.status.tstamp = status->tstamp;
+ sync_ptr.s.status.suspended_state = status->suspended_state;
+ sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
+ snd_pcm_stream_unlock_irq(substream);
+ if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
+ return -EFAULT;
+ return 0;
+}
+
/*
*/
enum {
@@ -537,7 +607,7 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
if (in_x32_syscall())
return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
#endif /* CONFIG_X86_X32 */
- return snd_pcm_common_ioctl(file, substream, cmd, argp);
+ return snd_pcm_ioctl_sync_ptr_buggy(substream, argp);
case SNDRV_PCM_IOCTL_HW_REFINE32:
return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
case SNDRV_PCM_IOCTL_HW_PARAMS32:
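
What makes the typo'd padding a wire-format problem is easiest to see from the offsets: the __pad_before_u32/__pad_after_u32 typedefs emulate a 64-bit slot that carries a 32-bit value in its low half on little-endian and its high half on big-endian. With the wrong typedef on __pad2, avail_min moves four bytes earlier on little-endian 32-bit userspace, the case the comment above says the workaround targets. A standalone illustration of the intended versus buggy layout (field names are generic, not the UAPI ones):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Little-endian emulation of a 64-bit slot carrying a 32-bit value:
 * the value sits in the low half, padding follows it. Big-endian is the
 * mirror image (padding first).
 */
struct slot64_le {
	uint32_t val;
	uint32_t pad_after;
};

struct two_slots_le {			/* intended layout */
	struct slot64_le appl_ptr;	/* value at offset 0 */
	struct slot64_le avail_min;	/* value at offset 8 */
};

struct two_slots_buggy_le {		/* effect of the typo on little-endian */
	uint32_t appl_ptr;
	uint32_t avail_min;		/* lands at offset 4 instead of 8 */
};

int main(void)
{
	printf("intended avail_min offset: %zu\n",
	       offsetof(struct two_slots_le, avail_min) +
	       offsetof(struct slot64_le, val));
	printf("buggy avail_min offset:    %zu\n",
	       offsetof(struct two_slots_buggy_le, avail_min));
	return 0;
}
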
diff --git a/sound/core/seq_device.c b/sound/core/seq_device.c
index 382275c5b193..7f3fd8eb016f 100644
--- a/sound/core/seq_device.c
+++ b/sound/core/seq_device.c
@@ -156,6 +156,8 @@ static int snd_seq_device_dev_free(struct snd_device *device)
struct snd_seq_device *dev = device->device_data;
cancel_autoload_drivers();
+ if (dev->private_free)
+ dev->private_free(dev);
put_device(&dev->dev);
return 0;
}
@@ -183,11 +185,7 @@ static int snd_seq_device_dev_disconnect(struct snd_device *device)
static void snd_seq_dev_release(struct device *dev)
{
- struct snd_seq_device *sdev = to_seq_dev(dev);
-
- if (sdev->private_free)
- sdev->private_free(sdev);
- kfree(sdev);
+ kfree(to_seq_dev(dev));
}
/*
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 062da7a7a586..f7bd6e2db085 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
if (!full_reset)
goto skip_reset;
- /* clear STATESTS */
- snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
+ /* clear STATESTS if not in reset */
+ if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
+ snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
/* reset controller */
snd_hdac_bus_enter_link_reset(bus);
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 2523b23389e9..1c8bffc3eec6 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -298,29 +298,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
{
int err;
+ if (codec->configured)
+ return 0;
+
if (is_generic_config(codec))
codec->probe_id = HDA_CODEC_ID_GENERIC;
else
codec->probe_id = 0;
- err = snd_hdac_device_register(&codec->core);
- if (err < 0)
- return err;
+ if (!device_is_registered(&codec->core.dev)) {
+ err = snd_hdac_device_register(&codec->core);
+ if (err < 0)
+ return err;
+ }
if (!codec->preset)
codec_bind_module(codec);
if (!codec->preset) {
err = codec_bind_generic(codec);
if (err < 0) {
- codec_err(codec, "Unable to bind the codec\n");
- goto error;
+ codec_dbg(codec, "Unable to bind the codec\n");
+ return err;
}
}
+ codec->configured = 1;
return 0;
-
- error:
- snd_hdac_device_unregister(&codec->core);
- return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a9ebefd60cf6..0c4a337c9fc0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
snd_array_free(&codec->nids);
remove_conn_list(codec);
snd_hdac_regmap_exit(&codec->core);
+ codec->configured = 0;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 7cd452831fd3..930ae4002a81 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -25,6 +25,7 @@
#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"
+#include "hda_local.h"
#define CREATE_TRACE_POINTS
#include "hda_controller_trace.h"
@@ -1248,17 +1249,24 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
int azx_codec_configure(struct azx *chip)
{
struct hda_codec *codec, *next;
+ int success = 0;
- /* use _safe version here since snd_hda_codec_configure() deregisters
- * the device upon error and deletes itself from the bus list.
- */
- list_for_each_codec_safe(codec, next, &chip->bus) {
- snd_hda_codec_configure(codec);
+ list_for_each_codec(codec, &chip->bus) {
+ if (!snd_hda_codec_configure(codec))
+ success++;
}
- if (!azx_bus(chip)->num_codecs)
- return -ENODEV;
- return 0;
+ if (success) {
+ /* unregister failed codecs if any codec has been probed */
+ list_for_each_codec_safe(codec, next, &chip->bus) {
+ if (!codec->configured) {
+ codec_err(codec, "Unable to configure, disabling\n");
+ snd_hdac_device_unregister(&codec->core);
+ }
+ }
+ }
+
+ return success ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 3062f87380b1..f5bf295eb830 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -41,7 +41,7 @@
/* 24 unused */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_RETRY_PROBE (1 << 27) /* retry probe if no codec is configured */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 47777439961c..4d22e7adeee8 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -307,7 +307,8 @@ enum {
/* quirks for AMD SB */
#define AZX_DCAPS_PRESET_AMD_SB \
(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_AMD_WORKAROUND |\
- AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME |\
+ AZX_DCAPS_RETRY_PROBE)
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
@@ -1723,7 +1724,7 @@ static void azx_check_snoop_available(struct azx *chip)
static void azx_probe_work(struct work_struct *work)
{
- struct hda_intel *hda = container_of(work, struct hda_intel, probe_work);
+ struct hda_intel *hda = container_of(work, struct hda_intel, probe_work.work);
azx_probe_continue(&hda->chip);
}
@@ -1828,7 +1829,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
}
/* continue probing in work context as may trigger request module */
- INIT_WORK(&hda->probe_work, azx_probe_work);
+ INIT_DELAYED_WORK(&hda->probe_work, azx_probe_work);
*rchip = chip;
@@ -2142,7 +2143,7 @@ static int azx_probe(struct pci_dev *pci,
#endif
if (schedule_probe)
- schedule_work(&hda->probe_work);
+ schedule_delayed_work(&hda->probe_work, 0);
dev++;
if (chip->disabled)
@@ -2228,6 +2229,11 @@ static int azx_probe_continue(struct azx *chip)
int dev = chip->dev_index;
int err;
+ if (chip->disabled || hda->init_failed)
+ return -EIO;
+ if (hda->probe_retry)
+ goto probe_retry;
+
to_hda_bus(bus)->bus_probing = 1;
hda->probe_continued = 1;
@@ -2289,10 +2295,20 @@ static int azx_probe_continue(struct azx *chip)
#endif
}
#endif
+
+ probe_retry:
if (bus->codec_mask && !(probe_only[dev] & 1)) {
err = azx_codec_configure(chip);
- if (err < 0)
+ if (err) {
+ if ((chip->driver_caps & AZX_DCAPS_RETRY_PROBE) &&
+ ++hda->probe_retry < 60) {
+ schedule_delayed_work(&hda->probe_work,
+ msecs_to_jiffies(1000));
+ return 0; /* keep things up */
+ }
+ dev_err(chip->card->dev, "Cannot probe codecs, giving up\n");
goto out_free;
+ }
}
err = snd_card_register(chip->card);
@@ -2322,6 +2338,7 @@ out_free:
display_power(chip, false);
complete_all(&hda->probe_wait);
to_hda_bus(bus)->bus_probing = 0;
+ hda->probe_retry = 0;
return 0;
}
@@ -2347,7 +2364,7 @@ static void azx_remove(struct pci_dev *pci)
* device during cancel_work_sync() call.
*/
device_unlock(&pci->dev);
- cancel_work_sync(&hda->probe_work);
+ cancel_delayed_work_sync(&hda->probe_work);
device_lock(&pci->dev);
snd_card_free(card);
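
The probe_work conversion above is the standard way to get a bounded, self-rescheduling retry: switch the work item to a delayed_work and have the handler re-queue itself with a delay until either the operation succeeds or a retry budget runs out. A condensed sketch of that pattern with made-up names (my_dev, my_try_probe):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work probe_work;
	int probe_retry;
};

static int my_try_probe(struct my_dev *dev)
{
	return -ENODEV;			/* pretend nothing answered yet */
}

static void my_probe_work(struct work_struct *work)
{
	struct my_dev *dev =
		container_of(work, struct my_dev, probe_work.work);

	if (my_try_probe(dev) && ++dev->probe_retry < 60) {
		/* Not ready: try again in a second, for up to a minute. */
		schedule_delayed_work(&dev->probe_work,
				      msecs_to_jiffies(1000));
		return;
	}
	dev->probe_retry = 0;		/* done, or gave up */
}

/* Setup mirrors the hda_intel.c change: INIT_DELAYED_WORK() plus an
 * initial schedule_delayed_work(&dev->probe_work, 0).
 */
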
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index 3fb119f09040..0f39418f9328 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -14,7 +14,7 @@ struct hda_intel {
/* sync probing */
struct completion probe_wait;
- struct work_struct probe_work;
+ struct delayed_work probe_work;
/* card list (for power_save trigger) */
struct list_head list;
@@ -30,6 +30,8 @@ struct hda_intel {
unsigned int freed:1; /* resources already released */
bool need_i915_power:1; /* the hda controller needs i915 power */
+
+ int probe_retry; /* being probe-retry */
};
#endif
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4407f7da57c4..22d27b12c4e7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -526,6 +526,8 @@ static void alc_shutup_pins(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
+ case 0x10ec0256:
case 0x10ec0283:
case 0x10ec0286:
case 0x10ec0288:
@@ -2537,7 +2539,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
- SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
@@ -3528,7 +3531,8 @@ static void alc256_shutup(struct hda_codec *codec)
/* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
* when booting with headset plugged. So skip setting it for the codec alc257
*/
- if (codec->core.vendor_id != 0x10ec0257)
+ if (spec->codec_variant != ALC269_TYPE_ALC257 &&
+ spec->codec_variant != ALC269_TYPE_ALC256)
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (!spec->no_shutup_pins)
@@ -6449,6 +6453,24 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
/* for alc285_fixup_ideapad_s740_coef() */
#include "ideapad_s740_helper.c"
+static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ /*
+ * A certain other OS sets these coeffs to different values. On at least one TongFang
+ * barebone these settings might survive even a cold reboot. So to restore a clean slate the
+ * values are explicitly reset to default here. Without this, the external microphone is
+ * always in a plugged-in state, while the internal microphone is always in an unplugged
+ * state, breaking the ability to use the internal microphone.
+ */
+ alc_write_coef_idx(codec, 0x24, 0x0000);
+ alc_write_coef_idx(codec, 0x26, 0x0000);
+ alc_write_coef_idx(codec, 0x29, 0x3000);
+ alc_write_coef_idx(codec, 0x37, 0xfe05);
+ alc_write_coef_idx(codec, 0x45, 0x5089);
+}
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -6663,7 +6685,8 @@ enum {
ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
- ALC287_FIXUP_13S_GEN2_SPEAKERS
+ ALC287_FIXUP_13S_GEN2_SPEAKERS,
+ ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8344,7 +8367,7 @@ static const struct hda_fixup alc269_fixups[] = {
.v.verbs = (const struct hda_verb[]) {
{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
{ 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
- { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
@@ -8361,6 +8384,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE,
},
+ [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8452,6 +8479,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8789,6 +8819,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -10166,6 +10197,9 @@ enum {
ALC671_FIXUP_HP_HEADSET_MIC2,
ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
+ ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
+ ALC668_FIXUP_HEADSET_MIC,
+ ALC668_FIXUP_MIC_DET_COEF,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -10549,6 +10583,29 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC662_FIXUP_USI_FUNC
},
+ [ALC668_FIXUP_ASUS_NO_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x04a1112c },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC668_FIXUP_HEADSET_MIC
+ },
+ [ALC668_FIXUP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_headset_mic,
+ .chained = true,
+ .chain_id = ALC668_FIXUP_MIC_DET_COEF
+ },
+ [ALC668_FIXUP_MIC_DET_COEF] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x15 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0d60 },
+ {}
+ },
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10584,6 +10641,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+ SND_PCI_QUIRK(0x1043, 0x185d, "ASUS G551JW", ALC668_FIXUP_ASUS_NO_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 3d5848d5481b..53ebabf42472 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -2450,6 +2450,8 @@ static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
err = scarlett2_usb_get_config(mixer,
SCARLETT2_CONFIG_TALKBACK_MAP,
1, &bitmap);
+ if (err < 0)
+ return err;
for (i = 0; i < num_mixes; i++, bitmap >>= 1)
private->talkback_map[i] = bitmap & 1;
}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index e03043f7dad3..de18fff69280 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -78,6 +78,48 @@
{ USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) },
/*
+ * Creative Technology, Ltd Live! Cam Sync HD [VF0770]
+ * The device advertises 8 formats, but only a rate of 48kHz is honored by the
+ * hardware and 24 bits give chopped audio, so only report the one working
+ * combination.
+ */
+{
+ USB_DEVICE(0x041e, 0x4095),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .fmt_bits = 16,
+ .iface = 3,
+ .altsetting = 4,
+ .altset_idx = 4,
+ .endpoint = 0x82,
+ .ep_attr = 0x05,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 48000 },
+ },
+ },
+ {
+ .ifnum = -1
+ },
+ },
+ },
+},
+
+/*
* HP Wireless Audio
* When not ignored, causes instability issues for some users, forcing them to
* skip the entire module.
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6ee6d24c847f..889c855addfc 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1900,6 +1900,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_IFACE_DELAY),
VENDOR_FLG(0x07fd, /* MOTU */
QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+ QUIRK_FLAG_VALIDATE_RATES),
VENDOR_FLG(0x152a, /* Thesycon devices */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x1de7, /* Phoenix Audio */
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index bc821056aba9..0893436cc09f 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
sec = find_section_by_name(elf, ".altinstructions");
if (!sec) {
sec = elf_create_section(elf, ".altinstructions",
- SHF_ALLOC, size, 0);
+ SHF_ALLOC, 0, 0);
if (!sec) {
WARN_ELF("elf_create_section");
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e5947fbb9e7a..06b5c164ae93 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -292,7 +292,7 @@ static int decode_instructions(struct objtool_file *file)
!strcmp(sec->name, ".entry.text"))
sec->noinstr = true;
- for (offset = 0; offset < sec->len; offset += insn->len) {
+ for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
insn = malloc(sizeof(*insn));
if (!insn) {
WARN("malloc failed");
@@ -307,7 +307,7 @@ static int decode_instructions(struct objtool_file *file)
insn->offset = offset;
ret = arch_decode_instruction(file->elf, sec, offset,
- sec->len - offset,
+ sec->sh.sh_size - offset,
&insn->len, &insn->type,
&insn->immediate,
&insn->stack_ops);
@@ -349,9 +349,9 @@ static struct instruction *find_last_insn(struct objtool_file *file,
{
struct instruction *insn = NULL;
unsigned int offset;
- unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
+ unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
- for (offset = sec->len - 1; offset >= end && !insn; offset--)
+ for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
insn = find_insn(file, sec, offset);
return insn;
@@ -389,7 +389,7 @@ static int add_dead_ends(struct objtool_file *file)
insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (insn)
insn = list_prev_entry(insn, list);
- else if (reloc->addend == reloc->sym->sec->len) {
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find unreachable insn at %s+0x%x",
@@ -424,7 +424,7 @@ reachable:
insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (insn)
insn = list_prev_entry(insn, list);
- else if (reloc->addend == reloc->sym->sec->len) {
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find reachable insn at %s+0x%x",
@@ -1561,14 +1561,14 @@ static int read_unwind_hints(struct objtool_file *file)
return -1;
}
- if (sec->len % sizeof(struct unwind_hint)) {
+ if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
WARN("struct unwind_hint size mismatch");
return -1;
}
file->hints = true;
- for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+ for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
hint = (struct unwind_hint *)sec->data->d_buf + i;
reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 8676c7598728..b18f0055b50b 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -286,10 +286,9 @@ static int read_sections(struct elf *elf)
return -1;
}
}
- sec->len = sec->sh.sh_size;
if (sec->sh.sh_flags & SHF_EXECINSTR)
- elf->text_size += sec->len;
+ elf->text_size += sec->sh.sh_size;
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(section, &sec->hash, sec->idx);
@@ -734,8 +733,8 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
data->d_size = strlen(str) + 1;
data->d_align = 1;
- len = strtab->len;
- strtab->len += data->d_size;
+ len = strtab->sh.sh_size;
+ strtab->sh.sh_size += data->d_size;
strtab->changed = true;
return len;
@@ -790,9 +789,9 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
data->d_align = 1;
data->d_type = ELF_T_SYM;
- sym->idx = symtab->len / sizeof(sym->sym);
+ sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
- symtab->len += data->d_size;
+ symtab->sh.sh_size += data->d_size;
symtab->changed = true;
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -814,7 +813,7 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
data->d_align = 4;
data->d_type = ELF_T_WORD;
- symtab_shndx->len += 4;
+ symtab_shndx->sh.sh_size += 4;
symtab_shndx->changed = true;
}
@@ -855,7 +854,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
}
sec->idx = elf_ndxscn(s);
- sec->len = size;
sec->changed = true;
sec->data = elf_newdata(s);
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index e34395047530..075d8291b854 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -38,7 +38,6 @@ struct section {
Elf_Data *data;
char *name;
int idx;
- unsigned int len;
bool changed, text, rodata, noinstr;
};
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index dc9b7dd314b0..b5865e2450cb 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -204,7 +204,7 @@ int orc_create(struct objtool_file *file)
/* Add a section terminator */
if (!empty) {
- orc_list_add(&orc_list, &null, sec, sec->len);
+ orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
nr++;
}
}
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index f1428e32a505..06c3eacab3d5 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -58,22 +58,11 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
{
}
-static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off)
+static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
+ unsigned long *off)
{
- switch (reloc->sym->type) {
- case STT_FUNC:
- *sec = reloc->sym->sec;
- *off = reloc->sym->offset + reloc->addend;
- return true;
-
- case STT_SECTION:
- *sec = reloc->sym->sec;
- *off = reloc->addend;
- return true;
-
- default:
- return false;
- }
+ *sec = reloc->sym->sec;
+ *off = reloc->sym->offset + reloc->addend;
}
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
@@ -109,13 +98,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
return -1;
}
- if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) {
- WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
- sec, offset + entry->orig,
- orig_reloc->sym->type,
- orig_reloc->sym->name);
- return -1;
- }
+
+ reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
@@ -133,13 +117,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
if (arch_is_retpoline(new_reloc->sym))
return 1;
- if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) {
- WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
- sec, offset + entry->new,
- new_reloc->sym->type,
- new_reloc->sym->name);
- return -1;
- }
+ reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
/* _ASM_EXTABLE_EX hack */
if (alt->new_off >= 0x7ffffff0)
@@ -181,13 +159,13 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
if (!sec)
continue;
- if (sec->len % entry->size != 0) {
+ if (sec->sh.sh_size % entry->size != 0) {
WARN("%s size not a multiple of %d",
sec->name, entry->size);
return -1;
}
- nr_entries = sec->len / entry->size;
+ nr_entries = sec->sh.sh_size / entry->size;
for (idx = 0; idx < nr_entries; idx++) {
alt = malloc(sizeof(*alt));
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 5a931456e718..ac35c61f65f5 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
from collections import namedtuple
from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
import kunit_config
import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
exec_result.elapsed_time))
return parse_result
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# does *not* pass suite_name as the filter_glob and print JSON, however:
+# argparse treats it as just another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it runs all tests and dumps the JSON to a file named `suite_name`.
+# So we hackily rewrite a bare --json into --json=stdout automatically.
+pseudo_bool_flag_defaults = {
+ '--json': 'stdout',
+ '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+ def massage_arg(arg: str) -> str:
+ if arg not in pseudo_bool_flag_defaults:
+ return arg
+ return f'{arg}={pseudo_bool_flag_defaults[arg]}'
+ return list(map(massage_arg, argv))
+
def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
help='Specifies the file to read results from.',
type=str, nargs='?', metavar='input_file')
- cli_args = parser.parse_args(argv)
+ cli_args = parser.parse_args(massage_argv(argv))
if get_kernel_root_path():
os.chdir(get_kernel_root_path())
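The comment block added to kunit.py above hinges on how argparse handles optional flags declared with nargs='?': a bare flag greedily consumes the next non-option token, so the suite filter ends up as the flag's value. The standalone Python sketch below is illustrative only and not part of the patch; it assumes kunit.py declares --json (and --raw_output) with nargs='?', and its parser, defaults table and helper are stand-ins for the real ones.

# Minimal sketch of the argparse behaviour described above (assumed flag
# declarations; not kunit.py's actual parser).
import argparse

PSEUDO_BOOL_FLAG_DEFAULTS = {'--json': 'stdout', '--raw_output': 'kunit'}

def massage_argv(argv):
    # Rewrite a bare "--json" / "--raw_output" into "--flag=default" so the
    # flag can no longer swallow the positional argument that follows it.
    return [f'{arg}={PSEUDO_BOOL_FLAG_DEFAULTS[arg]}'
            if arg in PSEUDO_BOOL_FLAG_DEFAULTS else arg
            for arg in argv]

parser = argparse.ArgumentParser()
# nargs='?' lets the flag appear with or without a value...
parser.add_argument('--json', nargs='?', const='stdout', default=None)
# ...but a positional that follows a bare flag is then eaten as its value.
parser.add_argument('filter_glob', nargs='?', default='')

args = parser.parse_args(['--json', 'mytest'])
print(args.json, repr(args.filter_glob))   # mytest ''  -> the filter was swallowed

args = parser.parse_args(massage_argv(['--json', 'mytest']))
print(args.json, repr(args.filter_glob))   # stdout 'mytest' -> both survive

Under these assumed declarations, the second call is what `kunit.py run --json mytest` effectively becomes after massage_argv(), which is why --json can stay a string flag while a bare --json is still treated as --json=stdout.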
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..1edcc8373b4e 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ def test_run_raw_output_does_not_take_positional_args(self):
+ # --raw_output is a string flag, but it must not consume positional
+ # arguments; it should only take a value given explicitly with '='.
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
diff --git a/tools/testing/selftests/net/ioam6.sh b/tools/testing/selftests/net/ioam6.sh
index 90700303d8a9..a2b9fad5a9a6 100755
--- a/tools/testing/selftests/net/ioam6.sh
+++ b/tools/testing/selftests/net/ioam6.sh
@@ -536,10 +536,25 @@ out_bits()
do
ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \
- dev veth0
+ dev veth0 &>/dev/null
+
+ local cmd_res=$?
+ local descr="${desc/<n>/$i}"
- run_test "out_bit$i" "${desc/<n>/$i} ($1 mode)" ioam-node-alpha \
+ if [[ $i -ge 12 && $i -le 21 ]]
+ then
+ if [ $cmd_res != 0 ]
+ then
+ npassed=$((npassed+1))
+ log_test_passed "$descr"
+ else
+ nfailed=$((nfailed+1))
+ log_test_failed "$descr"
+ fi
+ else
+ run_test "out_bit$i" "$descr ($1 mode)" ioam-node-alpha \
ioam-node-beta db01::2 db01::1 veth0 ${bit2type[$i]} 123
+ fi
done
[ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
@@ -632,7 +647,7 @@ in_bits()
[ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
[ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
- for i in {0..22}
+ for i in {0..11} {22..22}
do
ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \
diff --git a/tools/testing/selftests/net/ioam6_parser.c b/tools/testing/selftests/net/ioam6_parser.c
index d376cb2c383c..8f6997d35816 100644
--- a/tools/testing/selftests/net/ioam6_parser.c
+++ b/tools/testing/selftests/net/ioam6_parser.c
@@ -94,16 +94,6 @@ enum {
TEST_OUT_BIT9,
TEST_OUT_BIT10,
TEST_OUT_BIT11,
- TEST_OUT_BIT12,
- TEST_OUT_BIT13,
- TEST_OUT_BIT14,
- TEST_OUT_BIT15,
- TEST_OUT_BIT16,
- TEST_OUT_BIT17,
- TEST_OUT_BIT18,
- TEST_OUT_BIT19,
- TEST_OUT_BIT20,
- TEST_OUT_BIT21,
TEST_OUT_BIT22,
TEST_OUT_FULL_SUPP_TRACE,
@@ -125,16 +115,6 @@ enum {
TEST_IN_BIT9,
TEST_IN_BIT10,
TEST_IN_BIT11,
- TEST_IN_BIT12,
- TEST_IN_BIT13,
- TEST_IN_BIT14,
- TEST_IN_BIT15,
- TEST_IN_BIT16,
- TEST_IN_BIT17,
- TEST_IN_BIT18,
- TEST_IN_BIT19,
- TEST_IN_BIT20,
- TEST_IN_BIT21,
TEST_IN_BIT22,
TEST_IN_FULL_SUPP_TRACE,
@@ -199,30 +179,6 @@ static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h,
ioam6h->nodelen != 2 ||
ioam6h->remlen;
- case TEST_OUT_BIT12:
- case TEST_IN_BIT12:
- case TEST_OUT_BIT13:
- case TEST_IN_BIT13:
- case TEST_OUT_BIT14:
- case TEST_IN_BIT14:
- case TEST_OUT_BIT15:
- case TEST_IN_BIT15:
- case TEST_OUT_BIT16:
- case TEST_IN_BIT16:
- case TEST_OUT_BIT17:
- case TEST_IN_BIT17:
- case TEST_OUT_BIT18:
- case TEST_IN_BIT18:
- case TEST_OUT_BIT19:
- case TEST_IN_BIT19:
- case TEST_OUT_BIT20:
- case TEST_IN_BIT20:
- case TEST_OUT_BIT21:
- case TEST_IN_BIT21:
- return ioam6h->overflow ||
- ioam6h->nodelen ||
- ioam6h->remlen != 1;
-
case TEST_OUT_BIT22:
case TEST_IN_BIT22:
return ioam6h->overflow ||
@@ -326,6 +282,66 @@ static int check_ioam6_data(__u8 **p, struct ioam6_trace_hdr *ioam6h,
*p += sizeof(__u32);
}
+ if (ioam6h->type.bit12) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit13) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit14) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit15) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit16) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit17) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit18) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit19) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit20) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit21) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
if (ioam6h->type.bit22) {
len = cnf.sc_data ? strlen(cnf.sc_data) : 0;
aligned = cnf.sc_data ? __ALIGN_KERNEL(len, 4) : 0;
@@ -455,26 +471,6 @@ static int str2id(const char *tname)
return TEST_OUT_BIT10;
if (!strcmp("out_bit11", tname))
return TEST_OUT_BIT11;
- if (!strcmp("out_bit12", tname))
- return TEST_OUT_BIT12;
- if (!strcmp("out_bit13", tname))
- return TEST_OUT_BIT13;
- if (!strcmp("out_bit14", tname))
- return TEST_OUT_BIT14;
- if (!strcmp("out_bit15", tname))
- return TEST_OUT_BIT15;
- if (!strcmp("out_bit16", tname))
- return TEST_OUT_BIT16;
- if (!strcmp("out_bit17", tname))
- return TEST_OUT_BIT17;
- if (!strcmp("out_bit18", tname))
- return TEST_OUT_BIT18;
- if (!strcmp("out_bit19", tname))
- return TEST_OUT_BIT19;
- if (!strcmp("out_bit20", tname))
- return TEST_OUT_BIT20;
- if (!strcmp("out_bit21", tname))
- return TEST_OUT_BIT21;
if (!strcmp("out_bit22", tname))
return TEST_OUT_BIT22;
if (!strcmp("out_full_supp_trace", tname))
@@ -509,26 +505,6 @@ static int str2id(const char *tname)
return TEST_IN_BIT10;
if (!strcmp("in_bit11", tname))
return TEST_IN_BIT11;
- if (!strcmp("in_bit12", tname))
- return TEST_IN_BIT12;
- if (!strcmp("in_bit13", tname))
- return TEST_IN_BIT13;
- if (!strcmp("in_bit14", tname))
- return TEST_IN_BIT14;
- if (!strcmp("in_bit15", tname))
- return TEST_IN_BIT15;
- if (!strcmp("in_bit16", tname))
- return TEST_IN_BIT16;
- if (!strcmp("in_bit17", tname))
- return TEST_IN_BIT17;
- if (!strcmp("in_bit18", tname))
- return TEST_IN_BIT18;
- if (!strcmp("in_bit19", tname))
- return TEST_IN_BIT19;
- if (!strcmp("in_bit20", tname))
- return TEST_IN_BIT20;
- if (!strcmp("in_bit21", tname))
- return TEST_IN_BIT21;
if (!strcmp("in_bit22", tname))
return TEST_IN_BIT22;
if (!strcmp("in_full_supp_trace", tname))
@@ -606,16 +582,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
[TEST_OUT_BIT9] = check_ioam_header_and_data,
[TEST_OUT_BIT10] = check_ioam_header_and_data,
[TEST_OUT_BIT11] = check_ioam_header_and_data,
- [TEST_OUT_BIT12] = check_ioam_header,
- [TEST_OUT_BIT13] = check_ioam_header,
- [TEST_OUT_BIT14] = check_ioam_header,
- [TEST_OUT_BIT15] = check_ioam_header,
- [TEST_OUT_BIT16] = check_ioam_header,
- [TEST_OUT_BIT17] = check_ioam_header,
- [TEST_OUT_BIT18] = check_ioam_header,
- [TEST_OUT_BIT19] = check_ioam_header,
- [TEST_OUT_BIT20] = check_ioam_header,
- [TEST_OUT_BIT21] = check_ioam_header,
[TEST_OUT_BIT22] = check_ioam_header_and_data,
[TEST_OUT_FULL_SUPP_TRACE] = check_ioam_header_and_data,
[TEST_IN_UNDEF_NS] = check_ioam_header,
@@ -633,16 +599,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
[TEST_IN_BIT9] = check_ioam_header_and_data,
[TEST_IN_BIT10] = check_ioam_header_and_data,
[TEST_IN_BIT11] = check_ioam_header_and_data,
- [TEST_IN_BIT12] = check_ioam_header,
- [TEST_IN_BIT13] = check_ioam_header,
- [TEST_IN_BIT14] = check_ioam_header,
- [TEST_IN_BIT15] = check_ioam_header,
- [TEST_IN_BIT16] = check_ioam_header,
- [TEST_IN_BIT17] = check_ioam_header,
- [TEST_IN_BIT18] = check_ioam_header,
- [TEST_IN_BIT19] = check_ioam_header,
- [TEST_IN_BIT20] = check_ioam_header,
- [TEST_IN_BIT21] = check_ioam_header,
[TEST_IN_BIT22] = check_ioam_header_and_data,
[TEST_IN_FULL_SUPP_TRACE] = check_ioam_header_and_data,
[TEST_FWD_FULL_SUPP_TRACE] = check_ioam_header_and_data,