-rw-r--r-- .clippy.toml | 2
-rw-r--r-- .mailmap | 4
-rw-r--r-- Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon | 4
-rw-r--r-- Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/net/ethernet-controller.yaml | 97
-rw-r--r-- Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml | 19
-rw-r--r-- Documentation/netlink/specs/ethtool.yaml | 4
-rw-r--r-- MAINTAINERS | 77
-rw-r--r-- Makefile | 9
-rw-r--r-- arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/arm/morello.dtsi | 22
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi | 26
-rw-r--r-- arch/arm64/boot/dts/freescale/imx95.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/st/stm32mp211.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/st/stm32mp231.dtsi | 9
-rw-r--r-- arch/arm64/boot/dts/st/stm32mp251.dtsi | 9
-rw-r--r-- arch/arm64/include/asm/el2_setup.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_arm.h | 3
-rw-r--r-- arch/arm64/include/asm/vdso/gettimeofday.h | 13
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 9
-rw-r--r-- arch/arm64/kernel/proton-pack.c | 2
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/switch.h | 13
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2
-rw-r--r-- arch/arm64/kvm/hyp/vgic-v3-sr.c | 36
-rw-r--r-- arch/arm64/kvm/mmu.c | 13
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 6
-rw-r--r-- arch/mips/include/asm/idle.h | 5
-rw-r--r-- arch/mips/include/asm/ptrace.h | 3
-rw-r--r-- arch/mips/kernel/genex.S | 71
-rw-r--r-- arch/mips/kernel/idle.c | 7
-rw-r--r-- arch/mips/kernel/smp-cps.c | 4
-rw-r--r-- arch/mips/kernel/traps.c | 10
-rw-r--r-- arch/parisc/math-emu/driver.c | 16
-rwxr-xr-x arch/powerpc/boot/wrapper | 6
-rw-r--r-- arch/powerpc/kernel/module_64.c | 4
-rw-r--r-- arch/powerpc/mm/book3s64/radix_pgtable.c | 20
-rw-r--r-- arch/powerpc/platforms/powernv/Kconfig | 2
-rw-r--r-- arch/powerpc/platforms/pseries/Kconfig | 2
-rw-r--r-- arch/riscv/kernel/process.c | 6
-rw-r--r-- arch/riscv/kernel/traps.c | 64
-rw-r--r-- arch/riscv/kernel/traps_misaligned.c | 19
-rw-r--r-- arch/riscv/kvm/vcpu.c | 2
-rw-r--r-- arch/s390/configs/debug_defconfig | 28
-rw-r--r-- arch/s390/configs/defconfig | 24
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 1
-rw-r--r-- arch/s390/kernel/entry.S | 3
-rw-r--r-- arch/s390/pci/pci_clp.c | 2
-rw-r--r-- arch/um/include/asm/uaccess.h | 2
-rw-r--r-- arch/um/kernel/trap.c | 26
-rw-r--r-- arch/um/kernel/um_arch.c | 2
-rw-r--r-- arch/x86/Kconfig | 2
-rw-r--r-- arch/x86/Makefile | 1
-rw-r--r-- arch/x86/boot/bioscall.S | 4
-rw-r--r-- arch/x86/boot/boot.h | 6
-rw-r--r-- arch/x86/boot/compressed/Makefile | 10
-rw-r--r-- arch/x86/boot/compressed/head_64.S | 1
-rw-r--r-- arch/x86/boot/compressed/mem.c | 5
-rw-r--r-- arch/x86/boot/compressed/misc.c | 1
-rw-r--r-- arch/x86/boot/compressed/misc.h | 8
-rw-r--r-- arch/x86/boot/compressed/pgtable.h | 18
-rw-r--r-- arch/x86/boot/compressed/pgtable_64.c | 1
-rw-r--r-- arch/x86/boot/compressed/sev-handle-vc.c | 134
-rw-r--r-- arch/x86/boot/compressed/sev.c | 194
-rw-r--r-- arch/x86/boot/compressed/sev.h | 23
-rw-r--r-- arch/x86/boot/compressed/string.c | 8
-rw-r--r-- arch/x86/boot/copy.S | 8
-rw-r--r-- arch/x86/boot/header.S | 2
-rw-r--r-- arch/x86/boot/startup/Makefile | 30
-rw-r--r-- arch/x86/boot/startup/efi-mixed.S (renamed from drivers/firmware/efi/libstub/x86-mixed.S) | 0
-rw-r--r-- arch/x86/boot/startup/gdt_idt.c | 71
-rw-r--r-- arch/x86/boot/startup/la57toggle.S (renamed from arch/x86/boot/compressed/la57toggle.S) | 1
-rw-r--r-- arch/x86/boot/startup/map_kernel.c | 223
-rw-r--r-- arch/x86/boot/startup/sev-shared.c (renamed from arch/x86/coco/sev/shared.c) | 894
-rw-r--r-- arch/x86/boot/startup/sev-startup.c | 368
-rw-r--r-- arch/x86/boot/startup/sme.c (renamed from arch/x86/mm/mem_encrypt_identity.c) | 28
-rw-r--r-- arch/x86/boot/string.c | 2
-rw-r--r-- arch/x86/boot/video.c | 2
-rw-r--r-- arch/x86/coco/core.c | 2
-rw-r--r-- arch/x86/coco/sev/Makefile | 23
-rw-r--r-- arch/x86/coco/sev/core.c | 1713
-rw-r--r-- arch/x86/coco/sev/sev-nmi.c | 108
-rw-r--r-- arch/x86/coco/sev/vc-handle.c | 1061
-rw-r--r-- arch/x86/coco/sev/vc-shared.c | 504
-rw-r--r-- arch/x86/events/core.c | 11
-rw-r--r-- arch/x86/events/intel/core.c | 2
-rw-r--r-- arch/x86/events/intel/ds.c | 21
-rw-r--r-- arch/x86/events/perf_event.h | 11
-rw-r--r-- arch/x86/include/asm/alternative.h | 11
-rw-r--r-- arch/x86/include/asm/arch_hweight.h | 6
-rw-r--r-- arch/x86/include/asm/asm.h | 5
-rw-r--r-- arch/x86/include/asm/bitops.h | 7
-rw-r--r-- arch/x86/include/asm/boot.h | 10
-rw-r--r-- arch/x86/include/asm/coco.h | 2
-rw-r--r-- arch/x86/include/asm/debugreg.h | 12
-rw-r--r-- arch/x86/include/asm/inat.h | 6
-rw-r--r-- arch/x86/include/asm/io.h | 6
-rw-r--r-- arch/x86/include/asm/kexec.h | 7
-rw-r--r-- arch/x86/include/asm/linkage.h | 10
-rw-r--r-- arch/x86/include/asm/mem_encrypt.h | 2
-rw-r--r-- arch/x86/include/asm/microcode.h | 2
-rw-r--r-- arch/x86/include/asm/mmu.h | 4
-rw-r--r-- arch/x86/include/asm/mmu_context.h | 15
-rw-r--r-- arch/x86/include/asm/nospec-branch.h | 2
-rw-r--r-- arch/x86/include/asm/page_types.h | 4
-rw-r--r-- arch/x86/include/asm/percpu.h | 20
-rw-r--r-- arch/x86/include/asm/processor.h | 1
-rw-r--r-- arch/x86/include/asm/setup.h | 1
-rw-r--r-- arch/x86/include/asm/sev-internal.h | 105
-rw-r--r-- arch/x86/include/asm/sev.h | 59
-rw-r--r-- arch/x86/include/asm/special_insns.h | 21
-rw-r--r-- arch/x86/include/asm/string_32.h | 15
-rw-r--r-- arch/x86/include/asm/text-patching.h | 29
-rw-r--r-- arch/x86/include/asm/uaccess_64.h | 6
-rw-r--r-- arch/x86/include/asm/vdso/processor.h | 8
-rw-r--r-- arch/x86/kernel/alternative.c | 439
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r-- arch/x86/kernel/callthunks.c | 6
-rw-r--r-- arch/x86/kernel/cpu/bugs.c | 1181
-rw-r--r-- arch/x86/kernel/cpu/common.c | 18
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd.c | 6
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 58
-rw-r--r-- arch/x86/kernel/cpu/microcode/intel.c | 2
-rw-r--r-- arch/x86/kernel/cpu/microcode/internal.h | 1
-rw-r--r-- arch/x86/kernel/early_printk.c | 9
-rw-r--r-- arch/x86/kernel/ftrace.c | 18
-rw-r--r-- arch/x86/kernel/head32.c | 4
-rw-r--r-- arch/x86/kernel/head64.c | 277
-rw-r--r-- arch/x86/kernel/head_32.S | 8
-rw-r--r-- arch/x86/kernel/head_64.S | 8
-rw-r--r-- arch/x86/kernel/jump_label.c | 6
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 4
-rw-r--r-- arch/x86/kernel/kprobes/opt.c | 6
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 48
-rw-r--r-- arch/x86/kernel/module.c | 2
-rw-r--r-- arch/x86/kernel/relocate_kernel_32.S | 6
-rw-r--r-- arch/x86/kernel/relocate_kernel_64.S | 250
-rw-r--r-- arch/x86/kernel/setup.c | 1
-rw-r--r-- arch/x86/kernel/static_call.c | 2
-rw-r--r-- arch/x86/kernel/traps.c | 6
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 14
-rw-r--r-- arch/x86/kvm/mmu.h | 3
-rw-r--r-- arch/x86/kvm/mmu/mmu.c | 70
-rw-r--r-- arch/x86/kvm/smm.c | 1
-rw-r--r-- arch/x86/kvm/svm/sev.c | 32
-rw-r--r-- arch/x86/kvm/svm/svm.c | 75
-rw-r--r-- arch/x86/kvm/svm/svm.h | 2
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 8
-rw-r--r-- arch/x86/kvm/x86.c | 4
-rw-r--r-- arch/x86/lib/delay.c | 2
-rw-r--r-- arch/x86/lib/insn-eval.c | 13
-rw-r--r-- arch/x86/lib/insn.c | 7
-rw-r--r-- arch/x86/lib/iomem.c | 2
-rw-r--r-- arch/x86/lib/memcpy_64.S | 1
-rw-r--r-- arch/x86/lib/memset_64.S | 1
-rw-r--r-- arch/x86/lib/retpoline.S | 2
-rw-r--r-- arch/x86/lib/string_32.c | 17
-rw-r--r-- arch/x86/lib/strstr_32.c | 6
-rw-r--r-- arch/x86/lib/usercopy_32.c | 18
-rw-r--r-- arch/x86/lib/x86-opcode-map.txt | 56
-rw-r--r-- arch/x86/mm/Makefile | 6
-rw-r--r-- arch/x86/mm/init.c | 19
-rw-r--r-- arch/x86/mm/mem_encrypt_amd.c | 2
-rw-r--r-- arch/x86/mm/pgtable.c | 1
-rw-r--r-- arch/x86/mm/tlb.c | 104
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 2
-rw-r--r-- arch/x86/platform/efi/efi_64.c | 8
-rw-r--r-- arch/x86/platform/pvh/head.S | 3
-rw-r--r-- arch/x86/power/hibernate_asm_32.S | 3
-rw-r--r-- arch/x86/power/hibernate_asm_64.S | 3
-rw-r--r-- arch/x86/tools/gen-insn-attr-x86.awk | 7
-rw-r--r-- arch/x86/um/shared/sysdep/faultinfo_32.h | 2
-rw-r--r-- arch/x86/um/shared/sysdep/faultinfo_64.h | 2
-rw-r--r-- block/blk.h | 3
-rw-r--r-- block/ioprio.c | 6
-rw-r--r-- crypto/scompress.c | 5
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.c | 4
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.c | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 35
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 18
-rw-r--r-- drivers/base/module.c | 13
-rw-r--r-- drivers/base/platform.c | 6
-rw-r--r-- drivers/block/loop.c | 23
-rw-r--r-- drivers/block/ublk_drv.c | 62
-rw-r--r-- drivers/bluetooth/btintel_pcie.c | 57
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 12
-rw-r--r-- drivers/bluetooth/btusb.c | 101
-rw-r--r-- drivers/clocksource/i8253.c | 4
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 15
-rw-r--r-- drivers/cpufreq/cpufreq.c | 22
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 3
-rw-r--r-- drivers/cpufreq/freq_table.c | 6
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 3
-rw-r--r-- drivers/edac/altera_edac.c | 9
-rw-r--r-- drivers/edac/altera_edac.h | 2
-rw-r--r-- drivers/firmware/arm_ffa/driver.c | 3
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 3
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 13
-rw-r--r-- drivers/firmware/cirrus/Kconfig | 5
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 1
-rw-r--r-- drivers/gpu/drm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/adp/adp_drv.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 54
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 6
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/drm_file.c | 6
-rw-r--r-- drivers/gpu/drm/drm_gpusvm.c | 2
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 14
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 25
-rw-r--r-- drivers/gpu/drm/tests/drm_gem_shmem_test.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_backup.c | 44
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c | 6
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 2
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c | 28
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_mocs.c | 7
-rw-r--r-- drivers/gpu/drm/xe/xe_eu_stall.c | 14
-rw-r--r-- drivers/gpu/drm/xe/xe_eu_stall.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc.c | 22
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc_proxy.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc_proxy.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_debugfs.c | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_pagefault.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_capture.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_svm.c | 14
-rw-r--r-- drivers/gpu/drm/xe/xe_svm.h | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_uc.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_uc.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_vm.c | 3
-rw-r--r-- drivers/gpu/nova-core/gpu.rs | 2
-rw-r--r-- drivers/hv/hv_common.c | 10
-rw-r--r-- drivers/hv/hyperv_vmbus.h | 6
-rw-r--r-- drivers/hv/vmbus_drv.c | 109
-rw-r--r-- drivers/i2c/busses/i2c-imx-lpi2c.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 2
-rw-r--r-- drivers/iio/accel/adis16201.c | 4
-rw-r--r-- drivers/iio/accel/adxl355_core.c | 2
-rw-r--r-- drivers/iio/accel/adxl367.c | 10
-rw-r--r-- drivers/iio/accel/fxls8962af-core.c | 7
-rw-r--r-- drivers/iio/adc/ad7266.c | 2
-rw-r--r-- drivers/iio/adc/ad7380.c | 32
-rw-r--r-- drivers/iio/adc/ad7606.c | 11
-rw-r--r-- drivers/iio/adc/ad7606_spi.c | 2
-rw-r--r-- drivers/iio/adc/ad7768-1.c | 2
-rw-r--r-- drivers/iio/adc/dln2-adc.c | 2
-rw-r--r-- drivers/iio/adc/qcom-spmi-iadc.c | 4
-rw-r--r-- drivers/iio/adc/rockchip_saradc.c | 17
-rw-r--r-- drivers/iio/chemical/pms7003.c | 5
-rw-r--r-- drivers/iio/chemical/sps30.c | 2
-rw-r--r-- drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 4
-rw-r--r-- drivers/iio/imu/adis16550.c | 2
-rw-r--r-- drivers/iio/imu/bmi270/bmi270_core.c | 6
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 2
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 6
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 7
-rw-r--r-- drivers/iio/light/hid-sensor-prox.c | 22
-rw-r--r-- drivers/iio/light/opt3001.c | 5
-rw-r--r-- drivers/iio/pressure/mprls0025pa.h | 17
-rw-r--r-- drivers/iio/temperature/maxim_thermocouple.c | 2
-rw-r--r-- drivers/input/joystick/magellan.c | 2
-rw-r--r-- drivers/input/joystick/xpad.c | 49
-rw-r--r-- drivers/input/keyboard/mtk-pmic-keys.c | 4
-rw-r--r-- drivers/input/misc/hisi_powerkey.c | 2
-rw-r--r-- drivers/input/misc/sparcspkr.c | 22
-rw-r--r-- drivers/input/mouse/synaptics.c | 5
-rw-r--r-- drivers/input/touchscreen/cyttsp5.c | 7
-rw-r--r-- drivers/input/touchscreen/stmpe-ts.c | 7
-rw-r--r-- drivers/iommu/amd/init.c | 8
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 6
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 23
-rw-r--r-- drivers/iommu/intel/iommu.c | 35
-rw-r--r-- drivers/irqchip/irq-qcom-mpm.c | 3
-rw-r--r-- drivers/md/dm-bufio.c | 9
-rw-r--r-- drivers/md/dm-integrity.c | 2
-rw-r--r-- drivers/md/dm-table.c | 11
-rw-r--r-- drivers/media/cec/i2c/Kconfig | 1
-rw-r--r-- drivers/media/i2c/Kconfig | 5
-rw-r--r-- drivers/media/platform/synopsys/hdmirx/Kconfig | 1
-rw-r--r-- drivers/media/test-drivers/vivid/Kconfig | 3
-rw-r--r-- drivers/mmc/host/Kconfig | 2
-rw-r--r-- drivers/mmc/host/renesas_sdhi_core.c | 12
-rw-r--r-- drivers/net/can/m_can/m_can.c | 3
-rw-r--r-- drivers/net/can/rockchip/rockchip_canfd-core.c | 2
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 42
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 207
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 3
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 1
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 5
-rw-r--r-- drivers/net/ethernet/airoha/airoha_npu.c | 10
-rw-r--r-- drivers/net/ethernet/amd/pds_core/auxbus.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 9
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 24
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 11
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 35
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 40
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 1
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 82
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 25
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adapter.c | 47
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adapter.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 5
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf.h | 18
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 76
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_main.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ptp.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rdma.h | 4
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic.h | 8
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 2
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 197
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_irq.c | 142
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 6
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 5
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 14
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 8
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.h | 1
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 6
-rw-r--r-- drivers/net/ethernet/realtek/rtase/rtase_main.c | 4
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 15
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 16
-rw-r--r-- drivers/net/ethernet/vertexcom/mse102x.c | 36
-rw-r--r-- drivers/net/mdio/mdio-mux-meson-gxl.c | 3
-rw-r--r-- drivers/net/usb/rndis_host.c | 16
-rw-r--r-- drivers/net/virtio_net.c | 23
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_xdp.c | 2
-rw-r--r-- drivers/net/vxlan/vxlan_vnifilter.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/sc.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 14
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/agg.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/debugfs.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/fw.c | 13
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/mac80211.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/mld.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/mld.h | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 247
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/tests/devinfo.c | 15
-rw-r--r-- drivers/net/wireless/purelifi/plfxlc/mac.c | 1
-rw-r--r-- drivers/nvme/host/Kconfig | 1
-rw-r--r-- drivers/nvme/host/core.c | 3
-rw-r--r-- drivers/nvme/host/pci.c | 8
-rw-r--r-- drivers/nvme/host/tcp.c | 31
-rw-r--r-- drivers/nvme/target/Kconfig | 1
-rw-r--r-- drivers/nvme/target/auth.c | 3
-rw-r--r-- drivers/nvme/target/tcp.c | 3
-rw-r--r-- drivers/pci/hotplug/s390_pci_hpc.c | 1
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx.c | 6
-rw-r--r-- drivers/pinctrl/mediatek/mtk-eint.c | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-airoha.c | 159
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 12
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8750.c | 4
-rw-r--r-- drivers/platform/x86/amd/pmc/pmc.c | 7
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 11
-rw-r--r-- drivers/platform/x86/dell/alienware-wmi-wmax.c | 14
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 16
-rw-r--r-- drivers/platform/x86/intel/hid.c | 21
-rw-r--r-- drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c | 13
-rw-r--r-- drivers/ptp/ptp_ocp.c | 52
-rw-r--r-- drivers/s390/block/Kconfig | 3
-rw-r--r-- drivers/scsi/myrb.c | 2
-rw-r--r-- drivers/soundwire/intel_auxdevice.c | 36
-rw-r--r-- drivers/spi/spi-mem.c | 6
-rw-r--r-- drivers/spi/spi-qpic-snand.c | 5
-rw-r--r-- drivers/spi/spi-stm32-ospi.c | 4
-rw-r--r-- drivers/spi/spi-tegra114.c | 6
-rw-r--r-- drivers/staging/axis-fifo/axis-fifo.c | 14
-rw-r--r-- drivers/staging/iio/adc/ad7816.c | 2
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c | 1
-rw-r--r-- drivers/ufs/core/ufshcd.c | 2
-rw-r--r-- drivers/uio/uio_hv_generic.c | 39
-rw-r--r-- drivers/usb/cdns3/cdnsp-gadget.c | 31
-rw-r--r-- drivers/usb/cdns3/cdnsp-gadget.h | 6
-rw-r--r-- drivers/usb/cdns3/cdnsp-pci.c | 12
-rw-r--r-- drivers/usb/cdns3/cdnsp-ring.c | 3
-rw-r--r-- drivers/usb/cdns3/core.h | 3
-rw-r--r-- drivers/usb/class/usbtmc.c | 59
-rw-r--r-- drivers/usb/dwc3/core.h | 4
-rw-r--r-- drivers/usb/dwc3/gadget.c | 60
-rw-r--r-- drivers/usb/gadget/composite.c | 12
-rw-r--r-- drivers/usb/gadget/function/f_ecm.c | 7
-rw-r--r-- drivers/usb/gadget/udc/tegra-xudc.c | 4
-rw-r--r-- drivers/usb/host/uhci-platform.c | 2
-rw-r--r-- drivers/usb/host/xhci-dbgcap.c | 19
-rw-r--r-- drivers/usb/host/xhci-dbgcap.h | 3
-rw-r--r-- drivers/usb/host/xhci-ring.c | 19
-rw-r--r-- drivers/usb/host/xhci-tegra.c | 3
-rw-r--r-- drivers/usb/misc/onboard_usb_dev.c | 10
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 2
-rw-r--r-- drivers/usb/typec/ucsi/displayport.c | 21
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.c | 34
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.h | 2
-rw-r--r-- drivers/vfio/pci/vfio_pci_core.c | 12
-rw-r--r-- drivers/xen/swiotlb-xen.c | 1
-rw-r--r-- drivers/xen/xenbus/xenbus.h | 2
-rw-r--r-- drivers/xen/xenbus/xenbus_comms.c | 9
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 2
-rw-r--r-- drivers/xen/xenbus/xenbus_probe.c | 14
-rw-r--r-- drivers/xen/xenbus/xenbus_xs.c | 18
-rw-r--r-- fs/bcachefs/alloc_foreground.c | 22
-rw-r--r-- fs/bcachefs/btree_gc.c | 27
-rw-r--r-- fs/bcachefs/btree_io.c | 9
-rw-r--r-- fs/bcachefs/btree_journal_iter.c | 2
-rw-r--r-- fs/bcachefs/btree_update_interior.c | 49
-rw-r--r-- fs/bcachefs/buckets.c | 15
-rw-r--r-- fs/bcachefs/buckets.h | 1
-rw-r--r-- fs/bcachefs/dirent.c | 4
-rw-r--r-- fs/bcachefs/disk_groups.c | 25
-rw-r--r-- fs/bcachefs/ec.c | 4
-rw-r--r-- fs/bcachefs/ec.h | 1
-rw-r--r-- fs/bcachefs/errcode.h | 2
-rw-r--r-- fs/bcachefs/error.c | 4
-rw-r--r-- fs/bcachefs/extents.c | 5
-rw-r--r-- fs/bcachefs/fs-io.c | 44
-rw-r--r-- fs/bcachefs/fs.c | 24
-rw-r--r-- fs/bcachefs/io_write.c | 21
-rw-r--r-- fs/bcachefs/journal_io.c | 6
-rw-r--r-- fs/bcachefs/journal_reclaim.c | 7
-rw-r--r-- fs/bcachefs/move.c | 3
-rw-r--r-- fs/bcachefs/namei.c | 3
-rw-r--r-- fs/bcachefs/sb-downgrade.c | 4
-rw-r--r-- fs/bcachefs/sb-errors_format.h | 13
-rw-r--r-- fs/bcachefs/sb-members.c | 6
-rw-r--r-- fs/bcachefs/sb-members.h | 13
-rw-r--r-- fs/bcachefs/subvolume.c | 5
-rw-r--r-- fs/bcachefs/super.c | 51
-rw-r--r-- fs/bcachefs/thread_with_file.c | 4
-rw-r--r-- fs/bcachefs/xattr_format.h | 8
-rw-r--r-- fs/btrfs/compression.c | 2
-rw-r--r-- fs/btrfs/extent_io.c | 6
-rw-r--r-- fs/btrfs/extent_io.h | 2
-rw-r--r-- fs/btrfs/inode.c | 13
-rw-r--r-- fs/btrfs/scrub.c | 4
-rw-r--r-- fs/btrfs/volumes.c | 91
-rw-r--r-- fs/erofs/fileio.c | 4
-rw-r--r-- fs/erofs/super.c | 1
-rw-r--r-- fs/erofs/zdata.c | 31
-rw-r--r-- fs/namespace.c | 17
-rw-r--r-- fs/nilfs2/the_nilfs.c | 3
-rw-r--r-- fs/notify/fanotify/fanotify_user.c | 7
-rw-r--r-- fs/ocfs2/alloc.c | 1
-rw-r--r-- fs/ocfs2/journal.c | 80
-rw-r--r-- fs/ocfs2/journal.h | 1
-rw-r--r-- fs/ocfs2/ocfs2.h | 17
-rw-r--r-- fs/ocfs2/quota_local.c | 9
-rw-r--r-- fs/ocfs2/suballoc.c | 38
-rw-r--r-- fs/ocfs2/suballoc.h | 1
-rw-r--r-- fs/ocfs2/super.c | 3
-rw-r--r-- fs/pnode.c | 17
-rw-r--r-- fs/pnode.h | 2
-rw-r--r-- fs/smb/client/cached_dir.c | 10
-rw-r--r-- fs/smb/client/cifspdu.h | 5
-rw-r--r-- fs/smb/client/cifsproto.h | 4
-rw-r--r-- fs/smb/client/cifssmb.c | 57
-rw-r--r-- fs/smb/client/connect.c | 23
-rw-r--r-- fs/smb/client/file.c | 6
-rw-r--r-- fs/smb/client/fs_context.c | 25
-rw-r--r-- fs/smb/client/fs_context.h | 47
-rw-r--r-- fs/smb/client/smb1ops.c | 223
-rw-r--r-- fs/smb/client/smb2inode.c | 2
-rw-r--r-- fs/smb/client/smb2pdu.c | 9
-rw-r--r-- fs/smb/server/auth.c | 14
-rw-r--r-- fs/smb/server/mgmt/user_session.c | 20
-rw-r--r-- fs/smb/server/mgmt/user_session.h | 1
-rw-r--r-- fs/smb/server/oplock.c | 7
-rw-r--r-- fs/smb/server/smb2pdu.c | 23
-rw-r--r-- fs/smb/server/vfs.c | 7
-rw-r--r-- fs/smb/server/vfs_cache.c | 33
-rw-r--r-- fs/userfaultfd.c | 28
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 4
-rw-r--r-- include/drm/ttm/ttm_backup.h | 18
-rw-r--r-- include/drm/ttm/ttm_tt.h | 2
-rw-r--r-- include/linux/cpufreq.h | 83
-rw-r--r-- include/linux/fsnotify_backend.h | 15
-rw-r--r-- include/linux/hyperv.h | 6
-rw-r--r-- include/linux/ieee80211.h | 2
-rw-r--r-- include/linux/iommu.h | 8
-rw-r--r-- include/linux/module.h | 2
-rw-r--r-- include/linux/netdevice.h | 1
-rw-r--r-- include/linux/timekeeper_internal.h | 8
-rw-r--r-- include/linux/vmalloc.h | 1
-rw-r--r-- include/net/bluetooth/hci.h | 4
-rw-r--r-- include/net/bluetooth/hci_core.h | 20
-rw-r--r-- include/net/bluetooth/hci_sync.h | 3
-rw-r--r-- include/net/netdev_queues.h | 6
-rw-r--r-- include/net/xdp_sock.h | 3
-rw-r--r-- include/net/xsk_buff_pool.h | 4
-rw-r--r-- include/sound/soc_sdw_utils.h | 1
-rw-r--r-- include/sound/ump_convert.h | 2
-rw-r--r-- include/trace/events/btrfs.h | 2
-rw-r--r-- include/uapi/linux/bpf.h | 3
-rw-r--r-- include/uapi/linux/ethtool_netlink_generated.h | 5
-rw-r--r-- init/Kconfig | 3
-rw-r--r-- io_uring/fdinfo.c | 4
-rw-r--r-- io_uring/io_uring.c | 58
-rw-r--r-- io_uring/sqpoll.c | 2
-rw-r--r-- kernel/irq/msi.c | 2
-rw-r--r-- kernel/params.c | 51
-rw-r--r-- kernel/time/timekeeping.c | 50
-rw-r--r-- kernel/time/vsyscall.c | 4
-rw-r--r-- kernel/trace/ftrace.c | 2
-rw-r--r-- kernel/trace/trace.c | 9
-rw-r--r-- kernel/trace/trace_output.c | 4
-rw-r--r-- mm/huge_memory.c | 11
-rw-r--r-- mm/hugetlb.c | 6
-rw-r--r-- mm/internal.h | 27
-rw-r--r-- mm/memblock.c | 21
-rw-r--r-- mm/mm_init.c | 2
-rw-r--r-- mm/slub.c | 30
-rw-r--r-- mm/swapfile.c | 23
-rw-r--r-- mm/vmalloc.c | 31
-rw-r--r-- net/bluetooth/hci_conn.c | 181
-rw-r--r-- net/bluetooth/hci_event.c | 15
-rw-r--r-- net/bluetooth/hci_sync.c | 150
-rw-r--r-- net/bluetooth/iso.c | 26
-rw-r--r-- net/bluetooth/l2cap_core.c | 3
-rw-r--r-- net/can/gw.c | 149
-rw-r--r-- net/core/dev.c | 18
-rw-r--r-- net/core/dev_api.c | 23
-rw-r--r-- net/core/filter.c | 1
-rw-r--r-- net/core/netdev-genl.c | 69
-rw-r--r-- net/ipv4/tcp_offload.c | 2
-rw-r--r-- net/ipv4/udp_offload.c | 61
-rw-r--r-- net/ipv6/addrconf.c | 15
-rw-r--r-- net/ipv6/tcpv6_offload.c | 2
-rw-r--r-- net/mac80211/mlme.c | 12
-rw-r--r-- net/mac80211/status.c | 8
-rw-r--r-- net/netfilter/ipset/ip_set_hash_gen.h | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_xmit.c | 27
-rw-r--r-- net/openvswitch/actions.c | 3
-rw-r--r-- net/sched/sch_drr.c | 9
-rw-r--r-- net/sched/sch_ets.c | 9
-rw-r--r-- net/sched/sch_hfsc.c | 2
-rw-r--r-- net/sched/sch_htb.c | 15
-rw-r--r-- net/sched/sch_qfq.c | 11
-rw-r--r-- net/wireless/scan.c | 2
-rw-r--r-- net/xdp/xsk.c | 6
-rw-r--r-- net/xdp/xsk_buff_pool.c | 1
-rw-r--r-- rust/bindings/lib.rs | 1
-rw-r--r-- rust/kernel/alloc/kvec.rs | 3
-rw-r--r-- rust/kernel/list.rs | 3
-rw-r--r-- rust/kernel/str.rs | 46
-rw-r--r-- rust/macros/kunit.rs | 13
-rw-r--r-- rust/macros/module.rs | 19
-rw-r--r-- rust/macros/paste.rs | 2
-rw-r--r-- rust/pin-init/internal/src/pinned_drop.rs | 3
-rw-r--r-- rust/uapi/lib.rs | 1
-rw-r--r-- scripts/Makefile.extrawarn | 9
-rw-r--r-- scripts/Makefile.vmlinux | 2
-rw-r--r-- sound/pci/hda/patch_realtek.c | 91
-rw-r--r-- sound/soc/amd/acp/acp-i2s.c | 2
-rw-r--r-- sound/soc/amd/acp/acp-legacy-common.c | 2
-rw-r--r-- sound/soc/amd/acp/acp-rembrandt.c | 2
-rw-r--r-- sound/soc/amd/acp/acp-renoir.c | 2
-rw-r--r-- sound/soc/amd/acp/acp63.c | 2
-rw-r--r-- sound/soc/amd/acp/acp70.c | 2
-rw-r--r-- sound/soc/amd/ps/pci-ps.c | 5
-rw-r--r-- sound/soc/codecs/Kconfig | 5
-rw-r--r-- sound/soc/codecs/cs42l43-jack.c | 7
-rw-r--r-- sound/soc/fsl/imx-card.c | 2
-rw-r--r-- sound/soc/generic/simple-card-utils.c | 4
-rw-r--r-- sound/soc/intel/boards/bytcr_rt5640.c | 13
-rw-r--r-- sound/soc/intel/catpt/dsp.c | 2
-rw-r--r-- sound/soc/intel/common/soc-acpi-intel-ptl-match.c | 3
-rw-r--r-- sound/soc/renesas/rz-ssi.c | 2
-rw-r--r-- sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c | 4
-rw-r--r-- sound/soc/sdw_utils/soc_sdw_cs42l43.c | 10
-rw-r--r-- sound/soc/sdw_utils/soc_sdw_cs_amp.c | 24
-rw-r--r-- sound/soc/sdw_utils/soc_sdw_rt_dmic.c | 2
-rw-r--r-- sound/soc/soc-pcm.c | 5
-rw-r--r-- sound/soc/stm/stm32_sai_sub.c | 16
-rw-r--r-- sound/usb/endpoint.c | 7
-rw-r--r-- sound/usb/format.c | 3
-rw-r--r-- sound/usb/midi.c | 16
-rw-r--r-- tools/arch/x86/include/asm/amd/ibs.h (renamed from tools/arch/x86/include/asm/amd-ibs.h) | 2
-rw-r--r-- tools/arch/x86/include/asm/inat.h | 6
-rw-r--r-- tools/arch/x86/kcpuid/cpuid.csv | 4
-rw-r--r-- tools/arch/x86/lib/insn.c | 7
-rw-r--r-- tools/arch/x86/lib/x86-opcode-map.txt | 56
-rw-r--r-- tools/arch/x86/tools/gen-insn-attr-x86.awk | 7
-rw-r--r-- tools/hv/hv_kvp_daemon.c | 108
-rw-r--r-- tools/include/uapi/linux/bpf.h | 3
-rw-r--r-- tools/lib/perf/Makefile | 14
-rw-r--r-- tools/net/ynl/lib/ynl.c | 2
-rw-r--r-- tools/objtool/arch/x86/decode.c | 6
-rw-r--r-- tools/objtool/check.c | 1
-rw-r--r-- tools/perf/Makefile.config | 1
-rw-r--r-- tools/testing/kunit/configs/all_tests.config | 2
-rw-r--r-- tools/testing/memblock/tests/basic_api.c | 102
l--------- tools/testing/selftests/drivers/net/dsa/tc_taprio.sh | 1
-rwxr-xr-x tools/testing/selftests/drivers/net/ocelot/psfp.sh | 8
-rwxr-xr-x tools/testing/selftests/drivers/net/ping.py | 45
-rw-r--r-- tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c | 57
-rw-r--r-- tools/testing/selftests/kexec/Makefile | 7
-rw-r--r-- tools/testing/selftests/kexec/test_kexec_jump.c | 72
-rwxr-xr-x tools/testing/selftests/kexec/test_kexec_jump.sh | 42
-rw-r--r-- tools/testing/selftests/kvm/arm64/set_id_regs.c | 8
-rw-r--r-- tools/testing/selftests/mm/compaction_test.c | 19
-rw-r--r-- tools/testing/selftests/mm/guard-regions.c | 16
-rw-r--r-- tools/testing/selftests/mm/pkey-powerpc.h | 14
-rw-r--r-- tools/testing/selftests/mm/pkey_util.c | 1
-rw-r--r-- tools/testing/selftests/net/Makefile | 1
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh | 96
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_taprio.sh | 421
-rw-r--r-- tools/testing/selftests/net/forwarding/tsn_lib.sh | 26
-rwxr-xr-x tools/testing/selftests/net/gre_ipv6_lladdr.sh | 177
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json | 221
-rw-r--r-- tools/testing/selftests/ublk/Makefile | 1
-rw-r--r-- tools/testing/selftests/ublk/kublk.c | 22
-rw-r--r-- tools/testing/selftests/ublk/kublk.h | 1
-rwxr-xr-x tools/testing/selftests/ublk/test_generic_07.sh | 28
-rwxr-xr-x tools/testing/selftests/ublk/test_stress_05.sh | 8
669 files changed, 11429 insertions, 6865 deletions
diff --git a/.clippy.toml b/.clippy.toml
index 815c94732ed7..137f41d203de 100644
--- a/.clippy.toml
+++ b/.clippy.toml
@@ -7,5 +7,5 @@ check-private-items = true
disallowed-macros = [
# The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate
# it here, see: https://github.com/rust-lang/rust-clippy/issues/11303.
- { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" },
+ { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool", allow-invalid = true },
]
diff --git a/.mailmap b/.mailmap
index 9afde79e1936..1c70e51c789d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -447,6 +447,8 @@ Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
+Lance Yang <lance.yang@linux.dev> <ioworker0@gmail.com>
+Lance Yang <lance.yang@linux.dev> <mingzhe.yang@ly.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org>
@@ -483,6 +485,7 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
Matthieu CASTET <castet.matthieu@free.fr>
Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
+Mattijs Korpershoek <mkorpershoek@kernel.org> <mkorpershoek@baylibre.com>
Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com>
Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting>
Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com>
@@ -749,6 +752,7 @@ Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
+Uwe Kleine-König <u.kleine-koenig@baylibre.com> <ukleinek@baylibre.com>
Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Uwe Kleine-König <ukleinek@strlen.de>
Uwe Kleine-König <ukl@pengutronix.de>
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 9bce281314df..cb207c79680d 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -111,7 +111,7 @@ Description: RO. Package current voltage in millivolt.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp2_input
Date: March 2025
-KernelVersion: 6.14
+KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RO. Package temperature in millidegree Celsius.
@@ -119,7 +119,7 @@ Description: RO. Package temperature in millidegree Celsius.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp3_input
Date: March 2025
-KernelVersion: 6.14
+KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RO. VRAM temperature in millidegree Celsius.
diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
index 517a4ac1bea3..e365413732e7 100644
--- a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
+++ b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Mediatek's Keypad Controller
maintainers:
- - Mattijs Korpershoek <mkorpershoek@baylibre.com>
+ - Mattijs Korpershoek <mkorpershoek@kernel.org>
allOf:
- $ref: /schemas/input/matrix-keymap.yaml#
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 45819b235800..a2d4c626f659 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -74,19 +74,17 @@ properties:
- rev-rmii
- moca
- # RX and TX delays are added by the MAC when required
+ # RX and TX delays are provided by the PCB. See below
- rgmii
- # RGMII with internal RX and TX delays provided by the PHY,
- # the MAC should not add the RX or TX delays in this case
+ # RX and TX delays are not provided by the PCB. This is the most
+ # frequent case. See below
- rgmii-id
- # RGMII with internal RX delay provided by the PHY, the MAC
- # should not add an RX delay in this case
+ # TX delay is provided by the PCB. See below
- rgmii-rxid
- # RGMII with internal TX delay provided by the PHY, the MAC
- # should not add an TX delay in this case
+ # RX delay is provided by the PCB. See below
- rgmii-txid
- rtbi
- smii
@@ -286,4 +284,89 @@ allOf:
additionalProperties: true
+# Informative
+# ===========
+#
+# 'phy-modes' & 'phy-connection-type' properties 'rgmii', 'rgmii-id',
+# 'rgmii-rxid', and 'rgmii-txid' are frequently used wrongly by
+# developers. This informative section clarifies their usage.
+#
+# The RGMII specification requires a 2ns delay between the data and
+# clock signals on the RGMII bus. How this delay is implemented is not
+# specified.
+#
+# One option is to make the clock traces on the PCB longer than the
+# data traces. A sufficiently difference in length can provide the 2ns
+# delay. If both the RX and TX delays are implemented in this manner,
+# 'rgmii' should be used, so indicating the PCB adds the delays.
+#
+# If the PCB does not add these delays via extra long traces,
+# 'rgmii-id' should be used. Here, 'id' refers to 'internal delay',
+# where either the MAC or PHY adds the delay.
+#
+# If only one of the two delays is implemented via extra long clock
+# lines, either 'rgmii-rxid' or 'rgmii-txid' should be used,
+# indicating the MAC or PHY should implement one of the delays
+# internally, while the PCB implements the other delay.
+#
+# Device Tree describes hardware, and in this case, it describes the
+# PCB between the MAC and the PHY, if the PCB implements delays or
+# not.
+#
+# In practice, very few PCBs make use of extra long clock lines. Hence
+# any RGMII phy mode other than 'rgmii-id' is probably wrong, and is
+# unlikely to be accepted during review without details provided in
+# the commit description and comments in the .dts file.
+#
+# When the PCB does not implement the delays, the MAC or PHY must. As
+# such, this is software configuration, and so not described in Device
+# Tree.
+#
+# The following describes how Linux implements the configuration of
+# the MAC and PHY to add these delays when the PCB does not. As stated
+# above, developers often get this wrong, and the aim of this section
+# is to reduce the frequency of these errors by Linux developers. Other
+# users of the Device Tree may implement it differently, and still be
+# consistent with both the normative and informative description
+# above.
+#
+# By default in Linux, when using phylib/phylink, the MAC is expected
+# to read the 'phy-mode' from Device Tree, not implement any delays,
+# and pass the value to the PHY. The PHY will then implement delays as
+# specified by the 'phy-mode'. The PHY should always be reconfigured
+# to implement the needed delays, replacing any setting performed by
+# strapping or the bootloader, etc.
+#
+# Experience to date is that all PHYs which implement RGMII also
+# implement the ability to add or not add the needed delays. Hence
+# this default is expected to work in all cases. Ignoring this default
+# is likely to be questioned by Reviewers, and require a strong argument
+# to be accepted.
+#
+# There are a small number of cases where the MAC has hard coded
+# delays which cannot be disabled. The 'phy-mode' only describes the
+# PCB. The inability to disable the delays in the MAC does not change
+# the meaning of 'phy-mode'. It does however mean that a 'phy-mode' of
+# 'rgmii' is now invalid: it cannot be supported, since both the PCB
+# and the MAC and PHY adding delays cannot result in a functional
+# link. Thus the MAC should report a fatal error for any modes which
+# cannot be supported. When the MAC implements the delay, it must
+# ensure that the PHY does not also implement the same delay. So it
+# must modify the phy-mode it passes to the PHY, removing the delay it
+# has added. Failure to remove the delay will result in a
+# non-functioning link.
+#
+# Sometimes there is a need to fine tune the delays. Often the MAC or
+# PHY can perform this fine tuning. In the MAC node, the Device Tree
+# properties 'rx-internal-delay-ps' and 'tx-internal-delay-ps' should
+# be used to indicate fine tuning performed by the MAC. The values
+# expected here are small. A value of 2000ps, i.e. 2ns, and a phy-mode
+# of 'rgmii' will not be accepted by Reviewers.
+#
+# If the PHY is to perform fine tuning, the properties
+# 'rx-internal-delay-ps' and 'tx-internal-delay-ps' in the PHY node
+# should be used. When the PHY is implementing delays, e.g. 'rgmii-id'
+# these properties should have a value near to 2000ps. If the PCB is
+# implementing delays, e.g. 'rgmii', a small value can be used to fine
+# tune the delay added by the PCB.
...
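
To make the phylib default described above concrete, the following is a
minimal C sketch of a MAC driver that reads 'phy-mode' from Device Tree,
adds no delay of its own, and hands the mode unchanged to the PHY. The
foo_* names are hypothetical; of_get_phy_mode() and of_phy_connect() are
the standard helpers assumed here.

#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void foo_mac_adjust_link(struct net_device *ndev)
{
	/* react to link, speed and duplex changes reported by phylib */
}

static int foo_mac_attach_phy(struct net_device *ndev,
			      struct device_node *np,
			      struct device_node *phy_np)
{
	phy_interface_t iface;
	int err;

	/* e.g. PHY_INTERFACE_MODE_RGMII_ID for phy-mode = "rgmii-id" */
	err = of_get_phy_mode(np, &iface);
	if (err)
		return err;

	/*
	 * Pass the mode through unmodified: phylib reconfigures the
	 * PHY's internal delays to match, replacing any strapping or
	 * bootloader setting, exactly as the informative text expects.
	 */
	if (!of_phy_connect(ndev, phy_np, foo_mac_adjust_link, 0, iface))
		return -ENODEV;

	return 0;
}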
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
index bccd00a1ddd0..53d00ca643b3 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
@@ -56,19 +56,18 @@ properties:
enum:
- snps,dw-apb-ssi
- snps,dwc-ssi-1.01a
- - description: Microsemi Ocelot/Jaguar2 SoC SPI Controller
- items:
- - enum:
- - mscc,ocelot-spi
- - mscc,jaguar2-spi
- - const: snps,dw-apb-ssi
- description: Microchip Sparx5 SoC SPI Controller
const: microchip,sparx5-spi
- description: Amazon Alpine SPI Controller
const: amazon,alpine-dw-apb-ssi
- - description: Renesas RZ/N1 SPI Controller
+ - description: Vendor controllers which use snps,dw-apb-ssi as fallback
items:
- - const: renesas,rzn1-spi
+ - enum:
+ - mscc,ocelot-spi
+ - mscc,jaguar2-spi
+ - renesas,rzn1-spi
+ - sophgo,sg2042-spi
+ - thead,th1520-spi
- const: snps,dw-apb-ssi
- description: Intel Keem Bay SPI Controller
const: intel,keembay-ssi
@@ -88,10 +87,6 @@ properties:
- renesas,r9a06g032-spi # RZ/N1D
- renesas,r9a06g033-spi # RZ/N1S
- const: renesas,rzn1-spi # RZ/N1
- - description: T-HEAD TH1520 SoC SPI Controller
- items:
- - const: thead,th1520-spi
- - const: snps,dw-apb-ssi
reg:
minItems: 1
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 655d8d10fe24..c650cd3dcb80 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -89,8 +89,10 @@ definitions:
doc: Group of short_detected states
-
name: phy-upstream-type
- enum-name:
+ enum-name: phy-upstream
+ header: linux/ethtool.h
type: enum
+ name-prefix: phy-upstream
entries: [ mac, phy ]
-
name: tcp-data-split
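
For orientation, the 'enum-name' and 'name-prefix' values added above tie
the spec to an existing enum in the referenced UAPI header; in recent
kernels it looks roughly like this (reproduced from memory, abridged):

/* include/uapi/linux/ethtool.h */
enum phy_upstream {
	PHY_UPSTREAM_MAC,	/* spec entry 'mac' */
	PHY_UPSTREAM_PHY,	/* spec entry 'phy' */
};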
diff --git a/MAINTAINERS b/MAINTAINERS
index fedcbcba8397..92de5ecfb19e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2519,6 +2519,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/boot/dts/nxp/imx/
F: arch/arm/boot/dts/nxp/mxs/
F: arch/arm64/boot/dts/freescale/
+X: Documentation/devicetree/bindings/media/i2c/
X: arch/arm64/boot/dts/freescale/fsl-*
X: arch/arm64/boot/dts/freescale/qoriq-*
X: drivers/media/i2c/
@@ -8726,6 +8727,7 @@ M: Chao Yu <chao@kernel.org>
R: Yue Hu <zbestahu@gmail.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
R: Sandeep Dhavale <dhavale@google.com>
+R: Hongbo Li <lihongbo22@huawei.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
@@ -11235,7 +11237,6 @@ S: Maintained
F: drivers/i2c/busses/i2c-cht-wc.c
I2C/SMBUS ISMT DRIVER
-M: Seth Heasley <seth.heasley@intel.com>
M: Neil Horman <nhorman@tuxdriver.com>
L: linux-i2c@vger.kernel.org
F: Documentation/i2c/busses/i2c-ismt.rst
@@ -15071,7 +15072,7 @@ F: Documentation/devicetree/bindings/media/mediatek-jpeg-*.yaml
F: drivers/media/platform/mediatek/jpeg/
MEDIATEK KEYPAD DRIVER
-M: Mattijs Korpershoek <mkorpershoek@baylibre.com>
+M: Mattijs Korpershoek <mkorpershoek@kernel.org>
S: Supported
F: Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
F: drivers/input/keyboard/mt6779-keypad.c
@@ -15494,24 +15495,45 @@ F: Documentation/mm/
F: include/linux/gfp.h
F: include/linux/gfp_types.h
F: include/linux/memfd.h
-F: include/linux/memory.h
F: include/linux/memory_hotplug.h
F: include/linux/memory-tiers.h
F: include/linux/mempolicy.h
F: include/linux/mempool.h
F: include/linux/memremap.h
-F: include/linux/mm.h
-F: include/linux/mm_*.h
F: include/linux/mmzone.h
F: include/linux/mmu_notifier.h
F: include/linux/pagewalk.h
-F: include/linux/rmap.h
F: include/trace/events/ksm.h
F: mm/
F: tools/mm/
F: tools/testing/selftests/mm/
N: include/linux/page[-_]*
+MEMORY MANAGEMENT - CORE
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Mike Rapoport <rppt@kernel.org>
+R: Suren Baghdasaryan <surenb@google.com>
+R: Michal Hocko <mhocko@suse.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/linux/memory.h
+F: include/linux/mm.h
+F: include/linux/mm_*.h
+F: include/linux/mmdebug.h
+F: include/linux/pagewalk.h
+F: mm/Kconfig
+F: mm/debug.c
+F: mm/init-mm.c
+F: mm/memory.c
+F: mm/pagewalk.c
+F: mm/util.c
+
MEMORY MANAGEMENT - EXECMEM
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15545,6 +15567,19 @@ F: mm/page_alloc.c
F: include/linux/gfp.h
F: include/linux/compaction.h
+MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Rik van Riel <riel@surriel.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Harry Yoo <harry.yoo@oracle.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/rmap.h
+F: mm/rmap.c
+
MEMORY MANAGEMENT - SECRETMEM
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15553,6 +15588,30 @@ S: Maintained
F: include/linux/secretmem.h
F: mm/secretmem.c
+MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Zi Yan <ziy@nvidia.com>
+R: Baolin Wang <baolin.wang@linux.alibaba.com>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Nico Pache <npache@redhat.com>
+R: Ryan Roberts <ryan.roberts@arm.com>
+R: Dev Jain <dev.jain@arm.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/admin-guide/mm/transhuge.rst
+F: include/linux/huge_mm.h
+F: include/linux/khugepaged.h
+F: include/trace/events/huge_memory.h
+F: mm/huge_memory.c
+F: mm/khugepaged.c
+F: tools/testing/selftests/mm/khugepaged.c
+F: tools/testing/selftests/mm/split_huge_page_test.c
+F: tools/testing/selftests/mm/transhuge-stress.c
+
MEMORY MANAGEMENT - USERFAULTFD
M: Andrew Morton <akpm@linux-foundation.org>
R: Peter Xu <peterx@redhat.com>
@@ -22738,9 +22797,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
F: Documentation/devicetree/bindings/sound/
F: Documentation/sound/soc/
F: include/dt-bindings/sound/
+F: include/sound/cs-amp-lib.h
+F: include/sound/cs35l*
+F: include/sound/cs4271.h
+F: include/sound/cs42l*
+F: include/sound/madera-pdata.h
F: include/sound/soc*
F: include/sound/sof.h
F: include/sound/sof/
+F: include/sound/wm*.h
F: include/trace/events/sof*.h
F: include/uapi/sound/asoc.h
F: sound/soc/
diff --git a/Makefile b/Makefile
index 5aa9ee52a765..64c514f4bc19 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1052,13 +1052,6 @@ NOSTDINC_FLAGS += -nostdinc
# perform bounds checking.
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
-#Currently, disable -Wstringop-overflow for GCC 11, globally.
-KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
-KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
-
-#Currently, disable -Wunterminated-string-initialization as broken
-KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
-
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
index f2386dcb9ff2..dda4fa91b2f2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
@@ -40,6 +40,9 @@
reg = <1>;
interrupt-parent = <&gpio4>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET_REF>;
+ clock-names = "rmii-ref";
status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/arm/morello.dtsi b/arch/arm64/boot/dts/arm/morello.dtsi
index 0bab0b3ea969..5bc1c725dc86 100644
--- a/arch/arm64/boot/dts/arm/morello.dtsi
+++ b/arch/arm64/boot/dts/arm/morello.dtsi
@@ -44,7 +44,7 @@
next-level-cache = <&l2_0>;
clocks = <&scmi_dvfs 0>;
- l2_0: l2-cache-0 {
+ l2_0: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -53,13 +53,6 @@
cache-sets = <2048>;
cache-unified;
next-level-cache = <&l3_0>;
-
- l3_0: l3-cache {
- compatible = "cache";
- cache-level = <3>;
- cache-size = <0x100000>;
- cache-unified;
- };
};
};
@@ -78,7 +71,7 @@
next-level-cache = <&l2_1>;
clocks = <&scmi_dvfs 0>;
- l2_1: l2-cache-1 {
+ l2_1: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -105,7 +98,7 @@
next-level-cache = <&l2_2>;
clocks = <&scmi_dvfs 1>;
- l2_2: l2-cache-2 {
+ l2_2: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -132,7 +125,7 @@
next-level-cache = <&l2_3>;
clocks = <&scmi_dvfs 1>;
- l2_3: l2-cache-3 {
+ l2_3: l2-cache {
compatible = "cache";
cache-level = <2>;
/* 8 ways set associative */
@@ -143,6 +136,13 @@
next-level-cache = <&l3_0>;
};
};
+
+ l3_0: l3-cache {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-size = <0x100000>;
+ cache-unified;
+ };
};
firmware {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 7251ad3a0017..b46566f3ce20 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -144,6 +144,19 @@
startup-delay-us = <20000>;
};
+ reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
+ compatible = "regulator-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_vsel>;
+ gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ states = <1800000 0x1>,
+ <3300000 0x0>;
+ regulator-name = "PMIC_USDHC_VSELECT";
+ vin-supply = <&reg_nvcc_sd>;
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -269,7 +282,7 @@
"SODIMM_19",
"",
"",
- "",
+ "PMIC_USDHC_VSELECT",
"",
"",
"",
@@ -785,6 +798,7 @@
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>;
vmmc-supply = <&reg_usdhc2_vmmc>;
+ vqmmc-supply = <&reg_usdhc2_vqmmc>;
};
&wdog1 {
@@ -1206,13 +1220,17 @@
<MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x6>; /* SODIMM 76 */
};
+ pinctrl_usdhc2_vsel: usdhc2vselgrp {
+ fsl,pins =
+ <MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x10>; /* PMIC_USDHC_VSELECT */
+ };
+
/*
* Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the
* on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here.
*/
pinctrl_usdhc2: usdhc2grp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90>, /* SODIMM 78 */
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x90>, /* SODIMM 74 */
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x90>, /* SODIMM 80 */
@@ -1223,7 +1241,6 @@
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x94>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x94>,
@@ -1234,7 +1251,6 @@
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x96>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x96>,
@@ -1246,7 +1262,6 @@
/* Avoid backfeeding with removed card power */
pinctrl_usdhc2_sleep: usdhc2slpgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x0>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
index a1b75c9068b2..dc0ccd723c6d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
@@ -24,6 +24,20 @@
fsl,operating-mode = "nominal";
};
+&gpu2d {
+ assigned-clocks = <&clk IMX8MP_CLK_GPU2D_CORE>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <800000000>;
+};
+
+&gpu3d {
+ assigned-clocks = <&clk IMX8MP_CLK_GPU3D_CORE>,
+ <&clk IMX8MP_CLK_GPU3D_SHADER_CORE>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <800000000>, <800000000>;
+};
+
&pgc_hdmimix {
assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>,
<&clk IMX8MP_CLK_HDMI_APB>;
@@ -46,6 +60,18 @@
assigned-clock-rates = <600000000>, <300000000>;
};
+&pgc_mlmix {
+ assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
+ <&clk IMX8MP_CLK_ML_AXI>,
+ <&clk IMX8MP_CLK_ML_AHB>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <800000000>,
+ <800000000>,
+ <300000000>;
+};
+
&media_blk_ctrl {
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
<&clk IMX8MP_CLK_MEDIA_APB>,
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index 9bb26b466a06..59f057ba6fa7 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -1626,7 +1626,7 @@
reg = <0 0x4c300000 0 0x10000>,
<0 0x60100000 0 0xfe00000>,
<0 0x4c360000 0 0x10000>,
- <0 0x4c340000 0 0x2000>;
+ <0 0x4c340000 0 0x4000>;
reg-names = "dbi", "config", "atu", "app";
ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>,
<0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>;
@@ -1673,7 +1673,7 @@
reg = <0 0x4c300000 0 0x10000>,
<0 0x4c360000 0 0x1000>,
<0 0x4c320000 0 0x1000>,
- <0 0x4c340000 0 0x2000>,
+ <0 0x4c340000 0 0x4000>,
<0 0x4c370000 0 0x10000>,
<0x9 0 1 0>;
reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";
@@ -1700,7 +1700,7 @@
reg = <0 0x4c380000 0 0x10000>,
<8 0x80100000 0 0xfe00000>,
<0 0x4c3e0000 0 0x10000>,
- <0 0x4c3c0000 0 0x2000>;
+ <0 0x4c3c0000 0 0x4000>;
reg-names = "dbi", "config", "atu", "app";
ranges = <0x81000000 0 0x00000000 0x8 0x8ff00000 0 0x00100000>,
<0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>;
@@ -1749,7 +1749,7 @@
reg = <0 0x4c380000 0 0x10000>,
<0 0x4c3e0000 0 0x1000>,
<0 0x4c3a0000 0 0x1000>,
- <0 0x4c3c0000 0 0x2000>,
+ <0 0x4c3c0000 0 0x4000>,
<0 0x4c3f0000 0 0x10000>,
<0xa 0 1 0>;
reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";
diff --git a/arch/arm64/boot/dts/st/stm32mp211.dtsi b/arch/arm64/boot/dts/st/stm32mp211.dtsi
index 6dd1377f3e1d..bf888d60cd4f 100644
--- a/arch/arm64/boot/dts/st/stm32mp211.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp211.dtsi
@@ -116,11 +116,11 @@
};
intc: interrupt-controller@4ac10000 {
- compatible = "arm,cortex-a7-gic";
+ compatible = "arm,gic-400";
reg = <0x4ac10000 0x0 0x1000>,
- <0x4ac20000 0x0 0x2000>,
- <0x4ac40000 0x0 0x2000>,
- <0x4ac60000 0x0 0x2000>;
+ <0x4ac20000 0x0 0x20000>,
+ <0x4ac40000 0x0 0x20000>,
+ <0x4ac60000 0x0 0x20000>;
#interrupt-cells = <3>;
interrupt-controller;
};
diff --git a/arch/arm64/boot/dts/st/stm32mp231.dtsi b/arch/arm64/boot/dts/st/stm32mp231.dtsi
index 8820d219a33e..75697acd1345 100644
--- a/arch/arm64/boot/dts/st/stm32mp231.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp231.dtsi
@@ -1201,13 +1201,12 @@
};
intc: interrupt-controller@4ac10000 {
- compatible = "arm,cortex-a7-gic";
+ compatible = "arm,gic-400";
reg = <0x4ac10000 0x1000>,
- <0x4ac20000 0x2000>,
- <0x4ac40000 0x2000>,
- <0x4ac60000 0x2000>;
+ <0x4ac20000 0x20000>,
+ <0x4ac40000 0x20000>,
+ <0x4ac60000 0x20000>;
#interrupt-cells = <3>;
- #address-cells = <1>;
interrupt-controller;
};
};
diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi
index f3c6cdfd7008..87110f91e489 100644
--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi
@@ -115,14 +115,13 @@
};
intc: interrupt-controller@4ac00000 {
- compatible = "arm,cortex-a7-gic";
+ compatible = "arm,gic-400";
#interrupt-cells = <3>;
- #address-cells = <1>;
interrupt-controller;
reg = <0x0 0x4ac10000 0x0 0x1000>,
- <0x0 0x4ac20000 0x0 0x2000>,
- <0x0 0x4ac40000 0x0 0x2000>,
- <0x0 0x4ac60000 0x0 0x2000>;
+ <0x0 0x4ac20000 0x0 0x20000>,
+ <0x0 0x4ac40000 0x0 0x20000>,
+ <0x0 0x4ac60000 0x0 0x20000>;
};
psci {
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index ebceaae3c749..d40e427ddad9 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -52,7 +52,7 @@
mrs x0, id_aa64mmfr1_el1
ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x0, .Lskip_hcrx_\@
- mov_q x0, HCRX_HOST_FLAGS
+ mov_q x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
/* Enable GCS if supported */
mrs_s x1, SYS_ID_AA64PFR1_EL1
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 974d72b5905b..e9c8a581e16f 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -100,9 +100,8 @@
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
-#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)
-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define MPAMHCR_HOST_FLAGS 0
/* TCR_EL2 Registers bits */
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index 92a2b59a9f3d..3322c7047d84 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -99,6 +99,19 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return res;
}
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
+{
+ const struct vdso_time_data *ret = &vdso_u_time_data;
+
+ /* Work around invalid absolute relocations */
+ OPTIMIZER_HIDE_VAR(ret);
+
+ return ret;
+}
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
+#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9c4d6d552b25..4c46d80aa64b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -114,7 +114,14 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC
DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
-bool arm64_use_ng_mappings = false;
+/*
+ * arm64_use_ng_mappings must be placed in the .data section, otherwise it
+ * ends up in the .bss section where it is initialized in early_map_kernel()
+ * after the MMU (with the idmap) has been enabled. create_init_idmap() -
+ * which runs before early_map_kernel() and reads the variable via
+ * PTE_MAYBE_NG - may end up generating incorrect idmap page table attributes.
+ */
+bool arm64_use_ng_mappings __read_mostly = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
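Why the __read_mostly annotation fixes this: it expands to a section attribute that moves the variable into an initialized-data section, so it holds a valid value before .bss is cleared. A minimal user-space sketch of the same placement effect (the section name and variable names here are illustrative, not the kernel's):

    #include <stdbool.h>

    /* A zero-initialized global normally lands in .bss and is only valid
     * once the loader (or, in early boot, the kernel itself) zeroes it. */
    bool flag_in_bss = false;

    /* An explicit section attribute -- which is what __read_mostly boils
     * down to -- keeps the variable in initialized data instead. Check with
     *   cc -c sketch.c && nm sketch.o
     * which reports 'B' for .bss and 'D' for data symbols. */
    bool flag_in_data __attribute__((section(".data.example"))) = false;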
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index b198dde79e59..b607f6dfc5e6 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void)
static const struct midr_range spectre_bhb_k132_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ {},
};
static const struct midr_range spectre_bhb_k38_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+ {},
};
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
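The two added `{},` entries matter because the MIDR list walkers stop at an all-zero sentinel; without it, iteration runs past the end of the array. A reduced sketch of that pattern (struct and field names are illustrative, not the kernel's):

    #include <stdbool.h>

    struct midr_entry {
        unsigned int model;   /* 0 terminates the list */
    };

    static bool midr_in_list(unsigned int midr, const struct midr_entry *list)
    {
        /* Relies on a trailing {} sentinel -- exactly what the hunk adds. */
        for (; list->model; list++)
            if (list->model == midr)
                return true;
        return false;
    }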
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index b741ea6aefa5..96f625dc7256 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void)
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
@@ -245,11 +247,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* EL1 instead of being trapped to EL2.
*/
if (system_supports_pmuv3()) {
- struct kvm_cpu_context *hctxt;
-
write_sysreg(0, pmselr_el0);
- hctxt = host_data_ptr(host_ctxt);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -269,6 +268,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
hcrx &= ~clr;
}
+ ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
write_sysreg_s(hcrx, SYS_HCRX_EL2);
}
@@ -278,19 +278,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
write_sysreg(0, hstr_el2);
if (system_supports_pmuv3()) {
- struct kvm_cpu_context *hctxt;
-
- hctxt = host_data_ptr(host_ctxt);
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
if (cpus_have_final_cap(ARM64_HAS_HCX))
- write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);
__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 2a5284f749b4..e80f3ebd3e2a 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
int ret;
- if (!addr_is_memory(addr))
+ if (!range_is_memory(addr, addr + size))
return -EPERM;
ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
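The fix replaces a start-address-only check with a whole-range check: a range can begin in memory yet extend past its end. A self-contained sketch of the difference, with made-up memory bounds:

    #include <stdbool.h>
    #include <stdint.h>

    #define MEM_START 0x80000000ULL
    #define MEM_END   0xc0000000ULL   /* exclusive */

    static bool addr_is_memory(uint64_t addr)
    {
        return addr >= MEM_START && addr < MEM_END;
    }

    /* The old check would accept start = MEM_END - 0x1000 with size 0x2000
     * even though the tail falls outside memory; this one rejects it. */
    static bool range_is_memory(uint64_t start, uint64_t end)
    {
        return end > start && addr_is_memory(start) && addr_is_memory(end - 1);
    }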
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index ed363aa3027e..50aa8dbcae75 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -429,23 +429,27 @@ u64 __vgic_v3_get_gic_config(void)
/*
* To check whether we have a MMIO-based (GICv2 compatible)
* CPU interface, we need to disable the system register
- * view. To do that safely, we have to prevent any interrupt
- * from firing (which would be deadly).
+ * view.
*
- * Note that this only makes sense on VHE, as interrupts are
- * already masked for nVHE as part of the exception entry to
- * EL2.
- */
- if (has_vhe())
- flags = local_daif_save();
-
- /*
* Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
* that to be able to set ICC_SRE_EL1.SRE to 0, all the
* interrupt overrides must be set. You've got to love this.
+ *
+ * As we always run VHE with HCR_xMO set, no extra xMO
+ * manipulation is required in that case.
+ *
+ * To safely disable SRE, we have to prevent any interrupt
+ * from firing (which would be deadly). This only makes sense
+ * on VHE, as interrupts are already masked for nVHE as part
+ * of the exception entry to EL2.
*/
- sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
- isb();
+ if (has_vhe()) {
+ flags = local_daif_save();
+ } else {
+ sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+ isb();
+ }
+
write_gicreg(0, ICC_SRE_EL1);
isb();
@@ -453,11 +457,13 @@ u64 __vgic_v3_get_gic_config(void)
write_gicreg(sre, ICC_SRE_EL1);
isb();
- sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
- isb();
- if (has_vhe())
+ if (has_vhe()) {
local_daif_restore(flags);
+ } else {
+ sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+ isb();
+ }
val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
val |= read_gicreg(ICH_VTR_EL2);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 754f2fe0cc67..eeda92330ade 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
+ if (!is_protected_kvm_enabled())
+ memcache = &vcpu->arch.mmu_page_cache;
+ else
+ memcache = &vcpu->arch.pkvm_memcache;
+
/*
* Permission faults just need to update the existing leaf entry,
* and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (!fault_is_perm || (logging_active && write_fault)) {
int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
- if (!is_protected_kvm_enabled()) {
- memcache = &vcpu->arch.mmu_page_cache;
+ if (!is_protected_kvm_enabled())
ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
- } else {
- memcache = &vcpu->arch.pkvm_memcache;
+ else
ret = topup_hyp_memcache(memcache, min_pages);
- }
+
if (ret)
return ret;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 005ad28f7306..5dde9285afc8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+ /* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
+ if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
+ !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
+ (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
+ return -EINVAL;
+
return set_id_reg(vcpu, rd, user_val);
}
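For reference, FIELD_GET() extracts a field through its mask, so the new checks read as "is the ELx field zero". A rough single-expression equivalent for constant, contiguous masks (the kernel's real macro in <linux/bitfield.h> adds type and compile-time checks):

    /* (mask & -mask) isolates the mask's lowest set bit; dividing by it
     * shifts the extracted field down to bit 0. */
    #define FIELD_GET_SKETCH(mask, reg)  (((reg) & (mask)) / ((mask) & -(mask)))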
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
index 0992cad9c632..c7d75807d13f 100644
--- a/arch/mips/include/asm/idle.h
+++ b/arch/mips/include/asm/idle.h
@@ -6,11 +6,10 @@
#include <linux/linkage.h>
extern void (*cpu_wait)(void);
-extern void r4k_wait(void);
-extern asmlinkage void __r4k_wait(void);
+extern asmlinkage void r4k_wait(void);
extern void r4k_wait_irqoff(void);
-static inline int using_rollback_handler(void)
+static inline int using_skipover_handler(void)
{
return cpu_wait == r4k_wait;
}
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 85fa9962266a..ef72c46b5568 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+#define MAX_REG_OFFSET \
+ (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset
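The old definition was off by one register: offsetof(struct pt_regs, __last) points one element past the final saved register, so the largest valid offset is one unsigned long lower. A compilable illustration with a stand-in struct (field layout hypothetical; sizes assume a 64-bit target):

    #include <stdio.h>
    #include <stddef.h>

    struct fake_pt_regs {
        unsigned long regs[32];
        unsigned long __last[0];   /* end-of-struct marker, as in the kernel */
    };

    int main(void)
    {
        /* Prints 256 for __last, but the last register lives at offset 248. */
        printf("one past the end:  %zu\n", offsetof(struct fake_pt_regs, __last));
        printf("last valid offset: %zu\n",
               offsetof(struct fake_pt_regs, __last) - sizeof(unsigned long));
        return 0;
    }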
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a572ce36a24f..08c0a01d9a29 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -104,48 +104,59 @@ handle_vcei:
__FINIT
- .align 5 /* 32 byte rollback region */
-LEAF(__r4k_wait)
- .set push
- .set noreorder
- /* start of rollback region */
- LONG_L t0, TI_FLAGS($28)
- nop
- andi t0, _TIF_NEED_RESCHED
- bnez t0, 1f
- nop
- nop
- nop
-#ifdef CONFIG_CPU_MICROMIPS
- nop
- nop
- nop
- nop
-#endif
+ .section .cpuidle.text,"ax"
+ /* Align to 32 bytes for the maximum idle interrupt region size. */
+ .align 5
+LEAF(r4k_wait)
+ /* Keep the ISA bit clear for calculations on local labels here. */
+0: .fill 0
+ /* Start of idle interrupt region. */
+ local_irq_enable
+ /*
+ * If an interrupt lands here, before going idle on the next
+ * instruction, we must *NOT* go idle since the interrupt could
+ * have set TIF_NEED_RESCHED or caused a timer to need resched.
+ * Fall through -- see skipover_handler below -- and have the
+ * idle loop take care of things.
+ */
+1: .fill 0
+	/* The R2 EI/EHB sequence takes 8 bytes; otherwise pad up to 16 bytes. */
+ .if 1b - 0b > 32
+ .error "overlong idle interrupt region"
+ .elseif 1b - 0b > 8
+ .align 4
+ .endif
+2: .fill 0
+ .equ r4k_wait_idle_size, 2b - 0b
+ /* End of idle interrupt region; size has to be a power of 2. */
.set MIPS_ISA_ARCH_LEVEL_RAW
+r4k_wait_insn:
wait
- /* end of rollback region (the region size must be power of two) */
-1:
+r4k_wait_exit:
+ .set mips0
+ local_irq_disable
jr ra
- nop
- .set pop
- END(__r4k_wait)
+ END(r4k_wait)
+ .previous
- .macro BUILD_ROLLBACK_PROLOGUE handler
- FEXPORT(rollback_\handler)
+ .macro BUILD_SKIPOVER_PROLOGUE handler
+ FEXPORT(skipover_\handler)
.set push
.set noat
MFC0 k0, CP0_EPC
- PTR_LA k1, __r4k_wait
- ori k0, 0x1f /* 32 byte rollback region */
- xori k0, 0x1f
+ /* Subtract/add 2 to let the ISA bit propagate through the mask. */
+ PTR_LA k1, r4k_wait_insn - 2
+ ori k0, r4k_wait_idle_size - 2
+ .set noreorder
bne k0, k1, \handler
+ PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2
+ .set reorder
MTC0 k0, CP0_EPC
.set pop
.endm
.align 5
-BUILD_ROLLBACK_PROLOGUE handle_int
+BUILD_SKIPOVER_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
-BUILD_ROLLBACK_PROLOGUE except_vec_vi
+BUILD_SKIPOVER_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME docfi=1
SAVE_AT docfi=1
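The prologue's ori/compare pair is a branchless way of asking "did the interrupt hit inside the idle region?". Because the region is aligned to its power-of-two size, OR-ing (size - 1) into EPC maps every address inside the region onto a single value. A C model of the check, ignoring the microMIPS ISA-bit adjustment that the -2/+2 terms handle:

    #include <stdbool.h>
    #include <stdint.h>

    /* region_base is aligned to region_size, and region_size is a power of
     * two -- both invariants the assembly enforces at build time. */
    static bool epc_in_idle_region(uint32_t epc, uint32_t region_base,
                                   uint32_t region_size)
    {
        return (epc | (region_size - 1)) == (region_base | (region_size - 1));
    }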
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5abc8b7340f8..80e8a04a642e 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
write_c0_conf(cfg | R30XX_CONF_HALT);
}
-void __cpuidle r4k_wait(void)
-{
- raw_local_irq_enable();
- __r4k_wait();
- raw_local_irq_disable();
-}
-
/*
* This variant is preferable as it allows testing need_resched and going to
* sleep depending on the outcome atomically. Unfortunately the "It is
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index e85bd087467e..cc26d56f3ab6 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -332,6 +332,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
mips_cps_cluster_bootcfg = kcalloc(nclusters,
sizeof(*mips_cps_cluster_bootcfg),
GFP_KERNEL);
+ if (!mips_cps_cluster_bootcfg)
+ goto err_out;
if (nclusters > 1)
mips_cm_update_property();
@@ -348,6 +350,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
mips_cps_cluster_bootcfg[cl].core_power =
kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
GFP_KERNEL);
+ if (!mips_cps_cluster_bootcfg[cl].core_power)
+ goto err_out;
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 39e248d0ed59..8ec1e185b35c 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -77,7 +77,7 @@
#include "access-helper.h"
extern void check_wait(void);
-extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void skipover_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
@@ -2066,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
{
extern const u8 except_vec_vi[];
extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
- extern const u8 rollback_except_vec_vi[];
+ extern const u8 skipover_except_vec_vi[];
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
@@ -2095,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
change_c0_srsmap(0xf << n*4, 0 << n*4);
}
- vec_start = using_rollback_handler() ? rollback_except_vec_vi :
+ vec_start = using_skipover_handler() ? skipover_except_vec_vi :
except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
ori_offset = except_vec_vi_ori - vec_start + 2;
@@ -2426,8 +2426,8 @@ void __init trap_init(void)
if (board_be_init)
board_be_init();
- set_except_vector(EXCCODE_INT, using_rollback_handler() ?
- rollback_handle_int : handle_int);
+ set_except_vector(EXCCODE_INT, using_skipover_handler() ?
+ skipover_handle_int : handle_int);
set_except_vector(EXCCODE_MOD, handle_tlbm);
set_except_vector(EXCCODE_TLBL, handle_tlbl);
set_except_vector(EXCCODE_TLBS, handle_tlbs);
diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c
index 34495446e051..71829cb7bc81 100644
--- a/arch/parisc/math-emu/driver.c
+++ b/arch/parisc/math-emu/driver.c
@@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs)
memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) {
- force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
- (void __user *) regs->iaoq[0]);
- return -1;
+ int sig = signalcode >> 24;
+
+ if (sig == SIGFPE) {
+ /*
+ * Clear floating point trap bit to avoid trapping
+ * again on the first floating-point instruction in
+ * the userspace signal handler.
+ */
+ regs->fr[0] &= ~(1ULL << 38);
+ }
+ force_sig_fault(sig, signalcode & 0xffffff,
+ (void __user *) regs->iaoq[0]);
+ return -1;
}
return signalcode ? -1 : 0;
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 1db60fe13802..3d8dc822282a 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -234,10 +234,8 @@ fi
# suppress some warnings in recent ld versions
nowarn="-z noexecstack"
-if ! ld_is_lld; then
- if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
- nowarn="$nowarn --no-warn-rwx-segments"
- fi
+if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
+ nowarn="$nowarn --no-warn-rwx-segments"
fi
platformo=$object/"$platform".o
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 34a5aec4908f..126bf3b06ab7 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -258,10 +258,6 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
break;
}
}
- if (i == hdr->e_shnum) {
- pr_err("%s: doesn't contain __patchable_function_entries.\n", me->name);
- return -ENOEXEC;
- }
#endif
pr_debug("Looks like a total of %lu stubs, max\n", relocs);
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 311e2112d782..9f764bc42b8c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -976,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
return 0;
}
-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
{
if (radix_enabled())
@@ -984,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
return false;
}
+#endif
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
unsigned long addr, unsigned long next)
@@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
pmd_t *pmd;
pte_t *pte;
+	/*
+	 * Make sure we align the start vmemmap addr so that we calculate
+	 * the correct start_pfn in the altmap boundary check to decide
+	 * whether to use altmap or RAM based backing memory allocation.
+	 * Also the address needs to be aligned for the set_pte operation.
+	 *
+	 * If the start addr is already PMD_SIZE aligned we will try to use
+	 * a pmd mapping. We don't want to be too aggressive here because
+	 * that will cause more allocations in RAM. So only if the namespace
+	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+	 */
+
+ start = ALIGN_DOWN(start, PAGE_SIZE);
for (addr = start; addr < end; addr = next) {
next = pmd_addr_end(addr, end);
@@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
* in altmap block allocation failures, in which case
* we fallback to RAM for vmemmap allocation.
*/
- if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
- altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+ if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+ altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
/*
* make sure we don't create altmap mappings
* covering things outside the device.
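Condensed, the populate loop above now decides per step as follows (a sketch of the control flow, not a drop-in; PMD_SIZE value illustrative):

    #define PMD_SIZE (1UL << 21)   /* typical 2 MiB huge-page step */

    /* Use a huge (PMD) mapping only when the address is PMD-aligned and,
     * if a device-provided altmap is in use, the allocation would not
     * spill past the altmap's boundary. */
    static bool want_pmd_mapping(unsigned long addr, bool have_altmap,
                                 bool crosses_altmap)
    {
        if (addr & (PMD_SIZE - 1))
            return false;           /* unaligned: fall back to PTEs */
        if (have_altmap && crosses_altmap)
            return false;           /* don't map beyond the device */
        return true;
    }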
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 3fbe0295ce14..95d7ba73d43d 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -17,7 +17,7 @@ config PPC_POWERNV
select MMU_NOTIFIER
select FORCE_SMP
select ARCH_SUPPORTS_PER_VMA_LOCK
- select PPC_RADIX_BROADCAST_TLBIE
+ select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
default y
config OPAL_PRD
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index a934c2a262f6..fa3c2fff082a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -23,7 +23,7 @@ config PPC_PSERIES
select FORCE_SMP
select SWIOTLB
select ARCH_SUPPORTS_PER_VMA_LOCK
- select PPC_RADIX_BROADCAST_TLBIE
+ select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
default y
config PARAVIRT
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 7c244de77180..15d8f75902f8 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -275,6 +275,9 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
unsigned long pmm;
u8 pmlen;
+ if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+ return -EINVAL;
+
if (is_compat_thread(ti))
return -EINVAL;
@@ -330,6 +333,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
struct thread_info *ti = task_thread_info(task);
long ret = 0;
+ if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+ return -EINVAL;
+
if (is_compat_thread(ti))
return -EINVAL;
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 8ff8e8b36524..9c83848797a7 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
DO_ERROR_INFO(do_trap_load_fault,
SIGSEGV, SEGV_ACCERR, "load access fault");
-asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+enum misaligned_access_type {
+ MISALIGNED_STORE,
+ MISALIGNED_LOAD,
+};
+static const struct {
+ const char *type_str;
+ int (*handler)(struct pt_regs *regs);
+} misaligned_handler[] = {
+ [MISALIGNED_STORE] = {
+ .type_str = "Oops - store (or AMO) address misaligned",
+ .handler = handle_misaligned_store,
+ },
+ [MISALIGNED_LOAD] = {
+ .type_str = "Oops - load address misaligned",
+ .handler = handle_misaligned_load,
+ },
+};
+
+static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
{
+ irqentry_state_t state;
+
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
+ local_irq_enable();
+ } else {
+ state = irqentry_nmi_enter(regs);
+ }
- if (handle_misaligned_load(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - load address misaligned");
+ if (misaligned_handler[type].handler(regs))
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+ misaligned_handler[type].type_str);
+ if (user_mode(regs)) {
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
- irqentry_state_t state = irqentry_nmi_enter(regs);
-
- if (handle_misaligned_load(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - load address misaligned");
-
irqentry_nmi_exit(regs, state);
}
}
-asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
- if (user_mode(regs)) {
- irqentry_enter_from_user_mode(regs);
-
- if (handle_misaligned_store(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - store (or AMO) address misaligned");
-
- irqentry_exit_to_user_mode(regs);
- } else {
- irqentry_state_t state = irqentry_nmi_enter(regs);
-
- if (handle_misaligned_store(regs))
- do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
- "Oops - store (or AMO) address misaligned");
+ do_trap_misaligned(regs, MISALIGNED_LOAD);
+}
- irqentry_nmi_exit(regs, state);
- }
+asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
+{
+ do_trap_misaligned(regs, MISALIGNED_STORE);
}
+
DO_ERROR_INFO(do_trap_store_fault,
SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 4354c87c0376..77c788660223 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -88,6 +88,13 @@
#define INSN_MATCH_C_FSWSP 0xe002
#define INSN_MASK_C_FSWSP 0xe003
+#define INSN_MATCH_C_LHU 0x8400
+#define INSN_MASK_C_LHU 0xfc43
+#define INSN_MATCH_C_LH 0x8440
+#define INSN_MASK_C_LH 0xfc43
+#define INSN_MATCH_C_SH 0x8c00
+#define INSN_MASK_C_SH 0xfc43
+
#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
#if defined(CONFIG_64BIT)
@@ -268,7 +275,7 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
int __ret; \
\
if (user_mode(regs)) { \
- __ret = __get_user(insn, (type __user *) insn_addr); \
+ __ret = get_user(insn, (type __user *) insn_addr); \
} else { \
insn = *(type *)insn_addr; \
__ret = 0; \
@@ -431,6 +438,13 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
fp = 1;
len = 4;
#endif
+ } else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
+ len = 2;
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
+ len = 2;
+ shift = 8 * (sizeof(ulong) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
} else {
regs->epc = epc;
return -1;
@@ -530,6 +544,9 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
len = 4;
val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
+ } else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
+ len = 2;
+ val.data_ulong = GET_RS2S(insn, regs);
} else {
regs->epc = epc;
return -1;
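The small-looking change from __get_user() to get_user() in the insn-fetch macro earlier in this file's diff is a security fix: the double-underscore variant skips the user address range check. Roughly, the semantic difference is (a sketch; the kernel's real implementations live in asm/uaccess.h):

    /* __get_user(): unchecked fetch -- assumes the pointer was validated.
     * get_user():   validates the pointer first, then fetches. */
    #define get_user_sketch(x, ptr)                                   \
        (access_ok(ptr, sizeof(*(ptr))) ? __get_user(x, ptr) : -EFAULT)

Since the address here derives from a trapped epc, a crafted user pc pointing at kernel memory must not be dereferenced without that check.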
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 60d684c76c58..02635bac91f1 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -77,6 +77,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
memcpy(cntx, reset_cntx, sizeof(*cntx));
spin_unlock(&vcpu->arch.reset_cntx_lock);
+ memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
kvm_riscv_vcpu_fp_reset(vcpu);
kvm_riscv_vcpu_vector_reset(vcpu);
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 6f2c9ce1b154..24b22f6a9e99 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -38,7 +38,6 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -92,7 +91,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
CONFIG_SLUB_STATS=y
@@ -395,6 +393,9 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -405,6 +406,9 @@ CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_GATE=m
CONFIG_NET_TC_SKB_EXT=y
CONFIG_DNS_RESOLVER=y
@@ -628,8 +632,16 @@ CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VDPA=m
+CONFIG_VDPA_SIM=m
+CONFIG_VDPA_SIM_NET=m
+CONFIG_VDPA_SIM_BLOCK=m
+CONFIG_VDPA_USER=m
+CONFIG_MLX5_VDPA_NET=m
+CONFIG_VP_VDPA=m
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -654,7 +666,6 @@ CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=y
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -724,11 +735,10 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
@@ -741,6 +751,8 @@ CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -756,7 +768,6 @@ CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
@@ -801,7 +812,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
@@ -812,9 +822,9 @@ CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_CRYPTO_KRB5=m
+CONFIG_CRYPTO_KRB5_SELFTESTS=y
CONFIG_CORDIC=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index f18a7d97ac21..2b8b42d569bc 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -36,7 +36,6 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -86,7 +85,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
# CONFIG_COMPAT_BRK is not set
@@ -385,6 +383,9 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -395,6 +396,9 @@ CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_GATE=m
CONFIG_NET_TC_SKB_EXT=y
CONFIG_DNS_RESOLVER=y
@@ -618,8 +622,16 @@ CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VDPA=m
+CONFIG_VDPA_SIM=m
+CONFIG_VDPA_SIM_NET=m
+CONFIG_VDPA_SIM_BLOCK=m
+CONFIG_VDPA_USER=m
+CONFIG_MLX5_VDPA_NET=m
+CONFIG_VP_VDPA=m
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -641,7 +653,6 @@ CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
@@ -711,6 +722,7 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
@@ -742,7 +754,6 @@ CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
@@ -788,7 +799,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
@@ -799,10 +809,10 @@ CONFIG_PKEY_UV=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_CRYPTO_KRB5=m
+CONFIG_CRYPTO_KRB5_SELFTESTS=y
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 853b2326a171..8163c1702720 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -70,7 +70,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_FS=y
CONFIG_PANIC_ON_OOPS=y
-# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dd291c9ad6a6..9980c17ba22d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -602,7 +602,8 @@ SYM_CODE_START(stack_invalid)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
- stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+ GET_LC %r2
+ mvc __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_invalid
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 9a929bbcc397..241f7251c873 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -428,6 +428,8 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
return;
}
zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
+ if (IS_ERR(zdev))
+ return;
list_add_tail(&zdev->entry, scan_list);
}
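zpci_create_device() returns an ERR_PTR() on failure rather than NULL, so list_add_tail() on the raw return value would dereference a poisoned pointer. The convention, in brief (these two helpers mirror <linux/err.h>):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* Pointers in the top 4095 bytes of the address space encode -errno. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline bool IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }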
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
index 3a08f9029a3f..1c6e0ae41b0c 100644
--- a/arch/um/include/asm/uaccess.h
+++ b/arch/um/include/asm/uaccess.h
@@ -55,6 +55,7 @@ do { \
goto err_label; \
} \
*((type *)dst) = get_unaligned((type *)(src)); \
+ barrier(); \
current->thread.segv_continue = NULL; \
} while (0)
@@ -66,6 +67,7 @@ do { \
if (__faulted) \
goto err_label; \
put_unaligned(*((type *)src), (type *)(dst)); \
+ barrier(); \
current->thread.segv_continue = NULL; \
} while (0)
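The added barrier() keeps the compiler from reordering the faulting access past the segv_continue reset: the recovery window must close only after the access has really happened. barrier() is just an empty asm with a memory clobber:

    /* The kernel's compiler barrier (include/linux/compiler.h): emits no
     * instructions, but the "memory" clobber forbids the compiler from
     * moving memory accesses across it. */
    #define barrier() __asm__ __volatile__("" : : : "memory")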
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index ce073150dc20..ef2272e92a43 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -225,20 +225,20 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
panic("Failed to sync kernel TLBs: %d", err);
goto out;
}
- else if (current->mm == NULL) {
- if (current->pagefault_disabled) {
- if (!mc) {
- show_regs(container_of(regs, struct pt_regs, regs));
- panic("Segfault with pagefaults disabled but no mcontext");
- }
- if (!current->thread.segv_continue) {
- show_regs(container_of(regs, struct pt_regs, regs));
- panic("Segfault without recovery target");
- }
- mc_set_rip(mc, current->thread.segv_continue);
- current->thread.segv_continue = NULL;
- goto out;
+ else if (current->pagefault_disabled) {
+ if (!mc) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault with pagefaults disabled but no mcontext");
}
+ if (!current->thread.segv_continue) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault without recovery target");
+ }
+ mc_set_rip(mc, current->thread.segv_continue);
+ current->thread.segv_continue = NULL;
+ goto out;
+ }
+ else if (current->mm == NULL) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with no mm");
}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index d4b3b6742ec8..2f5ee045bc7a 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -477,7 +477,7 @@ void *text_poke_copy(void *addr, const void *opcode, size_t len)
return text_poke(addr, opcode, len);
}
-void text_poke_sync(void)
+void smp_text_poke_sync_each_cpu(void)
{
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4b9f378e05f6..297e33cfa760 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -153,6 +153,7 @@ config X86
select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64
select ARCH_WANTS_THP_SWAP if X86_64
select ARCH_HAS_PARANOID_L1D_FLUSH
+ select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select BUILDTIME_TABLE_SORT
select CLKEVT_I8253
select CLOCKSOURCE_WATCHDOG
@@ -2368,6 +2369,7 @@ config STRICT_SIGALTSTACK_SIZE
config CFI_AUTO_DEFAULT
bool "Attempt to use FineIBT by default at boot time"
depends on FINEIBT
+ depends on !RUST || RUSTC_VERSION >= 108800
default y
help
Attempt to use FineIBT by default at boot time. If enabled,
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 594723005d95..53daf4654f6c 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -281,6 +281,7 @@ archprepare: $(cpufeaturemasks.hdr)
###
# Kernel objects
+core-y += arch/x86/boot/startup/
libs-y += arch/x86/lib/
# drivers-y are linked after core-y
diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
index aa9b96457584..cf4a6155714e 100644
--- a/arch/x86/boot/bioscall.S
+++ b/arch/x86/boot/bioscall.S
@@ -32,7 +32,7 @@ intcall:
movw %dx, %si
movw %sp, %di
movw $11, %cx
- rep; movsl
+ rep movsl
/* Pop full state from the stack */
popal
@@ -67,7 +67,7 @@ intcall:
jz 4f
movw %sp, %si
movw $11, %cx
- rep; movsl
+ rep movsl
4: addw $44, %sp
/* Restore state and return */
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 38f17a1e1e36..60580836daf7 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -34,7 +34,7 @@
extern struct setup_header hdr;
extern struct boot_params boot_params;
-#define cpu_relax() asm volatile("rep; nop")
+#define cpu_relax() asm volatile("pause")
static inline void io_delay(void)
{
@@ -155,14 +155,14 @@ static inline void wrgs32(u32 v, addr_t addr)
static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
{
bool diff;
- asm volatile("fs; repe; cmpsb" CC_SET(nz)
+ asm volatile("fs repe cmpsb" CC_SET(nz)
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
{
bool diff;
- asm volatile("gs; repe; cmpsb" CC_SET(nz)
+ asm volatile("gs repe cmpsb" CC_SET(nz)
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
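The cpu_relax() change is byte-for-byte equivalent: PAUSE is encoded as the REP prefix (0xF3) followed by NOP (0x90), which is exactly what "rep; nop" assembled to. The mnemonic spelling just states the intent directly:

    /* Both functions assemble to the same two bytes, F3 90. */
    static inline void cpu_relax_old(void) { __asm__ __volatile__("rep; nop"); }
    static inline void cpu_relax_new(void) { __asm__ __volatile__("pause"); }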
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fdbce022db55..f4f7b22d8113 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -44,10 +44,10 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
-# sev.c indirectly includes inat-table.h which is generated during
+# sev-handle-vc.c indirectly includes inat-table.c which is generated during
# compilation and stored in $(objtree). Add the directory to the includes so
# that the compiler finds it even with out-of-tree builds (make O=/some/path).
-CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
+CFLAGS_sev-handle-vc.o += -I$(objtree)/arch/x86/lib/
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
@@ -73,7 +73,7 @@ LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABbCDGRSTtVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
quiet_cmd_voffset = VOFFSET $@
cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@@ -96,8 +96,7 @@ ifdef CONFIG_X86_64
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
- vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
- vmlinux-objs-y += $(obj)/la57toggle.o
+ vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o $(obj)/sev-handle-vc.o
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
@@ -106,6 +105,7 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-libs-$(CONFIG_X86_64) += $(objtree)/arch/x86/boot/startup/lib.a
$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index eafd4f185e77..d9dab940ff62 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -35,7 +35,6 @@
#include <asm/bootparam.h>
#include <asm/desc_defs.h>
#include <asm/trapnr.h>
-#include "pgtable.h"
/*
* Fix alignment at 16 bytes. Following CONFIG_FUNCTION_ALIGNMENT will result
diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
index f676156d9f3d..0e9f84ab4bdc 100644
--- a/arch/x86/boot/compressed/mem.c
+++ b/arch/x86/boot/compressed/mem.c
@@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
- static bool sevsnp;
-
/* Platform-specific memory-acceptance call goes here */
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
- } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
- sevsnp = true;
+ } else if (early_is_sevsnp_guest()) {
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 1cdcd4aaf395..94b5991da001 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -14,7 +14,6 @@
#include "misc.h"
#include "error.h"
-#include "pgtable.h"
#include "../string.h"
#include "../voffset.h"
#include <asm/bootparam_utils.h>
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index dd8d1a85f671..db1048621ea2 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -136,6 +136,9 @@ static inline void console_init(void)
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
+struct es_em_ctxt;
+struct insn;
+
void sev_enable(struct boot_params *bp);
void snp_check_features(void);
void sev_es_shutdown_ghcb(void);
@@ -143,6 +146,11 @@ extern bool sev_es_check_ghcb_fault(unsigned long address);
void snp_set_page_private(unsigned long paddr);
void snp_set_page_shared(unsigned long paddr);
void sev_prep_identity_maps(unsigned long top_level_pgt);
+
+enum es_result vc_decode_insn(struct es_em_ctxt *ctxt);
+bool insn_has_rep_prefix(struct insn *insn);
+void sev_insn_decode_init(void);
+bool early_setup_ghcb(void);
#else
static inline void sev_enable(struct boot_params *bp)
{
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
deleted file mode 100644
index 6d595abe06b3..000000000000
--- a/arch/x86/boot/compressed/pgtable.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef BOOT_COMPRESSED_PAGETABLE_H
-#define BOOT_COMPRESSED_PAGETABLE_H
-
-#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)
-
-#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0
-
-#ifndef __ASSEMBLER__
-
-extern unsigned long *trampoline_32bit;
-
-extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
-
-extern const u16 trampoline_ljmp_imm_offset;
-
-#endif /* __ASSEMBLER__ */
-#endif /* BOOT_COMPRESSED_PAGETABLE_H */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index d8c5de40669d..5a6c7a190e5b 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -4,7 +4,6 @@
#include <asm/bootparam_utils.h>
#include <asm/e820/types.h>
#include <asm/processor.h>
-#include "pgtable.h"
#include "../string.h"
#include "efi.h"
diff --git a/arch/x86/boot/compressed/sev-handle-vc.c b/arch/x86/boot/compressed/sev-handle-vc.c
new file mode 100644
index 000000000000..89dd02de2a0f
--- /dev/null
+++ b/arch/x86/boot/compressed/sev-handle-vc.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "misc.h"
+#include "sev.h"
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/insn.h>
+#include <asm/pgtable_types.h>
+#include <asm/ptrace.h>
+#include <asm/sev.h>
+#include <asm/trapnr.h>
+#include <asm/trap_pf.h>
+#include <asm/fpu/xcr.h>
+
+#define __BOOT_COMPRESSED
+
+/* Basic instruction decoding support needed */
+#include "../../lib/inat.c"
+#include "../../lib/insn.c"
+
+/*
+ * Copy a version of this function here - insn-eval.c can't be used in
+ * pre-decompression code.
+ */
+bool insn_has_rep_prefix(struct insn *insn)
+{
+ insn_byte_t p;
+ int i;
+
+ insn_get_prefixes(insn);
+
+ for_each_insn_prefix(insn, i, p) {
+ if (p == 0xf2 || p == 0xf3)
+ return true;
+ }
+
+ return false;
+}
+
+enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ int ret;
+
+ memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+
+ ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+ if (ret < 0)
+ return ES_DECODE_FAILED;
+
+ return ES_OK;
+}
+
+extern void sev_insn_decode_init(void) __alias(inat_init_tables);
+
+/*
+ * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
+ * doesn't use segments.
+ */
+static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
+{
+ return 0UL;
+}
+
+static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ void *dst, char *buf, size_t size)
+{
+ memcpy(dst, buf, size);
+
+ return ES_OK;
+}
+
+static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ void *src, char *buf, size_t size)
+{
+ memcpy(buf, src, size);
+
+ return ES_OK;
+}
+
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ return ES_OK;
+}
+
+static bool fault_in_kernel_space(unsigned long address)
+{
+ return false;
+}
+
+#define sev_printk(fmt, ...)
+
+#include "../../coco/sev/vc-shared.c"
+
+void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
+{
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+
+ if (!boot_ghcb && !early_setup_ghcb())
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+
+ vc_ghcb_invalidate(boot_ghcb);
+ result = vc_init_em_ctxt(&ctxt, regs, exit_code);
+ if (result != ES_OK)
+ goto finish;
+
+ result = vc_check_opcode_bytes(&ctxt, exit_code);
+ if (result != ES_OK)
+ goto finish;
+
+ switch (exit_code) {
+ case SVM_EXIT_RDTSC:
+ case SVM_EXIT_RDTSCP:
+ result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
+ break;
+ case SVM_EXIT_IOIO:
+ result = vc_handle_ioio(boot_ghcb, &ctxt);
+ break;
+ case SVM_EXIT_CPUID:
+ result = vc_handle_cpuid(boot_ghcb, &ctxt);
+ break;
+ default:
+ result = ES_UNSUPPORTED;
+ break;
+ }
+
+finish:
+ if (result == ES_OK)
+ vc_finish_insn(&ctxt);
+ else if (result != ES_RETRY)
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+}
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 89ba168f4f0f..612b443296d3 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -24,96 +24,11 @@
#include <asm/cpuid.h>
#include "error.h"
-#include "../msr.h"
+#include "sev.h"
static struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;
-/*
- * Copy a version of this function here - insn-eval.c can't be used in
- * pre-decompression code.
- */
-static bool insn_has_rep_prefix(struct insn *insn)
-{
- insn_byte_t p;
- int i;
-
- insn_get_prefixes(insn);
-
- for_each_insn_prefix(insn, i, p) {
- if (p == 0xf2 || p == 0xf3)
- return true;
- }
-
- return false;
-}
-
-/*
- * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
- * doesn't use segments.
- */
-static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
-{
- return 0UL;
-}
-
-static inline u64 sev_es_rd_ghcb_msr(void)
-{
- struct msr m;
-
- boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
-
- return m.q;
-}
-
-static inline void sev_es_wr_ghcb_msr(u64 val)
-{
- struct msr m;
-
- m.q = val;
- boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
-}
-
-static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
-{
- char buffer[MAX_INSN_SIZE];
- int ret;
-
- memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
-
- ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
- if (ret < 0)
- return ES_DECODE_FAILED;
-
- return ES_OK;
-}
-
-static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
- void *dst, char *buf, size_t size)
-{
- memcpy(dst, buf, size);
-
- return ES_OK;
-}
-
-static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
- void *src, char *buf, size_t size)
-{
- memcpy(buf, src, size);
-
- return ES_OK;
-}
-
-static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
-{
- return ES_OK;
-}
-
-static bool fault_in_kernel_space(unsigned long address)
-{
- return false;
-}
-
#undef __init
#define __init
@@ -122,24 +37,27 @@ static bool fault_in_kernel_space(unsigned long address)
#define __BOOT_COMPRESSED
-/* Basic instruction decoding support needed */
-#include "../../lib/inat.c"
-#include "../../lib/insn.c"
+extern struct svsm_ca *boot_svsm_caa;
+extern u64 boot_svsm_caa_pa;
-/* Include code for early handlers */
-#include "../../coco/sev/shared.c"
-
-static struct svsm_ca *svsm_get_caa(void)
+struct svsm_ca *svsm_get_caa(void)
{
return boot_svsm_caa;
}
-static u64 svsm_get_caa_pa(void)
+u64 svsm_get_caa_pa(void)
{
return boot_svsm_caa_pa;
}
-static int svsm_perform_call_protocol(struct svsm_call *call)
+int svsm_perform_call_protocol(struct svsm_call *call);
+
+u8 snp_vmpl;
+
+/* Include code for early handlers */
+#include "../../boot/startup/sev-shared.c"
+
+int svsm_perform_call_protocol(struct svsm_call *call)
{
struct ghcb *ghcb;
int ret;
@@ -157,7 +75,7 @@ static int svsm_perform_call_protocol(struct svsm_call *call)
return ret;
}
-bool sev_snp_enabled(void)
+static bool sev_snp_enabled(void)
{
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
}
@@ -212,7 +130,7 @@ void snp_set_page_shared(unsigned long paddr)
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}
-static bool early_setup_ghcb(void)
+bool early_setup_ghcb(void)
{
if (set_page_decrypted((unsigned long)&boot_ghcb_page))
return false;
@@ -223,7 +141,7 @@ static bool early_setup_ghcb(void)
boot_ghcb = &boot_ghcb_page;
/* Initialize lookup tables for the instruction decoder */
- inat_init_tables();
+ sev_insn_decode_init();
/* SNP guest requires the GHCB GPA must be registered */
if (sev_snp_enabled())
@@ -296,46 +214,6 @@ bool sev_es_check_ghcb_fault(unsigned long address)
return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}
-void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
-{
- struct es_em_ctxt ctxt;
- enum es_result result;
-
- if (!boot_ghcb && !early_setup_ghcb())
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-
- vc_ghcb_invalidate(boot_ghcb);
- result = vc_init_em_ctxt(&ctxt, regs, exit_code);
- if (result != ES_OK)
- goto finish;
-
- result = vc_check_opcode_bytes(&ctxt, exit_code);
- if (result != ES_OK)
- goto finish;
-
- switch (exit_code) {
- case SVM_EXIT_RDTSC:
- case SVM_EXIT_RDTSCP:
- result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
- break;
- case SVM_EXIT_IOIO:
- result = vc_handle_ioio(boot_ghcb, &ctxt);
- break;
- case SVM_EXIT_CPUID:
- result = vc_handle_cpuid(boot_ghcb, &ctxt);
- break;
- default:
- result = ES_UNSUPPORTED;
- break;
- }
-
-finish:
- if (result == ES_OK)
- vc_finish_insn(&ctxt);
- else if (result != ES_RETRY)
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-}
-
/*
* SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
* guest side implementation for proper functioning of the guest. If any
@@ -645,3 +523,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
sev_verify_cbit(top_level_pgt);
}
+
+bool early_is_sevsnp_guest(void)
+{
+ static bool sevsnp;
+
+ if (sevsnp)
+ return true;
+
+ if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
+ return false;
+
+ sevsnp = true;
+
+ if (!snp_vmpl) {
+ unsigned int eax, ebx, ecx, edx;
+
+ /*
+ * CPUID Fn8000_001F_EAX[28] - SVSM support
+ */
+ eax = 0x8000001f;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (eax & BIT(28)) {
+ struct msr m;
+
+ /* Obtain the address of the calling area to use */
+ boot_rdmsr(MSR_SVSM_CAA, &m);
+ boot_svsm_caa = (void *)m.q;
+ boot_svsm_caa_pa = m.q;
+
+ /*
+ * The real VMPL level cannot be discovered, but the
+ * memory acceptance routines make no use of that so
+ * any non-zero value suffices here.
+ */
+ snp_vmpl = U8_MAX;
+ }
+ }
+ return true;
+}
diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
index 4e463f33186d..92f79c21939c 100644
--- a/arch/x86/boot/compressed/sev.h
+++ b/arch/x86/boot/compressed/sev.h
@@ -10,15 +10,34 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
-bool sev_snp_enabled(void);
+#include "../msr.h"
+
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 sev_get_status(void);
+bool early_is_sevsnp_guest(void);
+
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+ struct msr m;
+
+ boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+
+ return m.q;
+}
+
+static inline void sev_es_wr_ghcb_msr(u64 val)
+{
+ struct msr m;
+
+ m.q = val;
+ boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+}
#else
-static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 sev_get_status(void) { return 0; }
+static inline bool early_is_sevsnp_guest(void) { return false; }
#endif
diff --git a/arch/x86/boot/compressed/string.c b/arch/x86/boot/compressed/string.c
index 81fc1eaa3229..9af19d9614cb 100644
--- a/arch/x86/boot/compressed/string.c
+++ b/arch/x86/boot/compressed/string.c
@@ -15,9 +15,9 @@ static void *____memcpy(void *dest, const void *src, size_t n)
{
int d0, d1, d2;
asm volatile(
- "rep ; movsl\n\t"
+ "rep movsl\n\t"
"movl %4,%%ecx\n\t"
- "rep ; movsb\n\t"
+ "rep movsb"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
: "memory");
@@ -29,9 +29,9 @@ static void *____memcpy(void *dest, const void *src, size_t n)
{
long d0, d1, d2;
asm volatile(
- "rep ; movsq\n\t"
+ "rep movsq\n\t"
"movq %4,%%rcx\n\t"
- "rep ; movsb\n\t"
+ "rep movsb"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src)
: "memory");
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 6afd05e819d2..3973a67cd04e 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -22,10 +22,10 @@ SYM_FUNC_START_NOALIGN(memcpy)
movw %dx, %si
pushw %cx
shrw $2, %cx
- rep; movsl
+ rep movsl
popw %cx
andw $3, %cx
- rep; movsb
+ rep movsb
popw %di
popw %si
retl
@@ -38,10 +38,10 @@ SYM_FUNC_START_NOALIGN(memset)
imull $0x01010101,%eax
pushw %cx
shrw $2, %cx
- rep; stosl
+ rep stosl
popw %cx
andw $3, %cx
- rep; stosb
+ rep stosb
popw %di
retl
SYM_FUNC_END(memset)
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index b5c79f43359b..9cb91421b4e4 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -585,7 +585,7 @@ start_of_setup:
xorl %eax, %eax
subw %di, %cx
shrw $2, %cx
- rep; stosl
+ rep stosl
# Jump to C code (should not return)
calll main
diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile
new file mode 100644
index 000000000000..b514f7e81332
--- /dev/null
+++ b/arch/x86/boot/startup/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KBUILD_AFLAGS += -D__DISABLE_EXPORTS
+KBUILD_CFLAGS += -D__DISABLE_EXPORTS -mcmodel=small -fPIC \
+ -Os -DDISABLE_BRANCH_PROFILING \
+ $(DISABLE_STACKLEAK_PLUGIN) \
+ -fno-stack-protector -D__NO_FORTIFY \
+ -fno-jump-tables \
+ -include $(srctree)/include/linux/hidden.h
+
+# disable ftrace hooks and LTO
+KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+obj-$(CONFIG_X86_64) += gdt_idt.o map_kernel.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sme.o sev-startup.o
+
+lib-$(CONFIG_X86_64) += la57toggle.o
+lib-$(CONFIG_EFI_MIXED) += efi-mixed.o
+
+#
+# Disable objtool validation for all library code, which is intended
+# to be linked into the decompressor or the EFI stub but not vmlinux
+#
+$(patsubst %.o,$(obj)/%.o,$(lib-y)): OBJECT_FILES_NON_STANDARD := y
diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/arch/x86/boot/startup/efi-mixed.S
index e04ed99bc449..e04ed99bc449 100644
--- a/drivers/firmware/efi/libstub/x86-mixed.S
+++ b/arch/x86/boot/startup/efi-mixed.S
diff --git a/arch/x86/boot/startup/gdt_idt.c b/arch/x86/boot/startup/gdt_idt.c
new file mode 100644
index 000000000000..a3112a69b06a
--- /dev/null
+++ b/arch/x86/boot/startup/gdt_idt.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#include <asm/desc.h>
+#include <asm/init.h>
+#include <asm/setup.h>
+#include <asm/sev.h>
+#include <asm/trapnr.h>
+
+/*
+ * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
+ * used until the idt_table takes over. On the boot CPU this happens in
+ * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
+ * this happens in the functions called from head_64.S.
+ *
+ * The idt_table can't be used that early because all the code modifying it is
+ * in idt.c and can be instrumented by tracing or KASAN, which both don't work
+ * during early CPU bringup. Also the idt_table has the runtime vectors
+ * configured which require certain CPU state to be setup already (like TSS),
+ * which also hasn't happened yet in early CPU bringup.
+ */
+static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
+
+/* This may run while still in the direct mapping */
+void __head startup_64_load_idt(void *vc_handler)
+{
+ struct desc_ptr desc = {
+ .address = (unsigned long)rip_rel_ptr(bringup_idt_table),
+ .size = sizeof(bringup_idt_table) - 1,
+ };
+ struct idt_data data;
+ gate_desc idt_desc;
+
+ /* @vc_handler is set only for a VMM Communication Exception */
+ if (vc_handler) {
+ init_idt_data(&data, X86_TRAP_VC, vc_handler);
+ idt_init_desc(&idt_desc, &data);
+ native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
+ }
+
+ native_load_idt(&desc);
+}
+
+/*
+ * Setup boot CPU state needed before kernel switches to virtual addresses.
+ */
+void __head startup_64_setup_gdt_idt(void)
+{
+ struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page);
+ void *handler = NULL;
+
+ struct desc_ptr startup_gdt_descr = {
+ .address = (unsigned long)gp->gdt,
+ .size = GDT_SIZE - 1,
+ };
+
+ /* Load GDT */
+ native_load_gdt(&startup_gdt_descr);
+
+ /* New GDT is live - reload data segment registers */
+ asm volatile("movl %%eax, %%ds\n"
+ "movl %%eax, %%ss\n"
+ "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+ handler = rip_rel_ptr(vc_no_ghcb);
+
+ startup_64_load_idt(handler);
+}
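startup_64_load_idt() and startup_64_setup_gdt_idt() take pains to use rip_rel_ptr() because this code runs from the early 1:1 mapping, where link-time absolute addresses are wrong. The helper is a one-liner along these lines (from asm/asm.h; treat the exact form as an assumption):

    /* Force a RIP-relative lea so the address is correct in whichever
     * mapping the code happens to execute from. */
    static __always_inline void *rip_rel_ptr(void *p)
    {
        asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
        return p;
    }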
diff --git a/arch/x86/boot/compressed/la57toggle.S b/arch/x86/boot/startup/la57toggle.S
index 9ee002387eb1..370075b4d95b 100644
--- a/arch/x86/boot/compressed/la57toggle.S
+++ b/arch/x86/boot/startup/la57toggle.S
@@ -5,7 +5,6 @@
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
-#include "pgtable.h"
/*
* This is the 32-bit trampoline that will be copied over to low memory. It
diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
new file mode 100644
index 000000000000..099ae2559336
--- /dev/null
+++ b/arch/x86/boot/startup/map_kernel.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+#include <asm/init.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sev.h>
+
+extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
+extern unsigned int next_early_pgt;
+
+static inline bool check_la57_support(void)
+{
+ if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+ return false;
+
+ /*
+ * 5-level paging is detected and enabled during the kernel
+ * decompression stage, so only check whether it was enabled there.
+ */
+ if (!(native_read_cr4() & X86_CR4_LA57))
+ return false;
+
+ __pgtable_l5_enabled = 1;
+ pgdir_shift = 48;
+ ptrs_per_p4d = 512;
+ page_offset_base = __PAGE_OFFSET_BASE_L5;
+ vmalloc_base = __VMALLOC_BASE_L5;
+ vmemmap_base = __VMEMMAP_BASE_L5;
+
+ return true;
+}
+
+static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+ pmdval_t *pmd,
+ unsigned long p2v_offset)
+{
+ unsigned long paddr, paddr_end;
+ int i;
+
+ /* Encrypt the kernel and related areas (if SME is active) */
+ sme_encrypt_kernel(bp);
+
+ /*
+ * Clear the memory encryption mask from the .bss..decrypted section.
+ * The bss section will be memset to zero later in the initialization so
+ * there is no need to zero it after changing the memory encryption
+ * attribute.
+ */
+ if (sme_get_me_mask()) {
+ paddr = (unsigned long)rip_rel_ptr(__start_bss_decrypted);
+ paddr_end = (unsigned long)rip_rel_ptr(__end_bss_decrypted);
+
+ for (; paddr < paddr_end; paddr += PMD_SIZE) {
+ /*
+ * On SNP, transition the page to shared in the RMP table so that
+ * it is consistent with the page table attribute change.
+ *
+ * __start_bss_decrypted has a virtual address in the high range
+ * mapping (kernel .text). PVALIDATE, by way of
+ * early_snp_set_memory_shared(), requires a valid virtual
+ * address, but the kernel is currently running off of the identity
+ * mapping, so use the PA to get a *currently* valid virtual address.
+ */
+ early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);
+
+ i = pmd_index(paddr - p2v_offset);
+ pmd[i] -= sme_get_me_mask();
+ }
+ }
+
+ /*
+ * Return the SME encryption mask (if SME is active) to be used as a
+ * modifier for the initial pgdir entry programmed into CR3.
+ */
+ return sme_get_me_mask();
+}
+
+/*
+ * This code is compiled using PIC codegen because it will execute from the
+ * early 1:1 mapping of memory, which deviates from the mapping expected by the
+ * linker. Due to this deviation, taking the address of a global variable will
+ * produce an ambiguous result when using the plain & operator. Instead,
+ * rip_rel_ptr() must be used, which will return the RIP-relative address in
+ * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
+ * subtracting p2v_offset from the RIP-relative address.
+ */
+unsigned long __head __startup_64(unsigned long p2v_offset,
+ struct boot_params *bp)
+{
+ pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
+ unsigned long physaddr = (unsigned long)rip_rel_ptr(_text);
+ unsigned long va_text, va_end;
+ unsigned long pgtable_flags;
+ unsigned long load_delta;
+ pgdval_t *pgd;
+ p4dval_t *p4d;
+ pudval_t *pud;
+ pmdval_t *pmd, pmd_entry;
+ bool la57;
+ int i;
+
+ la57 = check_la57_support();
+
+ /* Is the address too large? */
+ if (physaddr >> MAX_PHYSMEM_BITS)
+ for (;;);
+
+ /*
+ * Compute the delta between the address I am compiled to run at
+ * and the address I am actually running at.
+ */
+ phys_base = load_delta = __START_KERNEL_map + p2v_offset;
+
+ /* Is the address not 2M aligned? */
+ if (load_delta & ~PMD_MASK)
+ for (;;);
+
+ va_text = physaddr - p2v_offset;
+ va_end = (unsigned long)rip_rel_ptr(_end) - p2v_offset;
+
+ /* Include the SME encryption mask in the fixup value */
+ load_delta += sme_get_me_mask();
+
+ /* Fixup the physical addresses in the page table */
+
+ pgd = rip_rel_ptr(early_top_pgt);
+ pgd[pgd_index(__START_KERNEL_map)] += load_delta;
+
+ if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) {
+ p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt);
+ p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
+
+ pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
+ }
+
+ level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta;
+ level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta;
+
+ for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
+ level2_fixmap_pgt[i].pmd += load_delta;
+
+ /*
+ * Set up the identity mapping for the switchover. These
+ * entries should *NOT* have the global bit set! This also
+ * creates a bunch of nonsense entries but that is fine --
+ * it avoids problems around wraparound.
+ */
+
+ pud = &early_pgts[0]->pmd;
+ pmd = &early_pgts[1]->pmd;
+ next_early_pgt = 2;
+
+ pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
+
+ if (la57) {
+ p4d = &early_pgts[next_early_pgt++]->pmd;
+
+ i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+ pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
+ pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
+
+ i = physaddr >> P4D_SHIFT;
+ p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
+ p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
+ } else {
+ i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+ pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
+ pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
+ }
+
+ i = physaddr >> PUD_SHIFT;
+ pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
+ pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
+
+ pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+ pmd_entry += sme_get_me_mask();
+ pmd_entry += physaddr;
+
+ for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
+ int idx = i + (physaddr >> PMD_SHIFT);
+
+ pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
+ }
+
+ /*
+ * Fix up the kernel text+data virtual addresses. Note that
+ * we might write invalid pmds when the kernel is relocated;
+ * cleanup_highmap() fixes this up along with the mappings
+ * beyond _end.
+ *
+ * Only the region occupied by the kernel image has so far
+ * been checked against the table of usable memory regions
+ * provided by the firmware, so invalidate pages outside that
+ * region. A page table entry that maps to a reserved area of
+ * memory would allow processor speculation into that area,
+ * and on some hardware (particularly the UV platform) even
+ * speculative access to some reserved areas is caught as an
+ * error, causing the BIOS to halt the system.
+ */
+
+ pmd = rip_rel_ptr(level2_kernel_pgt);
+
+ /* invalidate pages before the kernel image */
+ for (i = 0; i < pmd_index(va_text); i++)
+ pmd[i] &= ~_PAGE_PRESENT;
+
+ /* fixup pages that are part of the kernel image */
+ for (; i <= pmd_index(va_end); i++)
+ if (pmd[i] & _PAGE_PRESENT)
+ pmd[i] += load_delta;
+
+ /* invalidate pages after the kernel image */
+ for (; i < PTRS_PER_PMD; i++)
+ pmd[i] &= ~_PAGE_PRESENT;
+
+ return sme_postprocess_startup(bp, pmd, p2v_offset);
+}
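
The index arithmetic in __startup_64(), shifting the physical address by the
per-level amounts and wrapping modulo the table size, can be sanity-checked in
isolation. A stand-alone sketch using the usual 4-level paging constants
(assumed here, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SHIFT	21	/* one PMD entry maps 2 MiB */
    #define PUD_SHIFT	30
    #define PGDIR_SHIFT	39	/* 4-level paging */
    #define PTRS_PER_TBL	512

    int main(void)
    {
    	uint64_t physaddr = 0x1000000;	/* e.g. kernel loaded at 16 MiB */

    	/*
    	 * __startup_64() fills entry i and i + 1 at each upper level, so
    	 * an image that straddles an alignment boundary stays fully mapped.
    	 */
    	printf("pgd %llu, pud %llu, pmd %llu\n",
    	       (unsigned long long)((physaddr >> PGDIR_SHIFT) % PTRS_PER_TBL),
    	       (unsigned long long)((physaddr >> PUD_SHIFT) % PTRS_PER_TBL),
    	       (unsigned long long)((physaddr >> PMD_SHIFT) % PTRS_PER_TBL));
    	return 0;
    }
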
diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/boot/startup/sev-shared.c
index 2e4122f8aa6b..7a706db87b93 100644
--- a/arch/x86/coco/sev/shared.c
+++ b/arch/x86/boot/startup/sev-shared.c
@@ -14,76 +14,23 @@
#ifndef __BOOT_COMPRESSED
#define error(v) pr_err(v)
#define has_cpuflag(f) boot_cpu_has(f)
-#define sev_printk(fmt, ...) printk(fmt, ##__VA_ARGS__)
-#define sev_printk_rtl(fmt, ...) printk_ratelimited(fmt, ##__VA_ARGS__)
#else
#undef WARN
#define WARN(condition, format...) (!!(condition))
-#define sev_printk(fmt, ...)
-#define sev_printk_rtl(fmt, ...)
#undef vc_forward_exception
#define vc_forward_exception(c) panic("SNP: Hypervisor requested exception\n")
#endif
/*
* SVSM related information:
- * When running under an SVSM, the VMPL that Linux is executing at must be
- * non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
- *
* During boot, the page tables are set up as identity mapped and later
* changed to use kernel virtual addresses. Maintain separate virtual and
* physical addresses for the CAA to allow SVSM functions to be used during
* early boot, both with identity mapped virtual addresses and proper kernel
* virtual addresses.
*/
-u8 snp_vmpl __ro_after_init;
-EXPORT_SYMBOL_GPL(snp_vmpl);
-static struct svsm_ca *boot_svsm_caa __ro_after_init;
-static u64 boot_svsm_caa_pa __ro_after_init;
-
-static struct svsm_ca *svsm_get_caa(void);
-static u64 svsm_get_caa_pa(void);
-static int svsm_perform_call_protocol(struct svsm_call *call);
-
-/* I/O parameters for CPUID-related helpers */
-struct cpuid_leaf {
- u32 fn;
- u32 subfn;
- u32 eax;
- u32 ebx;
- u32 ecx;
- u32 edx;
-};
-
-/*
- * Individual entries of the SNP CPUID table, as defined by the SNP
- * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
- */
-struct snp_cpuid_fn {
- u32 eax_in;
- u32 ecx_in;
- u64 xcr0_in;
- u64 xss_in;
- u32 eax;
- u32 ebx;
- u32 ecx;
- u32 edx;
- u64 __reserved;
-} __packed;
-
-/*
- * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
- * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
- * of 64 entries per CPUID table.
- */
-#define SNP_CPUID_COUNT_MAX 64
-
-struct snp_cpuid_table {
- u32 count;
- u32 __reserved1;
- u64 __reserved2;
- struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
-} __packed;
+struct svsm_ca *boot_svsm_caa __ro_after_init;
+u64 boot_svsm_caa_pa __ro_after_init;
/*
* Since feature negotiation related variables are set early in the boot
@@ -107,7 +54,7 @@ static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;
-static bool __init sev_es_check_cpu_features(void)
+bool __init sev_es_check_cpu_features(void)
{
if (!has_cpuflag(X86_FEATURE_RDRAND)) {
error("RDRAND instruction not supported - no trusted source of randomness available\n");
@@ -117,7 +64,7 @@ static bool __init sev_es_check_cpu_features(void)
return true;
}
-static void __head __noreturn
+void __head __noreturn
sev_es_terminate(unsigned int set, unsigned int reason)
{
u64 val = GHCB_MSR_TERM_REQ;
@@ -136,7 +83,7 @@ sev_es_terminate(unsigned int set, unsigned int reason)
/*
* The hypervisor features are available from GHCB version 2 onward.
*/
-static u64 get_hv_features(void)
+u64 get_hv_features(void)
{
u64 val;
@@ -153,7 +100,7 @@ static u64 get_hv_features(void)
return GHCB_MSR_HV_FT_RESP_VAL(val);
}
-static void snp_register_ghcb_early(unsigned long paddr)
+void snp_register_ghcb_early(unsigned long paddr)
{
unsigned long pfn = paddr >> PAGE_SHIFT;
u64 val;
@@ -169,7 +116,7 @@ static void snp_register_ghcb_early(unsigned long paddr)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER);
}
-static bool sev_es_negotiate_protocol(void)
+bool sev_es_negotiate_protocol(void)
{
u64 val;
@@ -190,39 +137,6 @@ static bool sev_es_negotiate_protocol(void)
return true;
}
-static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
-{
- ghcb->save.sw_exit_code = 0;
- __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
-}
-
-static bool vc_decoding_needed(unsigned long exit_code)
-{
- /* Exceptions don't require to decode the instruction */
- return !(exit_code >= SVM_EXIT_EXCP_BASE &&
- exit_code <= SVM_EXIT_LAST_EXCP);
-}
-
-static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
- struct pt_regs *regs,
- unsigned long exit_code)
-{
- enum es_result ret = ES_OK;
-
- memset(ctxt, 0, sizeof(*ctxt));
- ctxt->regs = regs;
-
- if (vc_decoding_needed(exit_code))
- ret = vc_decode_insn(ctxt);
-
- return ret;
-}
-
-static void vc_finish_insn(struct es_em_ctxt *ctxt)
-{
- ctxt->regs->ip += ctxt->insn.length;
-}
-
static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
u32 ret;
@@ -344,7 +258,7 @@ static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
* Fill in protocol and format specifiers. This can be called very early
* in the boot, so use rip-relative references as needed.
*/
- ghcb->protocol_version = RIP_REL_REF(ghcb_version);
+ ghcb->protocol_version = ghcb_version;
ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
@@ -371,10 +285,10 @@ static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
return svsm_process_result_codes(call);
}
-static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt,
- u64 exit_code, u64 exit_info_1,
- u64 exit_info_2)
+enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ u64 exit_code, u64 exit_info_1,
+ u64 exit_info_2)
{
/* Fill in protocol and format specifiers */
ghcb->protocol_version = ghcb_version;
@@ -473,9 +387,9 @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid
* while running with the initial identity mapping as well as the
* switch-over to kernel virtual addresses later.
*/
-static const struct snp_cpuid_table *snp_cpuid_get_table(void)
+const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
- return &RIP_REL_REF(cpuid_table_copy);
+ return rip_rel_ptr(&cpuid_table_copy);
}
/*
@@ -672,7 +586,7 @@ snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
* Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
* should be treated as fatal by caller.
*/
-static int __head
+int __head
snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -701,9 +615,9 @@ snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
/* Skip post-processing for out-of-range zero leafs. */
- if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
- (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
- (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
+ if (!(leaf->fn <= cpuid_std_range_max ||
+ (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
+ (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
return 0;
}
@@ -782,391 +696,6 @@ fail:
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
-static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
- unsigned long address,
- bool write)
-{
- if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_USER;
- ctxt->fi.cr2 = address;
- if (write)
- ctxt->fi.error_code |= X86_PF_WRITE;
-
- return ES_EXCEPTION;
- }
-
- return ES_OK;
-}
-
-static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
- void *src, char *buf,
- unsigned int data_size,
- unsigned int count,
- bool backwards)
-{
- int i, b = backwards ? -1 : 1;
- unsigned long address = (unsigned long)src;
- enum es_result ret;
-
- ret = vc_insn_string_check(ctxt, address, false);
- if (ret != ES_OK)
- return ret;
-
- for (i = 0; i < count; i++) {
- void *s = src + (i * data_size * b);
- char *d = buf + (i * data_size);
-
- ret = vc_read_mem(ctxt, s, d, data_size);
- if (ret != ES_OK)
- break;
- }
-
- return ret;
-}
-
-static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
- void *dst, char *buf,
- unsigned int data_size,
- unsigned int count,
- bool backwards)
-{
- int i, s = backwards ? -1 : 1;
- unsigned long address = (unsigned long)dst;
- enum es_result ret;
-
- ret = vc_insn_string_check(ctxt, address, true);
- if (ret != ES_OK)
- return ret;
-
- for (i = 0; i < count; i++) {
- void *d = dst + (i * data_size * s);
- char *b = buf + (i * data_size);
-
- ret = vc_write_mem(ctxt, d, b, data_size);
- if (ret != ES_OK)
- break;
- }
-
- return ret;
-}
-
-#define IOIO_TYPE_STR BIT(2)
-#define IOIO_TYPE_IN 1
-#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR)
-#define IOIO_TYPE_OUT 0
-#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)
-
-#define IOIO_REP BIT(3)
-
-#define IOIO_ADDR_64 BIT(9)
-#define IOIO_ADDR_32 BIT(8)
-#define IOIO_ADDR_16 BIT(7)
-
-#define IOIO_DATA_32 BIT(6)
-#define IOIO_DATA_16 BIT(5)
-#define IOIO_DATA_8 BIT(4)
-
-#define IOIO_SEG_ES (0 << 10)
-#define IOIO_SEG_DS (3 << 10)
-
-static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
-{
- struct insn *insn = &ctxt->insn;
- size_t size;
- u64 port;
-
- *exitinfo = 0;
-
- switch (insn->opcode.bytes[0]) {
- /* INS opcodes */
- case 0x6c:
- case 0x6d:
- *exitinfo |= IOIO_TYPE_INS;
- *exitinfo |= IOIO_SEG_ES;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- /* OUTS opcodes */
- case 0x6e:
- case 0x6f:
- *exitinfo |= IOIO_TYPE_OUTS;
- *exitinfo |= IOIO_SEG_DS;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- /* IN immediate opcodes */
- case 0xe4:
- case 0xe5:
- *exitinfo |= IOIO_TYPE_IN;
- port = (u8)insn->immediate.value & 0xffff;
- break;
-
- /* OUT immediate opcodes */
- case 0xe6:
- case 0xe7:
- *exitinfo |= IOIO_TYPE_OUT;
- port = (u8)insn->immediate.value & 0xffff;
- break;
-
- /* IN register opcodes */
- case 0xec:
- case 0xed:
- *exitinfo |= IOIO_TYPE_IN;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- /* OUT register opcodes */
- case 0xee:
- case 0xef:
- *exitinfo |= IOIO_TYPE_OUT;
- port = ctxt->regs->dx & 0xffff;
- break;
-
- default:
- return ES_DECODE_FAILED;
- }
-
- *exitinfo |= port << 16;
-
- switch (insn->opcode.bytes[0]) {
- case 0x6c:
- case 0x6e:
- case 0xe4:
- case 0xe6:
- case 0xec:
- case 0xee:
- /* Single byte opcodes */
- *exitinfo |= IOIO_DATA_8;
- size = 1;
- break;
- default:
- /* Length determined by instruction parsing */
- *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
- : IOIO_DATA_32;
- size = (insn->opnd_bytes == 2) ? 2 : 4;
- }
-
- switch (insn->addr_bytes) {
- case 2:
- *exitinfo |= IOIO_ADDR_16;
- break;
- case 4:
- *exitinfo |= IOIO_ADDR_32;
- break;
- case 8:
- *exitinfo |= IOIO_ADDR_64;
- break;
- }
-
- if (insn_has_rep_prefix(insn))
- *exitinfo |= IOIO_REP;
-
- return vc_ioio_check(ctxt, (u16)port, size);
-}
-
-static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- u64 exit_info_1, exit_info_2;
- enum es_result ret;
-
- ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
- if (ret != ES_OK)
- return ret;
-
- if (exit_info_1 & IOIO_TYPE_STR) {
-
- /* (REP) INS/OUTS */
-
- bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
- unsigned int io_bytes, exit_bytes;
- unsigned int ghcb_count, op_count;
- unsigned long es_base;
- u64 sw_scratch;
-
- /*
- * For the string variants with rep prefix the amount of in/out
- * operations per #VC exception is limited so that the kernel
- * has a chance to take interrupts and re-schedule while the
- * instruction is emulated.
- */
- io_bytes = (exit_info_1 >> 4) & 0x7;
- ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;
-
- op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
- exit_info_2 = min(op_count, ghcb_count);
- exit_bytes = exit_info_2 * io_bytes;
-
- es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
-
- /* Read bytes of OUTS into the shared buffer */
- if (!(exit_info_1 & IOIO_TYPE_IN)) {
- ret = vc_insn_string_read(ctxt,
- (void *)(es_base + regs->si),
- ghcb->shared_buffer, io_bytes,
- exit_info_2, df);
- if (ret)
- return ret;
- }
-
- /*
- * Issue an VMGEXIT to the HV to consume the bytes from the
- * shared buffer or to have it write them into the shared buffer
- * depending on the instruction: OUTS or INS.
- */
- sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
- ghcb_set_sw_scratch(ghcb, sw_scratch);
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
- exit_info_1, exit_info_2);
- if (ret != ES_OK)
- return ret;
-
- /* Read bytes from shared buffer into the guest's destination. */
- if (exit_info_1 & IOIO_TYPE_IN) {
- ret = vc_insn_string_write(ctxt,
- (void *)(es_base + regs->di),
- ghcb->shared_buffer, io_bytes,
- exit_info_2, df);
- if (ret)
- return ret;
-
- if (df)
- regs->di -= exit_bytes;
- else
- regs->di += exit_bytes;
- } else {
- if (df)
- regs->si -= exit_bytes;
- else
- regs->si += exit_bytes;
- }
-
- if (exit_info_1 & IOIO_REP)
- regs->cx -= exit_info_2;
-
- ret = regs->cx ? ES_RETRY : ES_OK;
-
- } else {
-
- /* IN/OUT into/from rAX */
-
- int bits = (exit_info_1 & 0x70) >> 1;
- u64 rax = 0;
-
- if (!(exit_info_1 & IOIO_TYPE_IN))
- rax = lower_bits(regs->ax, bits);
-
- ghcb_set_rax(ghcb, rax);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
- if (ret != ES_OK)
- return ret;
-
- if (exit_info_1 & IOIO_TYPE_IN) {
- if (!ghcb_rax_is_valid(ghcb))
- return ES_VMM_ERROR;
- regs->ax = lower_bits(ghcb->save.rax, bits);
- }
- }
-
- return ret;
-}
-
-static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- struct cpuid_leaf leaf;
- int ret;
-
- leaf.fn = regs->ax;
- leaf.subfn = regs->cx;
- ret = snp_cpuid(ghcb, ctxt, &leaf);
- if (!ret) {
- regs->ax = leaf.eax;
- regs->bx = leaf.ebx;
- regs->cx = leaf.ecx;
- regs->dx = leaf.edx;
- }
-
- return ret;
-}
-
-static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- u32 cr4 = native_read_cr4();
- enum es_result ret;
- int snp_cpuid_ret;
-
- snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
- if (!snp_cpuid_ret)
- return ES_OK;
- if (snp_cpuid_ret != -EOPNOTSUPP)
- return ES_VMM_ERROR;
-
- ghcb_set_rax(ghcb, regs->ax);
- ghcb_set_rcx(ghcb, regs->cx);
-
- if (cr4 & X86_CR4_OSXSAVE)
- /* Safe to read xcr0 */
- ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
- else
- /* xgetbv will cause #GP - use reset value for xcr0 */
- ghcb_set_xcr0(ghcb, 1);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!(ghcb_rax_is_valid(ghcb) &&
- ghcb_rbx_is_valid(ghcb) &&
- ghcb_rcx_is_valid(ghcb) &&
- ghcb_rdx_is_valid(ghcb)))
- return ES_VMM_ERROR;
-
- regs->ax = ghcb->save.rax;
- regs->bx = ghcb->save.rbx;
- regs->cx = ghcb->save.rcx;
- regs->dx = ghcb->save.rdx;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt,
- unsigned long exit_code)
-{
- bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
- enum es_result ret;
-
- /*
- * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure
- * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP
- * instructions are being intercepted. If this should occur and Secure
- * TSC is enabled, guest execution should be terminated as the guest
- * cannot rely on the TSC value provided by the hypervisor.
- */
- if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
- return ES_VMM_ERROR;
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
- (!rdtscp || ghcb_rcx_is_valid(ghcb))))
- return ES_VMM_ERROR;
-
- ctxt->regs->ax = ghcb->save.rax;
- ctxt->regs->dx = ghcb->save.rdx;
- if (rdtscp)
- ctxt->regs->cx = ghcb->save.rcx;
-
- return ES_OK;
-}
-
struct cc_setup_data {
struct setup_data header;
u32 cc_blob_address;
@@ -1224,36 +753,14 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
if (fn->eax_in == 0x0)
- RIP_REL_REF(cpuid_std_range_max) = fn->eax;
+ cpuid_std_range_max = fn->eax;
else if (fn->eax_in == 0x40000000)
- RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
+ cpuid_hyp_range_max = fn->eax;
else if (fn->eax_in == 0x80000000)
- RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
+ cpuid_ext_range_max = fn->eax;
}
}
-static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
- int ret, u64 svsm_ret)
-{
- WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
- pfn, action, page_size, ret, svsm_ret);
-
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-}
-
-static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
-{
- unsigned int page_size;
- bool action;
- u64 pfn;
-
- pfn = pc->entry[pc->cur_index].pfn;
- action = pc->entry[pc->cur_index].action;
- page_size = pc->entry[pc->cur_index].page_size;
-
- __pval_terminate(pfn, action, page_size, ret, svsm_ret);
-}
-
static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
{
struct svsm_pvalidate_call *pc;
@@ -1296,11 +803,7 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
{
int ret;
- /*
- * This can be called very early during boot, so use rIP-relative
- * references as needed.
- */
- if (RIP_REL_REF(snp_vmpl)) {
+ if (snp_vmpl) {
svsm_pval_4k_page(paddr, validate);
} else {
ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
@@ -1309,351 +812,6 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
}
}
-static void pval_pages(struct snp_psc_desc *desc)
-{
- struct psc_entry *e;
- unsigned long vaddr;
- unsigned int size;
- unsigned int i;
- bool validate;
- u64 pfn;
- int rc;
-
- for (i = 0; i <= desc->hdr.end_entry; i++) {
- e = &desc->entries[i];
-
- pfn = e->gfn;
- vaddr = (unsigned long)pfn_to_kaddr(pfn);
- size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
- validate = e->operation == SNP_PAGE_STATE_PRIVATE;
-
- rc = pvalidate(vaddr, size, validate);
- if (!rc)
- continue;
-
- if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
- unsigned long vaddr_end = vaddr + PMD_SIZE;
-
- for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
- rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
- if (rc)
- __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
- }
- } else {
- __pval_terminate(pfn, validate, size, rc, 0);
- }
- }
-}
-
-static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
- struct svsm_pvalidate_call *pc)
-{
- struct svsm_pvalidate_entry *pe;
-
- /* Nothing in the CA yet */
- pc->num_entries = 0;
- pc->cur_index = 0;
-
- pe = &pc->entry[0];
-
- while (pfn < pfn_end) {
- pe->page_size = RMP_PG_SIZE_4K;
- pe->action = action;
- pe->ignore_cf = 0;
- pe->pfn = pfn;
-
- pe++;
- pfn++;
-
- pc->num_entries++;
- if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
- break;
- }
-
- return pfn;
-}
-
-static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
- struct svsm_pvalidate_call *pc)
-{
- struct svsm_pvalidate_entry *pe;
- struct psc_entry *e;
-
- /* Nothing in the CA yet */
- pc->num_entries = 0;
- pc->cur_index = 0;
-
- pe = &pc->entry[0];
- e = &desc->entries[desc_entry];
-
- while (desc_entry <= desc->hdr.end_entry) {
- pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
- pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
- pe->ignore_cf = 0;
- pe->pfn = e->gfn;
-
- pe++;
- e++;
-
- desc_entry++;
- pc->num_entries++;
- if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
- break;
- }
-
- return desc_entry;
-}
-
-static void svsm_pval_pages(struct snp_psc_desc *desc)
-{
- struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
- unsigned int i, pv_4k_count = 0;
- struct svsm_pvalidate_call *pc;
- struct svsm_call call = {};
- unsigned long flags;
- bool action;
- u64 pc_pa;
- int ret;
-
- /*
- * This can be called very early in the boot, use native functions in
- * order to avoid paravirt issues.
- */
- flags = native_local_irq_save();
-
- /*
- * The SVSM calling area (CA) can support processing 510 entries at a
- * time. Loop through the Page State Change descriptor until the CA is
- * full or the last entry in the descriptor is reached, at which time
- * the SVSM is invoked. This repeats until all entries in the descriptor
- * are processed.
- */
- call.caa = svsm_get_caa();
-
- pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
- pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
-
- /* Protocol 0, Call ID 1 */
- call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
- call.rcx = pc_pa;
-
- for (i = 0; i <= desc->hdr.end_entry;) {
- i = svsm_build_ca_from_psc_desc(desc, i, pc);
-
- do {
- ret = svsm_perform_call_protocol(&call);
- if (!ret)
- continue;
-
- /*
- * Check if the entry failed because of an RMP mismatch (a
- * PVALIDATE at 2M was requested, but the page is mapped in
- * the RMP as 4K).
- */
-
- if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
- pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
- /* Save this entry for post-processing at 4K */
- pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
-
- /* Skip to the next one unless at the end of the list */
- pc->cur_index++;
- if (pc->cur_index < pc->num_entries)
- ret = -EAGAIN;
- else
- ret = 0;
- }
- } while (ret == -EAGAIN);
-
- if (ret)
- svsm_pval_terminate(pc, ret, call.rax_out);
- }
-
- /* Process any entries that failed to be validated at 2M and validate them at 4K */
- for (i = 0; i < pv_4k_count; i++) {
- u64 pfn, pfn_end;
-
- action = pv_4k[i].action;
- pfn = pv_4k[i].pfn;
- pfn_end = pfn + 512;
-
- while (pfn < pfn_end) {
- pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
-
- ret = svsm_perform_call_protocol(&call);
- if (ret)
- svsm_pval_terminate(pc, ret, call.rax_out);
- }
- }
-
- native_local_irq_restore(flags);
-}
-
-static void pvalidate_pages(struct snp_psc_desc *desc)
-{
- if (snp_vmpl)
- svsm_pval_pages(desc);
- else
- pval_pages(desc);
-}
-
-static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
-{
- int cur_entry, end_entry, ret = 0;
- struct snp_psc_desc *data;
- struct es_em_ctxt ctxt;
-
- vc_ghcb_invalidate(ghcb);
-
- /* Copy the input desc into GHCB shared buffer */
- data = (struct snp_psc_desc *)ghcb->shared_buffer;
- memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
-
- /*
- * As per the GHCB specification, the hypervisor can resume the guest
- * before processing all the entries. Check whether all the entries
- * are processed. If not, then keep retrying. Note, the hypervisor
- * will update the data memory directly to indicate the status, so
- * reference the data->hdr everywhere.
- *
- * The strategy here is to wait for the hypervisor to change the page
- * state in the RMP table before guest accesses the memory pages. If the
- * page state change was not successful, then later memory access will
- * result in a crash.
- */
- cur_entry = data->hdr.cur_entry;
- end_entry = data->hdr.end_entry;
-
- while (data->hdr.cur_entry <= data->hdr.end_entry) {
- ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
-
- /* This will advance the shared buffer data points to. */
- ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
-
- /*
- * Page State Change VMGEXIT can pass error code through
- * exit_info_2.
- */
- if (WARN(ret || ghcb->save.sw_exit_info_2,
- "SNP: PSC failed ret=%d exit_info_2=%llx\n",
- ret, ghcb->save.sw_exit_info_2)) {
- ret = 1;
- goto out;
- }
-
- /* Verify that reserved bit is not set */
- if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
- ret = 1;
- goto out;
- }
-
- /*
- * Sanity check that entry processing is not going backwards.
- * This will happen only if hypervisor is tricking us.
- */
- if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
-"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
- end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
- ret = 1;
- goto out;
- }
- }
-
-out:
- return ret;
-}
-
-static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
- unsigned long exit_code)
-{
- unsigned int opcode = (unsigned int)ctxt->insn.opcode.value;
- u8 modrm = ctxt->insn.modrm.value;
-
- switch (exit_code) {
-
- case SVM_EXIT_IOIO:
- case SVM_EXIT_NPF:
- /* handled separately */
- return ES_OK;
-
- case SVM_EXIT_CPUID:
- if (opcode == 0xa20f)
- return ES_OK;
- break;
-
- case SVM_EXIT_INVD:
- if (opcode == 0x080f)
- return ES_OK;
- break;
-
- case SVM_EXIT_MONITOR:
- /* MONITOR and MONITORX instructions generate the same error code */
- if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
- return ES_OK;
- break;
-
- case SVM_EXIT_MWAIT:
- /* MWAIT and MWAITX instructions generate the same error code */
- if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
- return ES_OK;
- break;
-
- case SVM_EXIT_MSR:
- /* RDMSR */
- if (opcode == 0x320f ||
- /* WRMSR */
- opcode == 0x300f)
- return ES_OK;
- break;
-
- case SVM_EXIT_RDPMC:
- if (opcode == 0x330f)
- return ES_OK;
- break;
-
- case SVM_EXIT_RDTSC:
- if (opcode == 0x310f)
- return ES_OK;
- break;
-
- case SVM_EXIT_RDTSCP:
- if (opcode == 0x010f && modrm == 0xf9)
- return ES_OK;
- break;
-
- case SVM_EXIT_READ_DR7:
- if (opcode == 0x210f &&
- X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
- return ES_OK;
- break;
-
- case SVM_EXIT_VMMCALL:
- if (opcode == 0x010f && modrm == 0xd9)
- return ES_OK;
-
- break;
-
- case SVM_EXIT_WRITE_DR7:
- if (opcode == 0x230f &&
- X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
- return ES_OK;
- break;
-
- case SVM_EXIT_WBINVD:
- if (opcode == 0x90f)
- return ES_OK;
- break;
-
- default:
- break;
- }
-
- sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n",
- opcode, exit_code, ctxt->regs->ip);
-
- return ES_UNSUPPORTED;
-}
-
/*
* Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
* services needed when not running in VMPL0.
@@ -1681,7 +839,7 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
* routine is running identity mapped when called, both by the decompressor
* code and the early kernel code.
*/
- if (!rmpadjust((unsigned long)&RIP_REL_REF(boot_ghcb_page), RMP_PG_SIZE_4K, 1))
+ if (!rmpadjust((unsigned long)rip_rel_ptr(&boot_ghcb_page), RMP_PG_SIZE_4K, 1))
return false;
/*
@@ -1698,7 +856,7 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
if (!secrets_page->svsm_guest_vmpl)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_VMPL0);
- RIP_REL_REF(snp_vmpl) = secrets_page->svsm_guest_vmpl;
+ snp_vmpl = secrets_page->svsm_guest_vmpl;
caa = secrets_page->svsm_caa;
@@ -1713,8 +871,8 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
* The CA is identity mapped when this routine is called, both by the
* decompressor code and the early kernel code.
*/
- RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)caa;
- RIP_REL_REF(boot_svsm_caa_pa) = caa;
+ boot_svsm_caa = (struct svsm_ca *)caa;
+ boot_svsm_caa_pa = caa;
/* Advertise the SVSM presence via CPUID. */
cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table();
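
The snp_cpuid() path that stays in this file consults the firmware-supplied
CPUID table whose struct definitions were just moved out (see the removed
snp_cpuid_fn/snp_cpuid_table hunk above). The lookup itself is a linear match
on the EAX/ECX inputs; a reduced sketch, omitting the XCR0/XSS matching the
real code performs for leaf 0xD:

    #include <stddef.h>
    #include <stdint.h>

    /* Layout per SNP Firmware ABI rev 0.9, sec 7.1, as in the removed hunk. */
    struct snp_cpuid_fn {
    	uint32_t eax_in, ecx_in;
    	uint64_t xcr0_in, xss_in;
    	uint32_t eax, ebx, ecx, edx;
    	uint64_t reserved;
    } __attribute__((packed));

    struct snp_cpuid_table {
    	uint32_t count;
    	uint32_t reserved1;
    	uint64_t reserved2;
    	struct snp_cpuid_fn fn[64];	/* firmware-enforced maximum */
    } __attribute__((packed));

    static const struct snp_cpuid_fn *
    snp_cpuid_find(const struct snp_cpuid_table *t, uint32_t fn, uint32_t subfn)
    {
    	for (uint32_t i = 0; i < t->count && i < 64; i++) {
    		if (t->fn[i].eax_in == fn && t->fn[i].ecx_in == subfn)
    			return &t->fn[i];
    	}
    	/* Out of range: the caller zeroes the output leaf instead. */
    	return NULL;
    }
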
diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
new file mode 100644
index 000000000000..435853a55768
--- /dev/null
+++ b/arch/x86/boot/startup/sev-startup.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "SEV: " fmt
+
+#include <linux/percpu-defs.h>
+#include <linux/cc_platform.h>
+#include <linux/printk.h>
+#include <linux/mm_types.h>
+#include <linux/set_memory.h>
+#include <linux/memblock.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/cpumask.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/psp-sev.h>
+#include <uapi/linux/sev-guest.h>
+
+#include <asm/init.h>
+#include <asm/cpu_entry_area.h>
+#include <asm/stacktrace.h>
+#include <asm/sev.h>
+#include <asm/sev-internal.h>
+#include <asm/insn-eval.h>
+#include <asm/fpu/xcr.h>
+#include <asm/processor.h>
+#include <asm/realmode.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/svm.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/apic.h>
+#include <asm/cpuid.h>
+#include <asm/cmdline.h>
+
+/* For early boot hypervisor communication in SEV-ES enabled guests */
+struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
+
+/*
+ * Needs to be in the .data section because we need it NULL before bss is
+ * cleared
+ */
+struct ghcb *boot_ghcb __section(".data");
+
+/* Bitmap of SEV features supported by the hypervisor */
+u64 sev_hv_features __ro_after_init;
+
+/* Secrets page physical address from the CC blob */
+u64 sev_secrets_pa __ro_after_init;
+
+/* For early boot SVSM communication */
+struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
+
+DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
+DEFINE_PER_CPU(u64, svsm_caa_pa);
+
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ WARN_ON(!irqs_disabled());
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (unlikely(data->ghcb_active)) {
+ /* GHCB is already in use - save its contents */
+
+ if (unlikely(data->backup_ghcb_active)) {
+ /*
+ * Backup-GHCB is also already in use. There is no way
+ * to continue here so just kill the machine. To make
+ * panic() work, mark GHCBs inactive so that messages
+ * can be printed out.
+ */
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+
+ instrumentation_begin();
+ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ instrumentation_end();
+ }
+
+ /* Mark backup_ghcb active before writing to it */
+ data->backup_ghcb_active = true;
+
+ state->ghcb = &data->backup_ghcb;
+
+ /* Backup GHCB content */
+ *state->ghcb = *ghcb;
+ } else {
+ state->ghcb = NULL;
+ data->ghcb_active = true;
+ }
+
+ return ghcb;
+}
+
+/* Include code shared with pre-decompression boot stage */
+#include "sev-shared.c"
+
+noinstr void __sev_put_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ WARN_ON(!irqs_disabled());
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (state->ghcb) {
+ /* Restore GHCB from Backup */
+ *ghcb = *state->ghcb;
+ data->backup_ghcb_active = false;
+ state->ghcb = NULL;
+ } else {
+ /*
+ * Invalidate the GHCB so a VMGEXIT instruction issued
+ * from userspace won't appear to be valid.
+ */
+ vc_ghcb_invalidate(ghcb);
+ data->ghcb_active = false;
+ }
+}
+
+int svsm_perform_call_protocol(struct svsm_call *call)
+{
+ struct ghcb_state state;
+ unsigned long flags;
+ struct ghcb *ghcb;
+ int ret;
+
+ /*
+ * This can be called very early in the boot, so use native functions in
+ * order to avoid paravirt issues.
+ */
+ flags = native_local_irq_save();
+
+ if (sev_cfg.ghcbs_initialized)
+ ghcb = __sev_get_ghcb(&state);
+ else if (boot_ghcb)
+ ghcb = boot_ghcb;
+ else
+ ghcb = NULL;
+
+ do {
+ ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
+ : svsm_perform_msr_protocol(call);
+ } while (ret == -EAGAIN);
+
+ if (sev_cfg.ghcbs_initialized)
+ __sev_put_ghcb(&state);
+
+ native_local_irq_restore(flags);
+
+ return ret;
+}
+
+void __head
+early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages, enum psc_op op)
+{
+ unsigned long paddr_end;
+ u64 val;
+
+ vaddr = vaddr & PAGE_MASK;
+
+ paddr = paddr & PAGE_MASK;
+ paddr_end = paddr + (npages << PAGE_SHIFT);
+
+ while (paddr < paddr_end) {
+ /* Page validation must be rescinded before changing to shared */
+ if (op == SNP_PAGE_STATE_SHARED)
+ pvalidate_4k_page(vaddr, paddr, false);
+
+ /*
+ * Use the MSR protocol because this function can be called before
+ * the GHCB is established.
+ */
+ sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
+ VMGEXIT();
+
+ val = sev_es_rd_ghcb_msr();
+
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
+ goto e_term;
+
+ if (GHCB_MSR_PSC_RESP_VAL(val))
+ goto e_term;
+
+ /* Page validation must be performed after changing to private */
+ if (op == SNP_PAGE_STATE_PRIVATE)
+ pvalidate_4k_page(vaddr, paddr, true);
+
+ vaddr += PAGE_SIZE;
+ paddr += PAGE_SIZE;
+ }
+
+ return;
+
+e_term:
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+}
+
+void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages)
+{
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+ * use an open coded check for SNP instead of using cc_platform_has().
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /*
+ * Ask the hypervisor to mark the memory pages as private in the RMP
+ * table.
+ */
+ early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
+}
+
+void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages)
+{
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+ * use an open coded check for SNP instead of using cc_platform_has().
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /* Ask hypervisor to mark the memory pages shared in the RMP table. */
+ early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
+}
+
+/*
+ * Initial set up of SNP relies on information provided by the
+ * Confidential Computing blob, which can be passed to the kernel
+ * in the following ways, depending on how it is booted:
+ *
+ * - when booted via the boot/decompress kernel:
+ * - via boot_params
+ *
+ * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
+ * - via a setup_data entry, as defined by the Linux Boot Protocol
+ *
+ * Scan for the blob in that order.
+ */
+static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+{
+ struct cc_blob_sev_info *cc_info;
+
+ /* Boot kernel would have passed the CC blob via boot_params. */
+ if (bp->cc_blob_address) {
+ cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
+ goto found_cc_info;
+ }
+
+ /*
+ * If the kernel was booted directly, without the use of the
+ * boot/decompression kernel, the CC blob may have been passed via
+ * setup_data instead.
+ */
+ cc_info = find_cc_blob_setup_data(bp);
+ if (!cc_info)
+ return NULL;
+
+found_cc_info:
+ if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
+ snp_abort();
+
+ return cc_info;
+}
+
+static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
+{
+ struct svsm_call call = {};
+ int ret;
+ u64 pa;
+
+ /*
+ * Record the SVSM Calling Area address (CAA) if the guest is not
+ * running at VMPL0. The CA will be used to communicate with the
+ * SVSM to perform the SVSM services.
+ */
+ if (!svsm_setup_ca(cc_info))
+ return;
+
+ /*
+ * It is very early in the boot and the kernel is running identity
+ * mapped but without having adjusted the pagetables to where the
+ * kernel was loaded (physbase), so get the CA address using
+ * RIP-relative addressing.
+ */
+ pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);
+
+ /*
+ * Switch over to the boot SVSM CA while the current CA is still
+ * addressable. There is no GHCB at this point so use the MSR protocol.
+ *
+ * SVSM_CORE_REMAP_CA call:
+ * RAX = 0 (Protocol=0, CallID=0)
+ * RCX = New CA GPA
+ */
+ call.caa = svsm_get_caa();
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
+ call.rcx = pa;
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
+
+ boot_svsm_caa = (struct svsm_ca *)pa;
+ boot_svsm_caa_pa = pa;
+}
+
+bool __head snp_init(struct boot_params *bp)
+{
+ struct cc_blob_sev_info *cc_info;
+
+ if (!bp)
+ return false;
+
+ cc_info = find_cc_blob(bp);
+ if (!cc_info)
+ return false;
+
+ if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
+ sev_secrets_pa = cc_info->secrets_phys;
+ else
+ return false;
+
+ setup_cpuid_table(cc_info);
+
+ svsm_setup(cc_info);
+
+ /*
+ * The CC blob will be used later to access the secrets page. Cache
+ * it here like the boot kernel does.
+ */
+ bp->cc_blob_address = (u32)(unsigned long)cc_info;
+
+ return true;
+}
+
+void __head __noreturn snp_abort(void)
+{
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+}
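
early_set_pages_state() above drives the page-state change one 4K page at a
time over the GHCB MSR protocol, since no GHCB page exists that early. The
GHCB_MSR_PSC_REQ_GFN()/GHCB_MSR_PSC_RESP_VAL() packing it relies on follows
the GHCB spec's MSR protocol; the field positions below are quoted from
memory, not from this patch:

    #include <stdint.h>

    /*
     * Assumed layout (GHCB spec, MSR-based Page State Change): request code
     * 0x014 in bits [11:0], GFN in [51:12], operation in [55:52]; the
     * response code is 0x015 with an error value in bits [63:32].
     */
    #define PSC_REQ_CODE	0x014ULL
    #define PSC_RESP_CODE	0x015ULL
    #define PSC_GFN_MASK	((1ULL << 40) - 1)

    static uint64_t psc_req(uint64_t gfn, unsigned int op)
    {
    	return PSC_REQ_CODE | ((gfn & PSC_GFN_MASK) << 12) |
    	       ((uint64_t)op << 52);
    }

    static int psc_resp_ok(uint64_t val)
    {
    	return (val & 0xfffULL) == PSC_RESP_CODE && (val >> 32) == 0;
    }
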
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/boot/startup/sme.c
index 5eecdd92da10..753cd2094080 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/boot/startup/sme.c
@@ -45,8 +45,6 @@
#include <asm/coco.h>
#include <asm/sev.h>
-#include "mm_internal.h"
-
#define PGD_FLAGS _KERNPG_TABLE_NOENC
#define P4D_FLAGS _KERNPG_TABLE_NOENC
#define PUD_FLAGS _KERNPG_TABLE_NOENC
@@ -299,8 +297,7 @@ void __head sme_encrypt_kernel(struct boot_params *bp)
* instrumentation or checking boot_cpu_data in the cc_platform_has()
* function.
*/
- if (!sme_get_me_mask() ||
- RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
+ if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
return;
/*
@@ -318,8 +315,8 @@ void __head sme_encrypt_kernel(struct boot_params *bp)
* memory from being cached.
*/
- kernel_start = (unsigned long)RIP_REL_REF(_text);
- kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
+ kernel_start = (unsigned long)rip_rel_ptr(_text);
+ kernel_end = ALIGN((unsigned long)rip_rel_ptr(_end), PMD_SIZE);
kernel_len = kernel_end - kernel_start;
initrd_start = 0;
@@ -345,7 +342,7 @@ void __head sme_encrypt_kernel(struct boot_params *bp)
* pagetable structures for the encryption of the kernel
* pagetable structures for workarea (in case not currently mapped)
*/
- execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
+ execute_start = workarea_start = (unsigned long)rip_rel_ptr(sme_workarea);
execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
execute_len = execute_end - execute_start;
@@ -526,7 +523,7 @@ void __head sme_enable(struct boot_params *bp)
me_mask = 1UL << (ebx & 0x3f);
/* Check the SEV MSR whether SEV or SME is enabled */
- RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+ sev_status = msr = __rdmsr(MSR_AMD64_SEV);
feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
/*
@@ -562,8 +559,17 @@ void __head sme_enable(struct boot_params *bp)
return;
}
- RIP_REL_REF(sme_me_mask) = me_mask;
- RIP_REL_REF(physical_mask) &= ~me_mask;
- RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD;
+ sme_me_mask = me_mask;
+ physical_mask &= ~me_mask;
+ cc_vendor = CC_VENDOR_AMD;
cc_set_mask(me_mask);
}
+
+#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
+/* Local version for startup code, which never operates on user page tables */
+__weak
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ return pgd;
+}
+#endif
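
The __weak __pti_set_user_pgtbl() at the end of sme.c keeps the startup
objects linkable on their own; whenever the real PTI implementation is built
in, its strong definition wins at link time. The pattern in general, with
nothing kernel-specific about the names:

    /* A library-style fallback: replaced by any strong definition that is
     * linked in alongside it. */
    __attribute__((weak)) int backend_init(void)
    {
    	return 0;	/* no-op default */
    }

    int main(void)
    {
    	/* Links and runs as-is; an object file providing a strong
    	 * backend_init() would silently take precedence. */
    	return backend_init();
    }
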
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 84f7a883ce1e..f35369bb14c5 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -32,7 +32,7 @@
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
- asm("repe; cmpsb" CC_SET(nz)
+ asm("repe cmpsb" CC_SET(nz)
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
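
The "repe; cmpsb" to "repe cmpsb" change is purely syntactic; newer assemblers
warn about the semicolon form. For reference, CC_SET()/CC_OUT() expand to the
compiler's condition-code flag outputs, so the same memcmp can be sketched
stand-alone, assuming GCC 6+ or clang:

    #include <stddef.h>

    /* 'diff' is set straight from ZF after the string compare, via the
     * =@ccnz flag-output constraint that CC_SET()/CC_OUT() abstract. */
    static inline int memcmp_sketch(const void *s1, const void *s2, size_t len)
    {
    	_Bool diff;

    	asm("repe cmpsb"
    	    : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len)
    	    : : "memory");
    	return diff;
    }
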
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index f2e96905b3fe..0641c8c46aee 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -292,7 +292,7 @@ static void restore_screen(void)
"shrw %%cx ; "
"jnc 1f ; "
"stosw \n\t"
- "1: rep;stosl ; "
+ "1: rep stosl ; "
"popw %%es"
: "+D" (dst), "+c" (npad)
: "bdS" (video_segment),
diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index 9a0ddda3aa69..d4610af68114 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -18,7 +18,9 @@
#include <asm/processor.h>
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
+SYM_PIC_ALIAS(cc_vendor);
u64 cc_mask __ro_after_init;
+SYM_PIC_ALIAS(cc_mask);
static struct cc_attr_flags {
__u64 host_sev_snp : 1,
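
SYM_PIC_ALIAS() presumably publishes a second, PIC-safe name for these
variables so the position-independent startup objects added by this series can
reference them; the exact macro mechanics are not shown in this hunk. The
underlying alias mechanism, independent of the kernel macro:

    /* One object, two names: the alias attribute binds a second symbol to
     * existing storage (GCC/clang extension). */
    int vendor_storage = 0;

    extern int vendor_alias __attribute__((alias("vendor_storage")));
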
diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile
index dcb06dc8b5ae..db3255b979bd 100644
--- a/arch/x86/coco/sev/Makefile
+++ b/arch/x86/coco/sev/Makefile
@@ -1,22 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += core.o
-
-# jump tables are emitted using absolute references in non-PIC code
-# so they cannot be used in the early SEV startup code
-CFLAGS_core.o += -fno-jump-tables
-
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_core.o = -pg
-endif
-
-KASAN_SANITIZE_core.o := n
-KMSAN_SANITIZE_core.o := n
-KCOV_INSTRUMENT_core.o := n
-
-# With some compiler versions the generated code results in boot hangs, caused
-# by several compilation units. To be safe, disable all instrumentation.
-KCSAN_SANITIZE := n
+obj-y += core.o sev-nmi.o vc-handle.o
# Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
-UBSAN_SANITIZE := n
+UBSAN_SANITIZE_sev-nmi.o := n
+
+# GCC may fail to respect __no_sanitize_address when inlining
+KASAN_SANITIZE_sev-nmi.o := n
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index b0c1a7a57497..ac400525de73 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -31,6 +31,7 @@
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
+#include <asm/sev-internal.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
@@ -44,8 +45,6 @@
#include <asm/cpuid.h>
#include <asm/cmdline.h>
-#define DR7_RESET_VALUE 0x400
-
/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
#define AP_INIT_CS_LIMIT 0xffff
#define AP_INIT_DS_LIMIT 0xffff
@@ -81,21 +80,6 @@ static const char * const sev_status_feat_names[] = {
[MSR_AMD64_SNP_SMT_PROT_BIT] = "SMTProt",
};
-/* For early boot hypervisor communication in SEV-ES enabled guests */
-static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
-
-/*
- * Needs to be in the .data section because we need it NULL before bss is
- * cleared
- */
-static struct ghcb *boot_ghcb __section(".data");
-
-/* Bitmap of SEV features supported by the hypervisor */
-static u64 sev_hv_features __ro_after_init;
-
-/* Secrets page physical address from the CC blob */
-static u64 secrets_pa __ro_after_init;
-
/*
* For Secure TSC guests, the BSP fetches TSC_INFO using SNP guest messaging and
* initializes snp_tsc_scale and snp_tsc_offset. These values are replicated
@@ -105,558 +89,196 @@ static u64 snp_tsc_scale __ro_after_init;
static u64 snp_tsc_offset __ro_after_init;
static u64 snp_tsc_freq_khz __ro_after_init;
-/* #VC handler runtime per-CPU data */
-struct sev_es_runtime_data {
- struct ghcb ghcb_page;
-
- /*
- * Reserve one page per CPU as backup storage for the unencrypted GHCB.
- * It is needed when an NMI happens while the #VC handler uses the real
- * GHCB, and the NMI handler itself is causing another #VC exception. In
- * that case the GHCB content of the first handler needs to be backed up
- * and restored.
- */
- struct ghcb backup_ghcb;
-
- /*
- * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
- * There is no need for it to be atomic, because nothing is written to
- * the GHCB between the read and the write of ghcb_active. So it is safe
- * to use it when a nested #VC exception happens before the write.
- *
- * This is necessary for example in the #VC->NMI->#VC case when the NMI
- * happens while the first #VC handler uses the GHCB. When the NMI code
- * raises a second #VC handler it might overwrite the contents of the
- * GHCB written by the first handler. To avoid this the content of the
- * GHCB is saved and restored when the GHCB is detected to be in use
- * already.
- */
- bool ghcb_active;
- bool backup_ghcb_active;
-
- /*
- * Cached DR7 value - write it on DR7 writes and return it on reads.
- * That value will never make it to the real hardware DR7 as debugging
- * is currently unsupported in SEV-ES guests.
- */
- unsigned long dr7;
-};
-
-struct ghcb_state {
- struct ghcb *ghcb;
-};
-
-/* For early boot SVSM communication */
-static struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
-
-static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
-static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
-static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
-static DEFINE_PER_CPU(u64, svsm_caa_pa);
-
-static __always_inline bool on_vc_stack(struct pt_regs *regs)
-{
- unsigned long sp = regs->sp;
-
- /* User-mode RSP is not trusted */
- if (user_mode(regs))
- return false;
-
- /* SYSCALL gap still has user-mode RSP */
- if (ip_within_syscall_gap(regs))
- return false;
-
- return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
-}
+DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
+DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
/*
- * This function handles the case when an NMI is raised in the #VC
- * exception handler entry code, before the #VC handler has switched off
- * its IST stack. In this case, the IST entry for #VC must be adjusted,
- * so that any nested #VC exception will not overwrite the stack
- * contents of the interrupted #VC handler.
- *
- * The IST entry is adjusted unconditionally so that it can be also be
- * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
- * nested sev_es_ist_exit() call may adjust back the IST entry too
- * early.
- *
- * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
- * on the NMI IST stack, as they are only called from NMI handling code
- * right now.
+ * SVSM related information:
+ * When running under an SVSM, the VMPL that Linux is executing at must be
+ * non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
*/
-void noinstr __sev_es_ist_enter(struct pt_regs *regs)
-{
- unsigned long old_ist, new_ist;
-
- /* Read old IST entry */
- new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
-
- /*
- * If NMI happened while on the #VC IST stack, set the new IST
- * value below regs->sp, so that the interrupted stack frame is
- * not overwritten by subsequent #VC exceptions.
- */
- if (on_vc_stack(regs))
- new_ist = regs->sp;
-
- /*
- * Reserve additional 8 bytes and store old IST value so this
- * adjustment can be unrolled in __sev_es_ist_exit().
- */
- new_ist -= sizeof(old_ist);
- *(unsigned long *)new_ist = old_ist;
-
- /* Set new IST entry */
- this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
-}
+u8 snp_vmpl __ro_after_init;
+EXPORT_SYMBOL_GPL(snp_vmpl);
-void noinstr __sev_es_ist_exit(void)
+static u64 __init get_snp_jump_table_addr(void)
{
- unsigned long ist;
+ struct snp_secrets_page *secrets;
+ void __iomem *mem;
+ u64 addr;
- /* Read IST entry */
- ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+ mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
+ if (!mem) {
+ pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
+ return 0;
+ }
- if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
- return;
+ secrets = (__force struct snp_secrets_page *)mem;
- /* Read back old IST entry and write it to the TSS */
- this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+ addr = secrets->os_area.ap_jump_table_pa;
+ iounmap(mem);
+
+ return addr;
}
-/*
- * Nothing shall interrupt this code path while holding the per-CPU
- * GHCB. The backup GHCB is only for NMIs interrupting this path.
- *
- * Callers must disable local interrupts around it.
- */
-static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
+static u64 __init get_jump_table_addr(void)
{
- struct sev_es_runtime_data *data;
+ struct ghcb_state state;
+ unsigned long flags;
struct ghcb *ghcb;
+ u64 ret = 0;
- WARN_ON(!irqs_disabled());
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
-
- if (unlikely(data->ghcb_active)) {
- /* GHCB is already in use - save its contents */
-
- if (unlikely(data->backup_ghcb_active)) {
- /*
- * Backup-GHCB is also already in use. There is no way
- * to continue here so just kill the machine. To make
- * panic() work, mark GHCBs inactive so that messages
- * can be printed out.
- */
- data->ghcb_active = false;
- data->backup_ghcb_active = false;
-
- instrumentation_begin();
- panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
- instrumentation_end();
- }
-
- /* Mark backup_ghcb active before writing to it */
- data->backup_ghcb_active = true;
+ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return get_snp_jump_table_addr();
- state->ghcb = &data->backup_ghcb;
+ local_irq_save(flags);
- /* Backup GHCB content */
- *state->ghcb = *ghcb;
- } else {
- state->ghcb = NULL;
- data->ghcb_active = true;
- }
+ ghcb = __sev_get_ghcb(&state);
- return ghcb;
-}
+ vc_ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
+ ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
-static inline u64 sev_es_rd_ghcb_msr(void)
-{
- return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
-}
+ sev_es_wr_ghcb_msr(__pa(ghcb));
+ VMGEXIT();
-static __always_inline void sev_es_wr_ghcb_msr(u64 val)
-{
- u32 low, high;
+ if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
+ ghcb_sw_exit_info_2_is_valid(ghcb))
+ ret = ghcb->save.sw_exit_info_2;
- low = (u32)(val);
- high = (u32)(val >> 32);
+ __sev_put_ghcb(&state);
- native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
-}
+ local_irq_restore(flags);
-static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
- unsigned char *buffer)
-{
- return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+ return ret;
}
-static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
+static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
+ int ret, u64 svsm_ret)
{
- char buffer[MAX_INSN_SIZE];
- int insn_bytes;
-
- insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
- if (insn_bytes == 0) {
- /* Nothing could be copied */
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
- ctxt->fi.cr2 = ctxt->regs->ip;
- return ES_EXCEPTION;
- } else if (insn_bytes == -EINVAL) {
- /* Effective RIP could not be calculated */
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
- ctxt->fi.cr2 = 0;
- return ES_EXCEPTION;
- }
-
- if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
- return ES_DECODE_FAILED;
+ WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
+ pfn, action, page_size, ret, svsm_ret);
- if (ctxt->insn.immediate.got)
- return ES_OK;
- else
- return ES_DECODE_FAILED;
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
-static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
+static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
{
- char buffer[MAX_INSN_SIZE];
- int res, ret;
+ unsigned int page_size;
+ bool action;
+ u64 pfn;
- res = vc_fetch_insn_kernel(ctxt, buffer);
- if (res) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_INSTR;
- ctxt->fi.cr2 = ctxt->regs->ip;
- return ES_EXCEPTION;
- }
+ pfn = pc->entry[pc->cur_index].pfn;
+ action = pc->entry[pc->cur_index].action;
+ page_size = pc->entry[pc->cur_index].page_size;
- ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
- if (ret < 0)
- return ES_DECODE_FAILED;
- else
- return ES_OK;
+ __pval_terminate(pfn, action, page_size, ret, svsm_ret);
}
-static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+static void pval_pages(struct snp_psc_desc *desc)
{
- if (user_mode(ctxt->regs))
- return __vc_decode_user_insn(ctxt);
- else
- return __vc_decode_kern_insn(ctxt);
-}
+ struct psc_entry *e;
+ unsigned long vaddr;
+ unsigned int size;
+ unsigned int i;
+ bool validate;
+ u64 pfn;
+ int rc;
-static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
- char *dst, char *buf, size_t size)
-{
- unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
+ for (i = 0; i <= desc->hdr.end_entry; i++) {
+ e = &desc->entries[i];
- /*
- * This function uses __put_user() independent of whether kernel or user
- * memory is accessed. This works fine because __put_user() does no
- * sanity checks of the pointer being accessed. All that it does is
- * to report when the access failed.
- *
- * Also, this function runs in atomic context, so __put_user() is not
- * allowed to sleep. The page-fault handler detects that it is running
- * in atomic context and will not try to take mmap_sem and handle the
- * fault, so additional pagefault_enable()/disable() calls are not
- * needed.
- *
- * The access can't be done via copy_to_user() here because
- * vc_write_mem() must not use string instructions to access unsafe
- * memory. The reason is that MOVS is emulated by the #VC handler by
- * splitting the move up into a read and a write and taking a nested #VC
- * exception on whatever of them is the MMIO access. Using string
- * instructions here would cause infinite nesting.
- */
- switch (size) {
- case 1: {
- u8 d1;
- u8 __user *target = (u8 __user *)dst;
-
- memcpy(&d1, buf, 1);
- if (__put_user(d1, target))
- goto fault;
- break;
- }
- case 2: {
- u16 d2;
- u16 __user *target = (u16 __user *)dst;
+ pfn = e->gfn;
+ vaddr = (unsigned long)pfn_to_kaddr(pfn);
+ size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+ validate = e->operation == SNP_PAGE_STATE_PRIVATE;
- memcpy(&d2, buf, 2);
- if (__put_user(d2, target))
- goto fault;
- break;
- }
- case 4: {
- u32 d4;
- u32 __user *target = (u32 __user *)dst;
+ rc = pvalidate(vaddr, size, validate);
+ if (!rc)
+ continue;
- memcpy(&d4, buf, 4);
- if (__put_user(d4, target))
- goto fault;
- break;
- }
- case 8: {
- u64 d8;
- u64 __user *target = (u64 __user *)dst;
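+		/*
+		 * A PVALIDATE at 2M can fail with SIZEMISMATCH when the RMP
+		 * maps the range as 4K pages; fall back to validating each
+		 * 4K page of the 2M region individually.
+		 */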
+ if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
+ unsigned long vaddr_end = vaddr + PMD_SIZE;
- memcpy(&d8, buf, 8);
- if (__put_user(d8, target))
- goto fault;
- break;
- }
- default:
- WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
- return ES_UNSUPPORTED;
+ for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
+ rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+ if (rc)
+ __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
+ }
+ } else {
+ __pval_terminate(pfn, validate, size, rc, 0);
+ }
}
-
- return ES_OK;
-
-fault:
- if (user_mode(ctxt->regs))
- error_code |= X86_PF_USER;
-
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = error_code;
- ctxt->fi.cr2 = (unsigned long)dst;
-
- return ES_EXCEPTION;
}
-static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
- char *src, char *buf, size_t size)
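+/*
+ * Fill the CA with 4K PVALIDATE entries covering [pfn, pfn_end). Returns
+ * the first PFN that was not added (pfn_end once the range fits), so the
+ * caller can loop until the whole range has been submitted to the SVSM.
+ */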
+static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
+ struct svsm_pvalidate_call *pc)
{
- unsigned long error_code = X86_PF_PROT;
-
- /*
- * This function uses __get_user() independent of whether kernel or user
- * memory is accessed. This works fine because __get_user() does no
- * sanity checks of the pointer being accessed. All that it does is
- * to report when the access failed.
- *
- * Also, this function runs in atomic context, so __get_user() is not
- * allowed to sleep. The page-fault handler detects that it is running
- * in atomic context and will not try to take mmap_sem and handle the
- * fault, so additional pagefault_enable()/disable() calls are not
- * needed.
- *
- * The access can't be done via copy_from_user() here because
- * vc_read_mem() must not use string instructions to access unsafe
- * memory. The reason is that MOVS is emulated by the #VC handler by
- * splitting the move up into a read and a write and taking a nested #VC
- * exception on whatever of them is the MMIO access. Using string
- * instructions here would cause infinite nesting.
- */
- switch (size) {
- case 1: {
- u8 d1;
- u8 __user *s = (u8 __user *)src;
-
- if (__get_user(d1, s))
- goto fault;
- memcpy(buf, &d1, 1);
- break;
- }
- case 2: {
- u16 d2;
- u16 __user *s = (u16 __user *)src;
-
- if (__get_user(d2, s))
- goto fault;
- memcpy(buf, &d2, 2);
- break;
- }
- case 4: {
- u32 d4;
- u32 __user *s = (u32 __user *)src;
-
- if (__get_user(d4, s))
- goto fault;
- memcpy(buf, &d4, 4);
- break;
- }
- case 8: {
- u64 d8;
- u64 __user *s = (u64 __user *)src;
- if (__get_user(d8, s))
- goto fault;
- memcpy(buf, &d8, 8);
- break;
- }
- default:
- WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
- return ES_UNSUPPORTED;
- }
-
- return ES_OK;
-
-fault:
- if (user_mode(ctxt->regs))
- error_code |= X86_PF_USER;
+ struct svsm_pvalidate_entry *pe;
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = error_code;
- ctxt->fi.cr2 = (unsigned long)src;
+ /* Nothing in the CA yet */
+ pc->num_entries = 0;
+ pc->cur_index = 0;
- return ES_EXCEPTION;
-}
+ pe = &pc->entry[0];
-static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
- unsigned long vaddr, phys_addr_t *paddr)
-{
- unsigned long va = (unsigned long)vaddr;
- unsigned int level;
- phys_addr_t pa;
- pgd_t *pgd;
- pte_t *pte;
+ while (pfn < pfn_end) {
+ pe->page_size = RMP_PG_SIZE_4K;
+ pe->action = action;
+ pe->ignore_cf = 0;
+ pe->pfn = pfn;
- pgd = __va(read_cr3_pa());
- pgd = &pgd[pgd_index(va)];
- pte = lookup_address_in_pgd(pgd, va, &level);
- if (!pte) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.cr2 = vaddr;
- ctxt->fi.error_code = 0;
+ pe++;
+ pfn++;
- if (user_mode(ctxt->regs))
- ctxt->fi.error_code |= X86_PF_USER;
-
- return ES_EXCEPTION;
+ pc->num_entries++;
+ if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+ break;
}
- if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
- /* Emulated MMIO to/from encrypted memory not supported */
- return ES_UNSUPPORTED;
-
- pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
- pa |= va & ~page_level_mask(level);
-
- *paddr = pa;
-
- return ES_OK;
+ return pfn;
}
-static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
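+/*
+ * Fill the CA from the PSC descriptor starting at @desc_entry. Returns the
+ * index of the first descriptor entry that was not added, so the caller can
+ * resume from there on the next SVSM call.
+ */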
+static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
+ struct svsm_pvalidate_call *pc)
{
- BUG_ON(size > 4);
-
- if (user_mode(ctxt->regs)) {
- struct thread_struct *t = &current->thread;
- struct io_bitmap *iobm = t->io_bitmap;
- size_t idx;
-
- if (!iobm)
- goto fault;
-
- for (idx = port; idx < port + size; ++idx) {
- if (test_bit(idx, iobm->bitmap))
- goto fault;
- }
- }
-
- return ES_OK;
+ struct svsm_pvalidate_entry *pe;
+ struct psc_entry *e;
-fault:
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
+ /* Nothing in the CA yet */
+ pc->num_entries = 0;
+ pc->cur_index = 0;
- return ES_EXCEPTION;
-}
+ pe = &pc->entry[0];
+ e = &desc->entries[desc_entry];
-static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
-{
- long error_code = ctxt->fi.error_code;
- int trapnr = ctxt->fi.vector;
+ while (desc_entry <= desc->hdr.end_entry) {
+ pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+ pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
+ pe->ignore_cf = 0;
+ pe->pfn = e->gfn;
- ctxt->regs->orig_ax = ctxt->fi.error_code;
+ pe++;
+ e++;
- switch (trapnr) {
- case X86_TRAP_GP:
- exc_general_protection(ctxt->regs, error_code);
- break;
- case X86_TRAP_UD:
- exc_invalid_op(ctxt->regs);
- break;
- case X86_TRAP_PF:
- write_cr2(ctxt->fi.cr2);
- exc_page_fault(ctxt->regs, error_code);
- break;
- case X86_TRAP_AC:
- exc_alignment_check(ctxt->regs, error_code);
- break;
- default:
- pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
- BUG();
+ desc_entry++;
+ pc->num_entries++;
+ if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+ break;
}
-}
-
-/* Include code shared with pre-decompression boot stage */
-#include "shared.c"
-
-static inline struct svsm_ca *svsm_get_caa(void)
-{
- /*
- * Use rIP-relative references when called early in the boot. If
- * ->use_cas is set, then it is late in the boot and no need
- * to worry about rIP-relative references.
- */
- if (RIP_REL_REF(sev_cfg).use_cas)
- return this_cpu_read(svsm_caa);
- else
- return RIP_REL_REF(boot_svsm_caa);
-}
-
-static u64 svsm_get_caa_pa(void)
-{
- /*
- * Use rIP-relative references when called early in the boot. If
- * ->use_cas is set, then it is late in the boot and no need
- * to worry about rIP-relative references.
- */
- if (RIP_REL_REF(sev_cfg).use_cas)
- return this_cpu_read(svsm_caa_pa);
- else
- return RIP_REL_REF(boot_svsm_caa_pa);
-}
-
-static noinstr void __sev_put_ghcb(struct ghcb_state *state)
-{
- struct sev_es_runtime_data *data;
- struct ghcb *ghcb;
-
- WARN_ON(!irqs_disabled());
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
- if (state->ghcb) {
- /* Restore GHCB from Backup */
- *ghcb = *state->ghcb;
- data->backup_ghcb_active = false;
- state->ghcb = NULL;
- } else {
- /*
- * Invalidate the GHCB so a VMGEXIT instruction issued
- * from userspace won't appear to be valid.
- */
- vc_ghcb_invalidate(ghcb);
- data->ghcb_active = false;
- }
+ return desc_entry;
}
-static int svsm_perform_call_protocol(struct svsm_call *call)
+static void svsm_pval_pages(struct snp_psc_desc *desc)
{
- struct ghcb_state state;
+ struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
+ unsigned int i, pv_4k_count = 0;
+ struct svsm_pvalidate_call *pc;
+ struct svsm_call call = {};
unsigned long flags;
- struct ghcb *ghcb;
+ bool action;
+ u64 pc_pa;
int ret;
/*
@@ -666,180 +288,145 @@ static int svsm_perform_call_protocol(struct svsm_call *call)
flags = native_local_irq_save();
/*
- * Use rip-relative references when called early in the boot. If
- * ghcbs_initialized is set, then it is late in the boot and no need
- * to worry about rip-relative references in called functions.
+ * The SVSM calling area (CA) can support processing 510 entries at a
+ * time. Loop through the Page State Change descriptor until the CA is
+ * full or the last entry in the descriptor is reached, at which time
+ * the SVSM is invoked. This repeats until all entries in the descriptor
+ * are processed.
*/
- if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
- ghcb = __sev_get_ghcb(&state);
- else if (RIP_REL_REF(boot_ghcb))
- ghcb = RIP_REL_REF(boot_ghcb);
- else
- ghcb = NULL;
+ call.caa = svsm_get_caa();
- do {
- ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
- : svsm_perform_msr_protocol(call);
- } while (ret == -EAGAIN);
+ pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+ pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
- if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
- __sev_put_ghcb(&state);
+ /* Protocol 0, Call ID 1 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+ call.rcx = pc_pa;
- native_local_irq_restore(flags);
+ for (i = 0; i <= desc->hdr.end_entry;) {
+ i = svsm_build_ca_from_psc_desc(desc, i, pc);
- return ret;
-}
+ do {
+ ret = svsm_perform_call_protocol(&call);
+ if (!ret)
+ continue;
-void noinstr __sev_es_nmi_complete(void)
-{
- struct ghcb_state state;
- struct ghcb *ghcb;
+ /*
+ * Check if the entry failed because of an RMP mismatch (a
+ * PVALIDATE at 2M was requested, but the page is mapped in
+ * the RMP as 4K).
+ */
- ghcb = __sev_get_ghcb(&state);
+ if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
+ pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
+ /* Save this entry for post-processing at 4K */
+ pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
+
+ /* Skip to the next one unless at the end of the list */
+ pc->cur_index++;
+ if (pc->cur_index < pc->num_entries)
+ ret = -EAGAIN;
+ else
+ ret = 0;
+ }
+ } while (ret == -EAGAIN);
- vc_ghcb_invalidate(ghcb);
- ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
- ghcb_set_sw_exit_info_1(ghcb, 0);
- ghcb_set_sw_exit_info_2(ghcb, 0);
+ if (ret)
+ svsm_pval_terminate(pc, ret, call.rax_out);
+ }
- sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
- VMGEXIT();
+ /* Process any entries that failed to be validated at 2M and validate them at 4K */
+ for (i = 0; i < pv_4k_count; i++) {
+ u64 pfn, pfn_end;
- __sev_put_ghcb(&state);
-}
+ action = pv_4k[i].action;
+ pfn = pv_4k[i].pfn;
+ pfn_end = pfn + 512;
-static u64 __init get_snp_jump_table_addr(void)
-{
- struct snp_secrets_page *secrets;
- void __iomem *mem;
- u64 addr;
+ while (pfn < pfn_end) {
+ pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
- mem = ioremap_encrypted(secrets_pa, PAGE_SIZE);
- if (!mem) {
- pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
- return 0;
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ svsm_pval_terminate(pc, ret, call.rax_out);
+ }
}
- secrets = (__force struct snp_secrets_page *)mem;
-
- addr = secrets->os_area.ap_jump_table_pa;
- iounmap(mem);
-
- return addr;
+ native_local_irq_restore(flags);
}
-static u64 __init get_jump_table_addr(void)
+static void pvalidate_pages(struct snp_psc_desc *desc)
{
- struct ghcb_state state;
- unsigned long flags;
- struct ghcb *ghcb;
- u64 ret = 0;
-
- if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
- return get_snp_jump_table_addr();
-
- local_irq_save(flags);
+ if (snp_vmpl)
+ svsm_pval_pages(desc);
+ else
+ pval_pages(desc);
+}
- ghcb = __sev_get_ghcb(&state);
+static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
+{
+ int cur_entry, end_entry, ret = 0;
+ struct snp_psc_desc *data;
+ struct es_em_ctxt ctxt;
vc_ghcb_invalidate(ghcb);
- ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
- ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
- ghcb_set_sw_exit_info_2(ghcb, 0);
- sev_es_wr_ghcb_msr(__pa(ghcb));
- VMGEXIT();
-
- if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
- ghcb_sw_exit_info_2_is_valid(ghcb))
- ret = ghcb->save.sw_exit_info_2;
-
- __sev_put_ghcb(&state);
-
- local_irq_restore(flags);
+ /* Copy the input desc into GHCB shared buffer */
+ data = (struct snp_psc_desc *)ghcb->shared_buffer;
+ memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
- return ret;
-}
-
-static void __head
-early_set_pages_state(unsigned long vaddr, unsigned long paddr,
- unsigned long npages, enum psc_op op)
-{
- unsigned long paddr_end;
- u64 val;
-
- vaddr = vaddr & PAGE_MASK;
+ /*
+ * As per the GHCB specification, the hypervisor can resume the guest
+	 * before processing all the entries. Check whether all the entries
+	 * have been processed; if not, keep retrying. Note that the hypervisor
+	 * updates the data memory directly to indicate the status, so always
+	 * reference data->hdr.
+	 *
+	 * The strategy here is to wait for the hypervisor to change the page
+	 * state in the RMP table before the guest accesses the memory pages.
+	 * If the page state change was not successful, a later memory access
+	 * will result in a crash.
+ */
+ cur_entry = data->hdr.cur_entry;
+ end_entry = data->hdr.end_entry;
- paddr = paddr & PAGE_MASK;
- paddr_end = paddr + (npages << PAGE_SHIFT);
+ while (data->hdr.cur_entry <= data->hdr.end_entry) {
+ ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
- while (paddr < paddr_end) {
- /* Page validation must be rescinded before changing to shared */
- if (op == SNP_PAGE_STATE_SHARED)
- pvalidate_4k_page(vaddr, paddr, false);
+		/* This call advances data->hdr.cur_entry in the shared buffer. */
+ ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
/*
- * Use the MSR protocol because this function can be called before
- * the GHCB is established.
+		 * The Page State Change VMGEXIT can pass an error code
+		 * through exit_info_2.
*/
- sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
- VMGEXIT();
-
- val = sev_es_rd_ghcb_msr();
-
- if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
- goto e_term;
-
- if (GHCB_MSR_PSC_RESP_VAL(val))
- goto e_term;
+ if (WARN(ret || ghcb->save.sw_exit_info_2,
+ "SNP: PSC failed ret=%d exit_info_2=%llx\n",
+ ret, ghcb->save.sw_exit_info_2)) {
+ ret = 1;
+ goto out;
+ }
- /* Page validation must be performed after changing to private */
- if (op == SNP_PAGE_STATE_PRIVATE)
- pvalidate_4k_page(vaddr, paddr, true);
+		/* Verify that the reserved bit is not set */
+ if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
+ ret = 1;
+ goto out;
+ }
- vaddr += PAGE_SIZE;
- paddr += PAGE_SIZE;
+ /*
+ * Sanity check that entry processing is not going backwards.
+		 * This will happen only if the hypervisor is tricking us.
+ */
+ if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
+"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
+ end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
+ ret = 1;
+ goto out;
+ }
}
- return;
-
-e_term:
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-}
-
-void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
- unsigned long npages)
-{
- /*
- * This can be invoked in early boot while running identity mapped, so
- * use an open coded check for SNP instead of using cc_platform_has().
- * This eliminates worries about jump tables or checking boot_cpu_data
- * in the cc_platform_has() function.
- */
- if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
- return;
-
- /*
- * Ask the hypervisor to mark the memory pages as private in the RMP
- * table.
- */
- early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
-}
-
-void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
- unsigned long npages)
-{
- /*
- * This can be invoked in early boot while running identity mapped, so
- * use an open coded check for SNP instead of using cc_platform_has().
- * This eliminates worries about jump tables or checking boot_cpu_data
- * in the cc_platform_has() function.
- */
- if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
- return;
-
- /* Ask hypervisor to mark the memory pages shared in the RMP table. */
- early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
+out:
+ return ret;
}
static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
@@ -1417,90 +1004,6 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
return 0;
}
-/* Writes to the SVSM CAA MSR are ignored */
-static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
-{
- if (write)
- return ES_OK;
-
- regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
- regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
-
- return ES_OK;
-}
-
-/*
- * TSC related accesses should not exit to the hypervisor when a guest is
- * executing with Secure TSC enabled, so special handling is required for
- * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
- */
-static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
-{
- u64 tsc;
-
- /*
- * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
- * Terminate the SNP guest when the interception is enabled.
- */
- if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
- return ES_VMM_ERROR;
-
- /*
- * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
- * to return undefined values, so ignore all writes.
- *
- * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use
- * the value returned by rdtsc_ordered().
- */
- if (write) {
- WARN_ONCE(1, "TSC MSR writes are verboten!\n");
- return ES_OK;
- }
-
- tsc = rdtsc_ordered();
- regs->ax = lower_32_bits(tsc);
- regs->dx = upper_32_bits(tsc);
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct pt_regs *regs = ctxt->regs;
- enum es_result ret;
- bool write;
-
- /* Is it a WRMSR? */
- write = ctxt->insn.opcode.bytes[1] == 0x30;
-
- switch (regs->cx) {
- case MSR_SVSM_CAA:
- return __vc_handle_msr_caa(regs, write);
- case MSR_IA32_TSC:
- case MSR_AMD64_GUEST_TSC_FREQ:
- if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
- return __vc_handle_secure_tsc_msrs(regs, write);
- break;
- default:
- break;
- }
-
- ghcb_set_rcx(ghcb, regs->cx);
- if (write) {
- ghcb_set_rax(ghcb, regs->ax);
- ghcb_set_rdx(ghcb, regs->dx);
- }
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);
-
- if ((ret == ES_OK) && !write) {
- regs->ax = ghcb->save.rax;
- regs->dx = ghcb->save.rdx;
- }
-
- return ret;
-}
-
static void snp_register_per_cpu_ghcb(void)
{
struct sev_es_runtime_data *data;
@@ -1713,748 +1216,6 @@ void __init sev_es_init_vc_handling(void)
initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}
-static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
-{
- int trapnr = ctxt->fi.vector;
-
- if (trapnr == X86_TRAP_PF)
- native_write_cr2(ctxt->fi.cr2);
-
- ctxt->regs->orig_ax = ctxt->fi.error_code;
- do_early_exception(ctxt->regs, trapnr);
-}
-
-static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
-{
- long *reg_array;
- int offset;
-
- reg_array = (long *)ctxt->regs;
- offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
-
- if (offset < 0)
- return NULL;
-
- offset /= sizeof(long);
-
- return reg_array + offset;
-}
-static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
- unsigned int bytes, bool read)
-{
- u64 exit_code, exit_info_1, exit_info_2;
- unsigned long ghcb_pa = __pa(ghcb);
- enum es_result res;
- phys_addr_t paddr;
- void __user *ref;
-
- ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
- if (ref == (void __user *)-1L)
- return ES_UNSUPPORTED;
-
- exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
-
- res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
- if (res != ES_OK) {
- if (res == ES_EXCEPTION && !read)
- ctxt->fi.error_code |= X86_PF_WRITE;
-
- return res;
- }
-
- exit_info_1 = paddr;
- /* Can never be greater than 8 */
- exit_info_2 = bytes;
-
- ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
-
- return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
-}
-
-/*
- * The MOVS instruction has two memory operands, which raises the
- * problem that it is not known whether the access to the source or the
- * destination caused the #VC exception (and hence whether an MMIO read
- * or write operation needs to be emulated).
- *
- * Instead of playing games with walking page-tables and trying to guess
- * whether the source or destination is an MMIO range, split the move
- * into two operations, a read and a write with only one memory operand.
- * This will cause a nested #VC exception on the MMIO address which can
- * then be handled.
- *
- * This implementation has the benefit that it also supports MOVS where
- * source _and_ destination are MMIO regions.
- *
- * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
- * rare operation. If it turns out to be a performance problem the split
- * operations can be moved to memcpy_fromio() and memcpy_toio().
- */
-static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
- unsigned int bytes)
-{
- unsigned long ds_base, es_base;
- unsigned char *src, *dst;
- unsigned char buffer[8];
- enum es_result ret;
- bool rep;
- int off;
-
- ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
- es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
-
- if (ds_base == -1L || es_base == -1L) {
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
- return ES_EXCEPTION;
- }
-
- src = ds_base + (unsigned char *)ctxt->regs->si;
- dst = es_base + (unsigned char *)ctxt->regs->di;
-
- ret = vc_read_mem(ctxt, src, buffer, bytes);
- if (ret != ES_OK)
- return ret;
-
- ret = vc_write_mem(ctxt, dst, buffer, bytes);
- if (ret != ES_OK)
- return ret;
-
- if (ctxt->regs->flags & X86_EFLAGS_DF)
- off = -bytes;
- else
- off = bytes;
-
- ctxt->regs->si += off;
- ctxt->regs->di += off;
-
- rep = insn_has_rep_prefix(&ctxt->insn);
- if (rep)
- ctxt->regs->cx -= 1;
-
- if (!rep || ctxt->regs->cx == 0)
- return ES_OK;
- else
- return ES_RETRY;
-}
-
-static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- struct insn *insn = &ctxt->insn;
- enum insn_mmio_type mmio;
- unsigned int bytes = 0;
- enum es_result ret;
- u8 sign_byte;
- long *reg_data;
-
- mmio = insn_decode_mmio(insn, &bytes);
- if (mmio == INSN_MMIO_DECODE_FAILED)
- return ES_DECODE_FAILED;
-
- if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
- reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
- if (!reg_data)
- return ES_DECODE_FAILED;
- }
-
- if (user_mode(ctxt->regs))
- return ES_UNSUPPORTED;
-
- switch (mmio) {
- case INSN_MMIO_WRITE:
- memcpy(ghcb->shared_buffer, reg_data, bytes);
- ret = vc_do_mmio(ghcb, ctxt, bytes, false);
- break;
- case INSN_MMIO_WRITE_IMM:
- memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
- ret = vc_do_mmio(ghcb, ctxt, bytes, false);
- break;
- case INSN_MMIO_READ:
- ret = vc_do_mmio(ghcb, ctxt, bytes, true);
- if (ret)
- break;
-
- /* Zero-extend for 32-bit operation */
- if (bytes == 4)
- *reg_data = 0;
-
- memcpy(reg_data, ghcb->shared_buffer, bytes);
- break;
- case INSN_MMIO_READ_ZERO_EXTEND:
- ret = vc_do_mmio(ghcb, ctxt, bytes, true);
- if (ret)
- break;
-
- /* Zero extend based on operand size */
- memset(reg_data, 0, insn->opnd_bytes);
- memcpy(reg_data, ghcb->shared_buffer, bytes);
- break;
- case INSN_MMIO_READ_SIGN_EXTEND:
- ret = vc_do_mmio(ghcb, ctxt, bytes, true);
- if (ret)
- break;
-
- if (bytes == 1) {
- u8 *val = (u8 *)ghcb->shared_buffer;
-
- sign_byte = (*val & 0x80) ? 0xff : 0x00;
- } else {
- u16 *val = (u16 *)ghcb->shared_buffer;
-
- sign_byte = (*val & 0x8000) ? 0xff : 0x00;
- }
-
- /* Sign extend based on operand size */
- memset(reg_data, sign_byte, insn->opnd_bytes);
- memcpy(reg_data, ghcb->shared_buffer, bytes);
- break;
- case INSN_MMIO_MOVS:
- ret = vc_handle_mmio_movs(ctxt, bytes);
- break;
- default:
- ret = ES_UNSUPPORTED;
- break;
- }
-
- return ret;
-}
-
-static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
- long val, *reg = vc_insn_get_rm(ctxt);
- enum es_result ret;
-
- if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
- return ES_VMM_ERROR;
-
- if (!reg)
- return ES_DECODE_FAILED;
-
- val = *reg;
-
- /* Upper 32 bits must be written as zeroes */
- if (val >> 32) {
- ctxt->fi.vector = X86_TRAP_GP;
- ctxt->fi.error_code = 0;
- return ES_EXCEPTION;
- }
-
- /* Clear out other reserved bits and set bit 10 */
- val = (val & 0xffff23ffL) | BIT(10);
-
- /* Early non-zero writes to DR7 are not supported */
- if (!data && (val & ~DR7_RESET_VALUE))
- return ES_UNSUPPORTED;
-
- /* Using a value of 0 for ExitInfo1 means RAX holds the value */
- ghcb_set_rax(ghcb, val);
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (data)
- data->dr7 = val;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
- long *reg = vc_insn_get_rm(ctxt);
-
- if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
- return ES_VMM_ERROR;
-
- if (!reg)
- return ES_DECODE_FAILED;
-
- if (data)
- *reg = data->dr7;
- else
- *reg = DR7_RESET_VALUE;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
-}
-
-static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
-{
- enum es_result ret;
-
- ghcb_set_rcx(ghcb, ctxt->regs->cx);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
- return ES_VMM_ERROR;
-
- ctxt->regs->ax = ghcb->save.rax;
- ctxt->regs->dx = ghcb->save.rdx;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_monitor(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- /*
- * Treat it as a NOP and do not leak a physical address to the
- * hypervisor.
- */
- return ES_OK;
-}
-
-static enum es_result vc_handle_mwait(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- /* Treat the same as MONITOR/MONITORX */
- return ES_OK;
-}
-
-static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- enum es_result ret;
-
- ghcb_set_rax(ghcb, ctxt->regs->ax);
- ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
-
- if (x86_platform.hyper.sev_es_hcall_prepare)
- x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
-
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
- if (ret != ES_OK)
- return ret;
-
- if (!ghcb_rax_is_valid(ghcb))
- return ES_VMM_ERROR;
-
- ctxt->regs->ax = ghcb->save.rax;
-
- /*
- * Call sev_es_hcall_finish() after regs->ax is already set.
- * This allows the hypervisor handler to overwrite it again if
- * necessary.
- */
- if (x86_platform.hyper.sev_es_hcall_finish &&
- !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
- return ES_VMM_ERROR;
-
- return ES_OK;
-}
-
-static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
- struct es_em_ctxt *ctxt)
-{
- /*
-	 * Calling exc_alignment_check() directly does not work, because it
- * enables IRQs and the GHCB is active. Forward the exception and call
- * it later from vc_forward_exception().
- */
- ctxt->fi.vector = X86_TRAP_AC;
- ctxt->fi.error_code = 0;
- return ES_EXCEPTION;
-}
-
-static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
- struct ghcb *ghcb,
- unsigned long exit_code)
-{
- enum es_result result = vc_check_opcode_bytes(ctxt, exit_code);
-
- if (result != ES_OK)
- return result;
-
- switch (exit_code) {
- case SVM_EXIT_READ_DR7:
- result = vc_handle_dr7_read(ghcb, ctxt);
- break;
- case SVM_EXIT_WRITE_DR7:
- result = vc_handle_dr7_write(ghcb, ctxt);
- break;
- case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
- result = vc_handle_trap_ac(ghcb, ctxt);
- break;
- case SVM_EXIT_RDTSC:
- case SVM_EXIT_RDTSCP:
- result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
- break;
- case SVM_EXIT_RDPMC:
- result = vc_handle_rdpmc(ghcb, ctxt);
- break;
- case SVM_EXIT_INVD:
- pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
- result = ES_UNSUPPORTED;
- break;
- case SVM_EXIT_CPUID:
- result = vc_handle_cpuid(ghcb, ctxt);
- break;
- case SVM_EXIT_IOIO:
- result = vc_handle_ioio(ghcb, ctxt);
- break;
- case SVM_EXIT_MSR:
- result = vc_handle_msr(ghcb, ctxt);
- break;
- case SVM_EXIT_VMMCALL:
- result = vc_handle_vmmcall(ghcb, ctxt);
- break;
- case SVM_EXIT_WBINVD:
- result = vc_handle_wbinvd(ghcb, ctxt);
- break;
- case SVM_EXIT_MONITOR:
- result = vc_handle_monitor(ghcb, ctxt);
- break;
- case SVM_EXIT_MWAIT:
- result = vc_handle_mwait(ghcb, ctxt);
- break;
- case SVM_EXIT_NPF:
- result = vc_handle_mmio(ghcb, ctxt);
- break;
- default:
- /*
- * Unexpected #VC exception
- */
- result = ES_UNSUPPORTED;
- }
-
- return result;
-}
-
-static __always_inline bool is_vc2_stack(unsigned long sp)
-{
- return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
-}
-
-static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
-{
- unsigned long sp, prev_sp;
-
- sp = (unsigned long)regs;
- prev_sp = regs->sp;
-
- /*
- * If the code was already executing on the VC2 stack when the #VC
- * happened, let it proceed to the normal handling routine. This way the
- * code executing on the VC2 stack can cause #VC exceptions to get handled.
- */
- return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
-}
-
-static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
-{
- struct ghcb_state state;
- struct es_em_ctxt ctxt;
- enum es_result result;
- struct ghcb *ghcb;
- bool ret = true;
-
- ghcb = __sev_get_ghcb(&state);
-
- vc_ghcb_invalidate(ghcb);
- result = vc_init_em_ctxt(&ctxt, regs, error_code);
-
- if (result == ES_OK)
- result = vc_handle_exitcode(&ctxt, ghcb, error_code);
-
- __sev_put_ghcb(&state);
-
- /* Done - now check the result */
- switch (result) {
- case ES_OK:
- vc_finish_insn(&ctxt);
- break;
- case ES_UNSUPPORTED:
- pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
- error_code, regs->ip);
- ret = false;
- break;
- case ES_VMM_ERROR:
- pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
- error_code, regs->ip);
- ret = false;
- break;
- case ES_DECODE_FAILED:
- pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
- error_code, regs->ip);
- ret = false;
- break;
- case ES_EXCEPTION:
- vc_forward_exception(&ctxt);
- break;
- case ES_RETRY:
- /* Nothing to do */
- break;
- default:
- pr_emerg("Unknown result in %s():%d\n", __func__, result);
- /*
- * Emulating the instruction which caused the #VC exception
- * failed - can't continue so print debug information
- */
- BUG();
- }
-
- return ret;
-}
-
-static __always_inline bool vc_is_db(unsigned long error_code)
-{
- return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
-}
-
-/*
- * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
- * and will panic when an error happens.
- */
-DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
-{
- irqentry_state_t irq_state;
-
- /*
- * With the current implementation it is always possible to switch to a
- * safe stack because #VC exceptions only happen at known places, like
- * intercepted instructions or accesses to MMIO areas/IO ports. They can
- * also happen with code instrumentation when the hypervisor intercepts
- * #DB, but the critical paths are forbidden to be instrumented, so #DB
- * exceptions currently also only happen in safe places.
- *
- * But keep this here in case the noinstr annotations are violated due
- * to bug elsewhere.
- */
- if (unlikely(vc_from_invalid_context(regs))) {
- instrumentation_begin();
- panic("Can't handle #VC exception from unsupported context\n");
- instrumentation_end();
- }
-
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (vc_is_db(error_code)) {
- exc_debug(regs);
- return;
- }
-
- irq_state = irqentry_nmi_enter(regs);
-
- instrumentation_begin();
-
- if (!vc_raw_handle_exception(regs, error_code)) {
- /* Show some debug info */
- show_regs(regs);
-
- /* Ask hypervisor to sev_es_terminate */
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-
- /* If that fails and we get here - just panic */
- panic("Returned from Terminate-Request to Hypervisor\n");
- }
-
- instrumentation_end();
- irqentry_nmi_exit(regs, irq_state);
-}
-
-/*
- * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
- * and will kill the current task with SIGBUS when an error happens.
- */
-DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
-{
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (vc_is_db(error_code)) {
- noist_exc_debug(regs);
- return;
- }
-
- irqentry_enter_from_user_mode(regs);
- instrumentation_begin();
-
- if (!vc_raw_handle_exception(regs, error_code)) {
- /*
- * Do not kill the machine if user-space triggered the
- * exception. Send SIGBUS instead and let user-space deal with
- * it.
- */
- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
- }
-
- instrumentation_end();
- irqentry_exit_to_user_mode(regs);
-}
-
-bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
-{
- unsigned long exit_code = regs->orig_ax;
- struct es_em_ctxt ctxt;
- enum es_result result;
-
- vc_ghcb_invalidate(boot_ghcb);
-
- result = vc_init_em_ctxt(&ctxt, regs, exit_code);
- if (result == ES_OK)
- result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
-
- /* Done - now check the result */
- switch (result) {
- case ES_OK:
- vc_finish_insn(&ctxt);
- break;
- case ES_UNSUPPORTED:
- early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
- exit_code, regs->ip);
- goto fail;
- case ES_VMM_ERROR:
- early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
- exit_code, regs->ip);
- goto fail;
- case ES_DECODE_FAILED:
- early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
- exit_code, regs->ip);
- goto fail;
- case ES_EXCEPTION:
- vc_early_forward_exception(&ctxt);
- break;
- case ES_RETRY:
- /* Nothing to do */
- break;
- default:
- BUG();
- }
-
- return true;
-
-fail:
- show_regs(regs);
-
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-}
-
-/*
- * Initial set up of SNP relies on information provided by the
- * Confidential Computing blob, which can be passed to the kernel
- * in the following ways, depending on how it is booted:
- *
- * - when booted via the boot/decompress kernel:
- * - via boot_params
- *
- * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
- * - via a setup_data entry, as defined by the Linux Boot Protocol
- *
- * Scan for the blob in that order.
- */
-static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
-{
- struct cc_blob_sev_info *cc_info;
-
- /* Boot kernel would have passed the CC blob via boot_params. */
- if (bp->cc_blob_address) {
- cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
- goto found_cc_info;
- }
-
- /*
- * If kernel was booted directly, without the use of the
- * boot/decompression kernel, the CC blob may have been passed via
- * setup_data instead.
- */
- cc_info = find_cc_blob_setup_data(bp);
- if (!cc_info)
- return NULL;
-
-found_cc_info:
- if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
- snp_abort();
-
- return cc_info;
-}
-
-static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
-{
- struct svsm_call call = {};
- int ret;
- u64 pa;
-
- /*
- * Record the SVSM Calling Area address (CAA) if the guest is not
- * running at VMPL0. The CA will be used to communicate with the
- * SVSM to perform the SVSM services.
- */
- if (!svsm_setup_ca(cc_info))
- return;
-
- /*
- * It is very early in the boot and the kernel is running identity
- * mapped but without having adjusted the pagetables to where the
-	 * kernel was loaded (physbase), so get the CA address using
- * RIP-relative addressing.
- */
- pa = (u64)&RIP_REL_REF(boot_svsm_ca_page);
-
- /*
- * Switch over to the boot SVSM CA while the current CA is still
- * addressable. There is no GHCB at this point so use the MSR protocol.
- *
- * SVSM_CORE_REMAP_CA call:
- * RAX = 0 (Protocol=0, CallID=0)
- * RCX = New CA GPA
- */
- call.caa = svsm_get_caa();
- call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
- call.rcx = pa;
- ret = svsm_perform_call_protocol(&call);
- if (ret)
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
-
- RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)pa;
- RIP_REL_REF(boot_svsm_caa_pa) = pa;
-}
-
-bool __head snp_init(struct boot_params *bp)
-{
- struct cc_blob_sev_info *cc_info;
-
- if (!bp)
- return false;
-
- cc_info = find_cc_blob(bp);
- if (!cc_info)
- return false;
-
- if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
- secrets_pa = cc_info->secrets_phys;
- else
- return false;
-
- setup_cpuid_table(cc_info);
-
- svsm_setup(cc_info);
-
- /*
- * The CC blob will be used later to access the secrets page. Cache
- * it here like the boot kernel does.
- */
- bp->cc_blob_address = (u32)(unsigned long)cc_info;
-
- return true;
-}
-
-void __head __noreturn snp_abort(void)
-{
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
-}
-
/*
* SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
* enabled, as the alternative (fallback) logic for DMI probing in the legacy
@@ -2835,7 +1596,7 @@ struct snp_msg_desc *snp_msg_alloc(void)
if (!mdesc)
return ERR_PTR(-ENOMEM);
- mem = ioremap_encrypted(secrets_pa, PAGE_SIZE);
+ mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE);
if (!mem)
goto e_free_mdesc;
diff --git a/arch/x86/coco/sev/sev-nmi.c b/arch/x86/coco/sev/sev-nmi.c
new file mode 100644
index 000000000000..d8dfaddfb367
--- /dev/null
+++ b/arch/x86/coco/sev/sev-nmi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "SEV: " fmt
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/msr.h>
+#include <asm/ptrace.h>
+#include <asm/sev.h>
+#include <asm/sev-internal.h>
+
+static __always_inline bool on_vc_stack(struct pt_regs *regs)
+{
+ unsigned long sp = regs->sp;
+
+ /* User-mode RSP is not trusted */
+ if (user_mode(regs))
+ return false;
+
+ /* SYSCALL gap still has user-mode RSP */
+ if (ip_within_syscall_gap(regs))
+ return false;
+
+ return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
+}
+
+/*
+ * This function handles the case when an NMI is raised in the #VC
+ * exception handler entry code, before the #VC handler has switched off
+ * its IST stack. In this case, the IST entry for #VC must be adjusted,
+ * so that any nested #VC exception will not overwrite the stack
+ * contents of the interrupted #VC handler.
+ *
+ * The IST entry is adjusted unconditionally so that it can also be
+ * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
+ * nested sev_es_ist_exit() call may adjust back the IST entry too
+ * early.
+ *
+ * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
+ * on the NMI IST stack, as they are only called from NMI handling code
+ * right now.
+ */
+void noinstr __sev_es_ist_enter(struct pt_regs *regs)
+{
+ unsigned long old_ist, new_ist;
+
+ /* Read old IST entry */
+ new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ /*
+ * If NMI happened while on the #VC IST stack, set the new IST
+ * value below regs->sp, so that the interrupted stack frame is
+ * not overwritten by subsequent #VC exceptions.
+ */
+ if (on_vc_stack(regs))
+ new_ist = regs->sp;
+
+ /*
+	 * Reserve an additional 8 bytes and store the old IST value so this
+	 * adjustment can be unrolled in __sev_es_ist_exit().
+ */
+ new_ist -= sizeof(old_ist);
+ *(unsigned long *)new_ist = old_ist;
+
+ /* Set new IST entry */
+ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
+}
+
+void noinstr __sev_es_ist_exit(void)
+{
+ unsigned long ist;
+
+ /* Read IST entry */
+ ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
+ return;
+
+ /* Read back old IST entry and write it to the TSS */
+ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+}
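+
+/*
+ * Worked example of the IST adjustment (purely illustrative addresses,
+ * not taken from this patch): assume the #VC IST entry is
+ * 0xffffc90000001000 and an NMI interrupts the #VC handler while
+ * regs->sp == 0xffffc90000000f80. Then __sev_es_ist_enter() computes:
+ *
+ *   new_ist = 0xffffc90000000f80 - 8 = 0xffffc90000000f78
+ *   *(unsigned long *)0xffffc90000000f78 = 0xffffc90000001000
+ *
+ * and writes 0xffffc90000000f78 to the IST entry. __sev_es_ist_exit()
+ * later reads the IST entry, loads the old value stored at that address
+ * and writes 0xffffc90000001000 back, unrolling the adjustment even when
+ * NMIs nest.
+ */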
+
+void noinstr __sev_es_nmi_complete(void)
+{
+ struct ghcb_state state;
+ struct ghcb *ghcb;
+
+ ghcb = __sev_get_ghcb(&state);
+
+ vc_ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+ sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
+ VMGEXIT();
+
+ __sev_put_ghcb(&state);
+}
diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c
new file mode 100644
index 000000000000..b4895c648024
--- /dev/null
+++ b/arch/x86/coco/sev/vc-handle.c
@@ -0,0 +1,1061 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#define pr_fmt(fmt) "SEV: " fmt
+
+#include <linux/sched/debug.h> /* For show_regs() */
+#include <linux/cc_platform.h>
+#include <linux/printk.h>
+#include <linux/mm_types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/psp-sev.h>
+#include <uapi/linux/sev-guest.h>
+
+#include <asm/init.h>
+#include <asm/stacktrace.h>
+#include <asm/sev.h>
+#include <asm/sev-internal.h>
+#include <asm/insn-eval.h>
+#include <asm/fpu/xcr.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/svm.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/apic.h>
+#include <asm/cpuid.h>
+
+static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ unsigned long vaddr, phys_addr_t *paddr)
+{
+ unsigned long va = (unsigned long)vaddr;
+ unsigned int level;
+ phys_addr_t pa;
+ pgd_t *pgd;
+ pte_t *pte;
+
+ pgd = __va(read_cr3_pa());
+ pgd = &pgd[pgd_index(va)];
+ pte = lookup_address_in_pgd(pgd, va, &level);
+ if (!pte) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.cr2 = vaddr;
+ ctxt->fi.error_code = 0;
+
+ if (user_mode(ctxt->regs))
+ ctxt->fi.error_code |= X86_PF_USER;
+
+ return ES_EXCEPTION;
+ }
+
+ if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
+ /* Emulated MMIO to/from encrypted memory not supported */
+ return ES_UNSUPPORTED;
+
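+	/*
+	 * Combine the PFN with the offset bits that the mapping level does
+	 * not translate (the low 12 bits for a 4K mapping, the low 21 bits
+	 * for a 2M mapping).
+	 */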
+ pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ pa |= va & ~page_level_mask(level);
+
+ *paddr = pa;
+
+ return ES_OK;
+}
+
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ BUG_ON(size > 4);
+
+ if (user_mode(ctxt->regs)) {
+ struct thread_struct *t = &current->thread;
+ struct io_bitmap *iobm = t->io_bitmap;
+ size_t idx;
+
+ if (!iobm)
+ goto fault;
+
+ for (idx = port; idx < port + size; ++idx) {
+ if (test_bit(idx, iobm->bitmap))
+ goto fault;
+ }
+ }
+
+ return ES_OK;
+
+fault:
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+
+ return ES_EXCEPTION;
+}
+
+void vc_forward_exception(struct es_em_ctxt *ctxt)
+{
+ long error_code = ctxt->fi.error_code;
+ int trapnr = ctxt->fi.vector;
+
+ ctxt->regs->orig_ax = ctxt->fi.error_code;
+
+ switch (trapnr) {
+ case X86_TRAP_GP:
+ exc_general_protection(ctxt->regs, error_code);
+ break;
+ case X86_TRAP_UD:
+ exc_invalid_op(ctxt->regs);
+ break;
+ case X86_TRAP_PF:
+ write_cr2(ctxt->fi.cr2);
+ exc_page_fault(ctxt->regs, error_code);
+ break;
+ case X86_TRAP_AC:
+ exc_alignment_check(ctxt->regs, error_code);
+ break;
+ default:
+ pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
+ BUG();
+ }
+}
+
+static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
+ unsigned char *buffer)
+{
+ return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+}
+
+static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ int insn_bytes;
+
+ insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+ if (insn_bytes == 0) {
+ /* Nothing could be copied */
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
+ } else if (insn_bytes == -EINVAL) {
+ /* Effective RIP could not be calculated */
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ ctxt->fi.cr2 = 0;
+ return ES_EXCEPTION;
+ }
+
+ if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
+ return ES_DECODE_FAILED;
+
+ if (ctxt->insn.immediate.got)
+ return ES_OK;
+ else
+ return ES_DECODE_FAILED;
+}
+
+static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ int res, ret;
+
+ res = vc_fetch_insn_kernel(ctxt, buffer);
+ if (res) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
+ }
+
+ ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+ if (ret < 0)
+ return ES_DECODE_FAILED;
+ else
+ return ES_OK;
+}
+
+static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+ if (user_mode(ctxt->regs))
+ return __vc_decode_user_insn(ctxt);
+ else
+ return __vc_decode_kern_insn(ctxt);
+}
+
+static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+ char *dst, char *buf, size_t size)
+{
+ unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
+
+ /*
+ * This function uses __put_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __put_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __put_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_to_user() here because
+ * vc_write_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+	 * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
+ switch (size) {
+ case 1: {
+ u8 d1;
+ u8 __user *target = (u8 __user *)dst;
+
+ memcpy(&d1, buf, 1);
+ if (__put_user(d1, target))
+ goto fault;
+ break;
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *target = (u16 __user *)dst;
+
+ memcpy(&d2, buf, 2);
+ if (__put_user(d2, target))
+ goto fault;
+ break;
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *target = (u32 __user *)dst;
+
+ memcpy(&d4, buf, 4);
+ if (__put_user(d4, target))
+ goto fault;
+ break;
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *target = (u64 __user *)dst;
+
+ memcpy(&d8, buf, 8);
+ if (__put_user(d8, target))
+ goto fault;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return ES_UNSUPPORTED;
+ }
+
+ return ES_OK;
+
+fault:
+ if (user_mode(ctxt->regs))
+ error_code |= X86_PF_USER;
+
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = error_code;
+ ctxt->fi.cr2 = (unsigned long)dst;
+
+ return ES_EXCEPTION;
+}
+
+static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ char *src, char *buf, size_t size)
+{
+ unsigned long error_code = X86_PF_PROT;
+
+ /*
+ * This function uses __get_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __get_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __get_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_from_user() here because
+ * vc_read_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+	 * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
+ switch (size) {
+ case 1: {
+ u8 d1;
+ u8 __user *s = (u8 __user *)src;
+
+ if (__get_user(d1, s))
+ goto fault;
+ memcpy(buf, &d1, 1);
+ break;
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *s = (u16 __user *)src;
+
+ if (__get_user(d2, s))
+ goto fault;
+ memcpy(buf, &d2, 2);
+ break;
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *s = (u32 __user *)src;
+
+ if (__get_user(d4, s))
+ goto fault;
+ memcpy(buf, &d4, 4);
+ break;
+ }
+ case 8: {
+ u64 d8;
+		u64 __user *s = (u64 __user *)src;
+
+ if (__get_user(d8, s))
+ goto fault;
+ memcpy(buf, &d8, 8);
+ break;
+ }
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return ES_UNSUPPORTED;
+ }
+
+ return ES_OK;
+
+fault:
+ if (user_mode(ctxt->regs))
+ error_code |= X86_PF_USER;
+
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = error_code;
+ ctxt->fi.cr2 = (unsigned long)src;
+
+ return ES_EXCEPTION;
+}
+
+#define sev_printk(fmt, ...) printk(fmt, ##__VA_ARGS__)
+
+#include "vc-shared.c"
+
+/* Writes to the SVSM CAA MSR are ignored */
+static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
+{
+ if (write)
+ return ES_OK;
+
+ regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
+ regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
+
+ return ES_OK;
+}
+
+/*
+ * TSC related accesses should not exit to the hypervisor when a guest is
+ * executing with Secure TSC enabled, so special handling is required for
+ * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
+ */
+static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
+{
+ u64 tsc;
+
+ /*
+ * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
+ * Terminate the SNP guest when the interception is enabled.
+ */
+ if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
+ return ES_VMM_ERROR;
+
+ /*
+ * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
+ * to return undefined values, so ignore all writes.
+ *
+	 * Reads: Reads of MSR_IA32_TSC should return the current TSC value; use
+ * the value returned by rdtsc_ordered().
+ */
+ if (write) {
+ WARN_ONCE(1, "TSC MSR writes are verboten!\n");
+ return ES_OK;
+ }
+
+ tsc = rdtsc_ordered();
+ regs->ax = lower_32_bits(tsc);
+ regs->dx = upper_32_bits(tsc);
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ enum es_result ret;
+ bool write;
+
+ /* Is it a WRMSR? */
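+	/* (WRMSR is opcode 0f 30, RDMSR is 0f 32) */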
+ write = ctxt->insn.opcode.bytes[1] == 0x30;
+
+ switch (regs->cx) {
+ case MSR_SVSM_CAA:
+ return __vc_handle_msr_caa(regs, write);
+ case MSR_IA32_TSC:
+ case MSR_AMD64_GUEST_TSC_FREQ:
+ if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
+ return __vc_handle_secure_tsc_msrs(regs, write);
+ break;
+ default:
+ break;
+ }
+
+ ghcb_set_rcx(ghcb, regs->cx);
+ if (write) {
+ ghcb_set_rax(ghcb, regs->ax);
+ ghcb_set_rdx(ghcb, regs->dx);
+ }
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);
+
+ if ((ret == ES_OK) && !write) {
+ regs->ax = ghcb->save.rax;
+ regs->dx = ghcb->save.rdx;
+ }
+
+ return ret;
+}
+
+static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
+{
+ int trapnr = ctxt->fi.vector;
+
+ if (trapnr == X86_TRAP_PF)
+ native_write_cr2(ctxt->fi.cr2);
+
+ ctxt->regs->orig_ax = ctxt->fi.error_code;
+ do_early_exception(ctxt->regs, trapnr);
+}
+
+static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
+{
+ long *reg_array;
+ int offset;
+
+ reg_array = (long *)ctxt->regs;
+ offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
+
+ if (offset < 0)
+ return NULL;
+
+ offset /= sizeof(long);
+
+ return reg_array + offset;
+}
+
+static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ unsigned int bytes, bool read)
+{
+ u64 exit_code, exit_info_1, exit_info_2;
+ unsigned long ghcb_pa = __pa(ghcb);
+ enum es_result res;
+ phys_addr_t paddr;
+ void __user *ref;
+
+ ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
+ if (ref == (void __user *)-1L)
+ return ES_UNSUPPORTED;
+
+ exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
+
+ res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
+ if (res != ES_OK) {
+ if (res == ES_EXCEPTION && !read)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return res;
+ }
+
+ exit_info_1 = paddr;
+ /* Can never be greater than 8 */
+ exit_info_2 = bytes;
+
+ ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
+
+ return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
+}
+
+/*
+ * The MOVS instruction has two memory operands, which raises the
+ * problem that it is not known whether the access to the source or the
+ * destination caused the #VC exception (and hence whether an MMIO read
+ * or write operation needs to be emulated).
+ *
+ * Instead of playing games with walking page-tables and trying to guess
+ * whether the source or destination is an MMIO range, split the move
+ * into two operations, a read and a write with only one memory operand.
+ * This will cause a nested #VC exception on the MMIO address which can
+ * then be handled.
+ *
+ * This implementation has the benefit that it also supports MOVS where
+ * source _and_ destination are MMIO regions.
+ *
+ * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
+ * rare operation. If it turns out to be a performance problem the split
+ * operations can be moved to memcpy_fromio() and memcpy_toio().
+ */
+static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
+ unsigned int bytes)
+{
+ unsigned long ds_base, es_base;
+ unsigned char *src, *dst;
+ unsigned char buffer[8];
+ enum es_result ret;
+ bool rep;
+ int off;
+
+ ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
+ es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
+
+ if (ds_base == -1L || es_base == -1L) {
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+ }
+
+ src = ds_base + (unsigned char *)ctxt->regs->si;
+ dst = es_base + (unsigned char *)ctxt->regs->di;
+
+ ret = vc_read_mem(ctxt, src, buffer, bytes);
+ if (ret != ES_OK)
+ return ret;
+
+ ret = vc_write_mem(ctxt, dst, buffer, bytes);
+ if (ret != ES_OK)
+ return ret;
+
+ if (ctxt->regs->flags & X86_EFLAGS_DF)
+ off = -bytes;
+ else
+ off = bytes;
+
+ ctxt->regs->si += off;
+ ctxt->regs->di += off;
+
+ rep = insn_has_rep_prefix(&ctxt->insn);
+ if (rep)
+ ctxt->regs->cx -= 1;
+
+ if (!rep || ctxt->regs->cx == 0)
+ return ES_OK;
+ else
+ return ES_RETRY;
+}
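Returning ES_RETRY tells the caller not to advance rIP (vc_finish_insn() runs only on ES_OK), so the interrupted MOVS re-executes and faults again, emulating one element per #VC exception until rCX reaches zero. A hypothetical view of the resulting iteration, not the actual dispatch code:

	/* Each iteration corresponds to one #VC exception on the MOVS */
	do {
		ret = vc_handle_mmio_movs(ctxt, bytes);	/* one element */
	} while (ret == ES_RETRY);	/* rIP advances only on ES_OK */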
+
+static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct insn *insn = &ctxt->insn;
+ enum insn_mmio_type mmio;
+ unsigned int bytes = 0;
+ enum es_result ret;
+ u8 sign_byte;
+ long *reg_data;
+
+ mmio = insn_decode_mmio(insn, &bytes);
+ if (mmio == INSN_MMIO_DECODE_FAILED)
+ return ES_DECODE_FAILED;
+
+ if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
+ reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
+ if (!reg_data)
+ return ES_DECODE_FAILED;
+ }
+
+ if (user_mode(ctxt->regs))
+ return ES_UNSUPPORTED;
+
+ switch (mmio) {
+ case INSN_MMIO_WRITE:
+ memcpy(ghcb->shared_buffer, reg_data, bytes);
+ ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+ break;
+ case INSN_MMIO_WRITE_IMM:
+ memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
+ ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+ break;
+ case INSN_MMIO_READ:
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ /* Zero-extend for 32-bit operation */
+ if (bytes == 4)
+ *reg_data = 0;
+
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+ case INSN_MMIO_READ_ZERO_EXTEND:
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ /* Zero extend based on operand size */
+ memset(reg_data, 0, insn->opnd_bytes);
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+ case INSN_MMIO_READ_SIGN_EXTEND:
+ ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+ if (ret)
+ break;
+
+ if (bytes == 1) {
+ u8 *val = (u8 *)ghcb->shared_buffer;
+
+ sign_byte = (*val & 0x80) ? 0xff : 0x00;
+ } else {
+ u16 *val = (u16 *)ghcb->shared_buffer;
+
+ sign_byte = (*val & 0x8000) ? 0xff : 0x00;
+ }
+
+ /* Sign extend based on operand size */
+ memset(reg_data, sign_byte, insn->opnd_bytes);
+ memcpy(reg_data, ghcb->shared_buffer, bytes);
+ break;
+ case INSN_MMIO_MOVS:
+ ret = vc_handle_mmio_movs(ctxt, bytes);
+ break;
+ default:
+ ret = ES_UNSUPPORTED;
+ break;
+ }
+
+ return ret;
+}
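For INSN_MMIO_READ_SIGN_EXTEND the sign byte is replicated across the full destination before the MMIO bytes are copied over the low end. A worked example with hypothetical values, for a movsbq-style 1-byte read into a 64-bit register:

	u8 mmio_val  = 0x80;				/* from shared_buffer */
	u8 sign_byte = (mmio_val & 0x80) ? 0xff : 0x00;	/* 0xff */
	u64 reg;
	memset(&reg, sign_byte, 8);	/* insn->opnd_bytes == 8 */
	memcpy(&reg, &mmio_val, 1);	/* reg == 0xffffffffffffff80 */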
+
+static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ long val, *reg = vc_insn_get_rm(ctxt);
+ enum es_result ret;
+
+ if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+ return ES_VMM_ERROR;
+
+ if (!reg)
+ return ES_DECODE_FAILED;
+
+ val = *reg;
+
+ /* Upper 32 bits must be written as zeroes */
+ if (val >> 32) {
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+ }
+
+ /* Clear out other reserved bits and set bit 10 */
+ val = (val & 0xffff23ffL) | BIT(10);
+
+ /* Early non-zero writes to DR7 are not supported */
+ if (!data && (val & ~DR7_RESET_VALUE))
+ return ES_UNSUPPORTED;
+
+ /* Using a value of 0 for ExitInfo1 means RAX holds the value */
+ ghcb_set_rax(ghcb, val);
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (data)
+ data->dr7 = val;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ long *reg = vc_insn_get_rm(ctxt);
+
+ if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+ return ES_VMM_ERROR;
+
+ if (!reg)
+ return ES_DECODE_FAILED;
+
+ if (data)
+ *reg = data->dr7;
+ else
+ *reg = DR7_RESET_VALUE;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
+}
+
+static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ enum es_result ret;
+
+ ghcb_set_rcx(ghcb, ctxt->regs->cx);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+ ctxt->regs->dx = ghcb->save.rdx;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_monitor(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /*
+ * Treat it as a NOP and do not leak a physical address to the
+ * hypervisor.
+ */
+ return ES_OK;
+}
+
+static enum es_result vc_handle_mwait(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /* Treat the same as MONITOR/MONITORX */
+ return ES_OK;
+}
+
+static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ enum es_result ret;
+
+ ghcb_set_rax(ghcb, ctxt->regs->ax);
+ ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
+
+ if (x86_platform.hyper.sev_es_hcall_prepare)
+ x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!ghcb_rax_is_valid(ghcb))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+
+ /*
+ * Call sev_es_hcall_finish() after regs->ax is already set.
+ * This allows the hypervisor handler to overwrite it again if
+ * necessary.
+ */
+ if (x86_platform.hyper.sev_es_hcall_finish &&
+ !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
+ return ES_VMM_ERROR;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ /*
+ * Calling ecx_alignment_check() directly does not work, because it
+ * enables IRQs and the GHCB is active. Forward the exception and call
+ * it later from vc_forward_exception().
+ */
+ ctxt->fi.vector = X86_TRAP_AC;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTION;
+}
+
+static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
+ struct ghcb *ghcb,
+ unsigned long exit_code)
+{
+ enum es_result result = vc_check_opcode_bytes(ctxt, exit_code);
+
+ if (result != ES_OK)
+ return result;
+
+ switch (exit_code) {
+ case SVM_EXIT_READ_DR7:
+ result = vc_handle_dr7_read(ghcb, ctxt);
+ break;
+ case SVM_EXIT_WRITE_DR7:
+ result = vc_handle_dr7_write(ghcb, ctxt);
+ break;
+ case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
+ result = vc_handle_trap_ac(ghcb, ctxt);
+ break;
+ case SVM_EXIT_RDTSC:
+ case SVM_EXIT_RDTSCP:
+ result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
+ break;
+ case SVM_EXIT_RDPMC:
+ result = vc_handle_rdpmc(ghcb, ctxt);
+ break;
+ case SVM_EXIT_INVD:
+ pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
+ result = ES_UNSUPPORTED;
+ break;
+ case SVM_EXIT_CPUID:
+ result = vc_handle_cpuid(ghcb, ctxt);
+ break;
+ case SVM_EXIT_IOIO:
+ result = vc_handle_ioio(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MSR:
+ result = vc_handle_msr(ghcb, ctxt);
+ break;
+ case SVM_EXIT_VMMCALL:
+ result = vc_handle_vmmcall(ghcb, ctxt);
+ break;
+ case SVM_EXIT_WBINVD:
+ result = vc_handle_wbinvd(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MONITOR:
+ result = vc_handle_monitor(ghcb, ctxt);
+ break;
+ case SVM_EXIT_MWAIT:
+ result = vc_handle_mwait(ghcb, ctxt);
+ break;
+ case SVM_EXIT_NPF:
+ result = vc_handle_mmio(ghcb, ctxt);
+ break;
+ default:
+ /*
+ * Unexpected #VC exception
+ */
+ result = ES_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+static __always_inline bool is_vc2_stack(unsigned long sp)
+{
+ return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
+}
+
+static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
+{
+ unsigned long sp, prev_sp;
+
+ sp = (unsigned long)regs;
+ prev_sp = regs->sp;
+
+ /*
+ * If the code was already executing on the VC2 stack when the #VC
+ * happened, let it proceed to the normal handling routine. This way the
+ * code executing on the VC2 stack can cause #VC exceptions to get handled.
+ */
+ return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
+}
+
+static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
+{
+ struct ghcb_state state;
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+ struct ghcb *ghcb;
+ bool ret = true;
+
+ ghcb = __sev_get_ghcb(&state);
+
+ vc_ghcb_invalidate(ghcb);
+ result = vc_init_em_ctxt(&ctxt, regs, error_code);
+
+ if (result == ES_OK)
+ result = vc_handle_exitcode(&ctxt, ghcb, error_code);
+
+ __sev_put_ghcb(&state);
+
+ /* Done - now check the result */
+ switch (result) {
+ case ES_OK:
+ vc_finish_insn(&ctxt);
+ break;
+ case ES_UNSUPPORTED:
+ pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
+ error_code, regs->ip);
+ ret = false;
+ break;
+ case ES_VMM_ERROR:
+ pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
+ error_code, regs->ip);
+ ret = false;
+ break;
+ case ES_DECODE_FAILED:
+ pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
+ error_code, regs->ip);
+ ret = false;
+ break;
+ case ES_EXCEPTION:
+ vc_forward_exception(&ctxt);
+ break;
+ case ES_RETRY:
+ /* Nothing to do */
+ break;
+ default:
+ pr_emerg("Unknown result in %s():%d\n", __func__, result);
+ /*
+ * Emulating the instruction which caused the #VC exception
+ * failed - can't continue so print debug information
+ */
+ BUG();
+ }
+
+ return ret;
+}
+
+static __always_inline bool vc_is_db(unsigned long error_code)
+{
+ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
+}
+
+/*
+ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
+ * and will panic when an error happens.
+ */
+DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
+{
+ irqentry_state_t irq_state;
+
+ /*
+ * With the current implementation it is always possible to switch to a
+ * safe stack because #VC exceptions only happen at known places, like
+ * intercepted instructions or accesses to MMIO areas/IO ports. They can
+ * also happen with code instrumentation when the hypervisor intercepts
+	 * #DB, but the critical paths must not be instrumented, so #DB
+	 * exceptions currently also only happen in safe places.
+	 *
+	 * But keep this check here in case the noinstr annotations are
+	 * violated due to a bug elsewhere.
+ */
+ if (unlikely(vc_from_invalid_context(regs))) {
+ instrumentation_begin();
+ panic("Can't handle #VC exception from unsupported context\n");
+ instrumentation_end();
+ }
+
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ exc_debug(regs);
+ return;
+ }
+
+ irq_state = irqentry_nmi_enter(regs);
+
+ instrumentation_begin();
+
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /* Show some debug info */
+ show_regs(regs);
+
+ /* Ask hypervisor to sev_es_terminate */
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+
+ /* If that fails and we get here - just panic */
+ panic("Returned from Terminate-Request to Hypervisor\n");
+ }
+
+ instrumentation_end();
+ irqentry_nmi_exit(regs, irq_state);
+}
+
+/*
+ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
+ * and will kill the current task with SIGBUS when an error happens.
+ */
+DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
+{
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ noist_exc_debug(regs);
+ return;
+ }
+
+ irqentry_enter_from_user_mode(regs);
+ instrumentation_begin();
+
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /*
+ * Do not kill the machine if user-space triggered the
+ * exception. Send SIGBUS instead and let user-space deal with
+ * it.
+ */
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+ }
+
+ instrumentation_end();
+ irqentry_exit_to_user_mode(regs);
+}
+
+bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
+{
+ unsigned long exit_code = regs->orig_ax;
+ struct es_em_ctxt ctxt;
+ enum es_result result;
+
+ vc_ghcb_invalidate(boot_ghcb);
+
+ result = vc_init_em_ctxt(&ctxt, regs, exit_code);
+ if (result == ES_OK)
+ result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
+
+ /* Done - now check the result */
+ switch (result) {
+ case ES_OK:
+ vc_finish_insn(&ctxt);
+ break;
+ case ES_UNSUPPORTED:
+ early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_VMM_ERROR:
+ early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_DECODE_FAILED:
+ early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
+ exit_code, regs->ip);
+ goto fail;
+ case ES_EXCEPTION:
+ vc_early_forward_exception(&ctxt);
+ break;
+ case ES_RETRY:
+ /* Nothing to do */
+ break;
+ default:
+ BUG();
+ }
+
+ return true;
+
+fail:
+ show_regs(regs);
+
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+}
+
diff --git a/arch/x86/coco/sev/vc-shared.c b/arch/x86/coco/sev/vc-shared.c
new file mode 100644
index 000000000000..2c0ab0fdc060
--- /dev/null
+++ b/arch/x86/coco/sev/vc-shared.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+
+static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
+ unsigned long exit_code)
+{
+ unsigned int opcode = (unsigned int)ctxt->insn.opcode.value;
+ u8 modrm = ctxt->insn.modrm.value;
+
+ switch (exit_code) {
+
+ case SVM_EXIT_IOIO:
+ case SVM_EXIT_NPF:
+ /* handled separately */
+ return ES_OK;
+
+ case SVM_EXIT_CPUID:
+ if (opcode == 0xa20f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_INVD:
+ if (opcode == 0x080f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_MONITOR:
+ /* MONITOR and MONITORX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_MWAIT:
+ /* MWAIT and MWAITX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_MSR:
+ /* RDMSR */
+ if (opcode == 0x320f ||
+ /* WRMSR */
+ opcode == 0x300f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_RDPMC:
+ if (opcode == 0x330f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_RDTSC:
+ if (opcode == 0x310f)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_RDTSCP:
+ if (opcode == 0x010f && modrm == 0xf9)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_READ_DR7:
+ if (opcode == 0x210f &&
+ X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_VMMCALL:
+ if (opcode == 0x010f && modrm == 0xd9)
+ return ES_OK;
+		break;
+
+ case SVM_EXIT_WRITE_DR7:
+ if (opcode == 0x230f &&
+ X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
+ return ES_OK;
+ break;
+
+ case SVM_EXIT_WBINVD:
+ if (opcode == 0x90f)
+ return ES_OK;
+ break;
+
+ default:
+ break;
+ }
+
+ sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n",
+ opcode, exit_code, ctxt->regs->ip);
+
+ return ES_UNSUPPORTED;
+}
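The opcode constants compared above look byte-swapped because the instruction decoder packs the opcode bytes little-endian into insn.opcode.value. For CPUID, encoded as the byte sequence 0x0f 0xa2:

	u8  bytes[2] = { 0x0f, 0xa2 };			/* CPUID encoding */
	u32 value    = bytes[0] | (bytes[1] << 8);	/* == 0xa20f */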
+
+static bool vc_decoding_needed(unsigned long exit_code)
+{
+	/* Exceptions don't require decoding the instruction */
+ return !(exit_code >= SVM_EXIT_EXCP_BASE &&
+ exit_code <= SVM_EXIT_LAST_EXCP);
+}
+
+static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
+ struct pt_regs *regs,
+ unsigned long exit_code)
+{
+ enum es_result ret = ES_OK;
+
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->regs = regs;
+
+ if (vc_decoding_needed(exit_code))
+ ret = vc_decode_insn(ctxt);
+
+ return ret;
+}
+
+static void vc_finish_insn(struct es_em_ctxt *ctxt)
+{
+ ctxt->regs->ip += ctxt->insn.length;
+}
+
+static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
+ unsigned long address,
+ bool write)
+{
+ if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_USER;
+ ctxt->fi.cr2 = address;
+ if (write)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return ES_EXCEPTION;
+ }
+
+ return ES_OK;
+}
+
+static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ void *src, char *buf,
+ unsigned int data_size,
+ unsigned int count,
+ bool backwards)
+{
+ int i, b = backwards ? -1 : 1;
+ unsigned long address = (unsigned long)src;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, false);
+ if (ret != ES_OK)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ void *s = src + (i * data_size * b);
+ char *d = buf + (i * data_size);
+
+ ret = vc_read_mem(ctxt, s, d, data_size);
+ if (ret != ES_OK)
+ break;
+ }
+
+ return ret;
+}
+
+static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
+ void *dst, char *buf,
+ unsigned int data_size,
+ unsigned int count,
+ bool backwards)
+{
+ int i, s = backwards ? -1 : 1;
+ unsigned long address = (unsigned long)dst;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, true);
+ if (ret != ES_OK)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ void *d = dst + (i * data_size * s);
+ char *b = buf + (i * data_size);
+
+ ret = vc_write_mem(ctxt, d, b, data_size);
+ if (ret != ES_OK)
+ break;
+ }
+
+ return ret;
+}
+
+#define IOIO_TYPE_STR BIT(2)
+#define IOIO_TYPE_IN 1
+#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR)
+#define IOIO_TYPE_OUT 0
+#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)
+
+#define IOIO_REP BIT(3)
+
+#define IOIO_ADDR_64 BIT(9)
+#define IOIO_ADDR_32 BIT(8)
+#define IOIO_ADDR_16 BIT(7)
+
+#define IOIO_DATA_32 BIT(6)
+#define IOIO_DATA_16 BIT(5)
+#define IOIO_DATA_8 BIT(4)
+
+#define IOIO_SEG_ES (0 << 10)
+#define IOIO_SEG_DS (3 << 10)
+
+static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+{
+ struct insn *insn = &ctxt->insn;
+ size_t size;
+ u64 port;
+
+ *exitinfo = 0;
+
+ switch (insn->opcode.bytes[0]) {
+ /* INS opcodes */
+ case 0x6c:
+ case 0x6d:
+ *exitinfo |= IOIO_TYPE_INS;
+ *exitinfo |= IOIO_SEG_ES;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* OUTS opcodes */
+ case 0x6e:
+ case 0x6f:
+ *exitinfo |= IOIO_TYPE_OUTS;
+ *exitinfo |= IOIO_SEG_DS;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* IN immediate opcodes */
+ case 0xe4:
+ case 0xe5:
+ *exitinfo |= IOIO_TYPE_IN;
+ port = (u8)insn->immediate.value & 0xffff;
+ break;
+
+ /* OUT immediate opcodes */
+ case 0xe6:
+ case 0xe7:
+ *exitinfo |= IOIO_TYPE_OUT;
+ port = (u8)insn->immediate.value & 0xffff;
+ break;
+
+ /* IN register opcodes */
+ case 0xec:
+ case 0xed:
+ *exitinfo |= IOIO_TYPE_IN;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* OUT register opcodes */
+ case 0xee:
+ case 0xef:
+ *exitinfo |= IOIO_TYPE_OUT;
+ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ default:
+ return ES_DECODE_FAILED;
+ }
+
+ *exitinfo |= port << 16;
+
+ switch (insn->opcode.bytes[0]) {
+ case 0x6c:
+ case 0x6e:
+ case 0xe4:
+ case 0xe6:
+ case 0xec:
+ case 0xee:
+ /* Single byte opcodes */
+ *exitinfo |= IOIO_DATA_8;
+ size = 1;
+ break;
+ default:
+ /* Length determined by instruction parsing */
+ *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
+ : IOIO_DATA_32;
+ size = (insn->opnd_bytes == 2) ? 2 : 4;
+ }
+
+ switch (insn->addr_bytes) {
+ case 2:
+ *exitinfo |= IOIO_ADDR_16;
+ break;
+ case 4:
+ *exitinfo |= IOIO_ADDR_32;
+ break;
+ case 8:
+ *exitinfo |= IOIO_ADDR_64;
+ break;
+ }
+
+ if (insn_has_rep_prefix(insn))
+ *exitinfo |= IOIO_REP;
+
+ return vc_ioio_check(ctxt, (u16)port, size);
+}
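As a worked example of the exitinfo encoding, take "out %al, $0xe9" (opcode 0xe6) executed in 64-bit code with no REP prefix:

	u64 exitinfo = 0;		/* IOIO_TYPE_OUT contributes 0 */
	exitinfo |= 0xe9 << 16;		/* port from the immediate */
	exitinfo |= BIT(4);		/* IOIO_DATA_8: single-byte opcode */
	exitinfo |= BIT(9);		/* IOIO_ADDR_64: addr_bytes == 8 */
	/* exitinfo == 0x00e90210 */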
+
+static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ u64 exit_info_1, exit_info_2;
+ enum es_result ret;
+
+ ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
+ if (ret != ES_OK)
+ return ret;
+
+ if (exit_info_1 & IOIO_TYPE_STR) {
+
+ /* (REP) INS/OUTS */
+
+ bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
+ unsigned int io_bytes, exit_bytes;
+ unsigned int ghcb_count, op_count;
+ unsigned long es_base;
+ u64 sw_scratch;
+
+ /*
+	 * For the string variants with a REP prefix, the number of in/out
+ * operations per #VC exception is limited so that the kernel
+ * has a chance to take interrupts and re-schedule while the
+ * instruction is emulated.
+ */
+ io_bytes = (exit_info_1 >> 4) & 0x7;
+ ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;
+
+ op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
+ exit_info_2 = min(op_count, ghcb_count);
+ exit_bytes = exit_info_2 * io_bytes;
+
+ es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
+
+ /* Read bytes of OUTS into the shared buffer */
+ if (!(exit_info_1 & IOIO_TYPE_IN)) {
+ ret = vc_insn_string_read(ctxt,
+ (void *)(es_base + regs->si),
+ ghcb->shared_buffer, io_bytes,
+ exit_info_2, df);
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * Issue a VMGEXIT to the HV to consume the bytes from the
+ * shared buffer or to have it write them into the shared buffer
+ * depending on the instruction: OUTS or INS.
+ */
+ sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
+ ghcb_set_sw_scratch(ghcb, sw_scratch);
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
+ exit_info_1, exit_info_2);
+ if (ret != ES_OK)
+ return ret;
+
+ /* Read bytes from shared buffer into the guest's destination. */
+ if (exit_info_1 & IOIO_TYPE_IN) {
+ ret = vc_insn_string_write(ctxt,
+ (void *)(es_base + regs->di),
+ ghcb->shared_buffer, io_bytes,
+ exit_info_2, df);
+ if (ret)
+ return ret;
+
+ if (df)
+ regs->di -= exit_bytes;
+ else
+ regs->di += exit_bytes;
+ } else {
+ if (df)
+ regs->si -= exit_bytes;
+ else
+ regs->si += exit_bytes;
+ }
+
+ if (exit_info_1 & IOIO_REP)
+ regs->cx -= exit_info_2;
+
+ ret = regs->cx ? ES_RETRY : ES_OK;
+
+ } else {
+
+ /* IN/OUT into/from rAX */
+
+ int bits = (exit_info_1 & 0x70) >> 1;
+ u64 rax = 0;
+
+ if (!(exit_info_1 & IOIO_TYPE_IN))
+ rax = lower_bits(regs->ax, bits);
+
+ ghcb_set_rax(ghcb, rax);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (exit_info_1 & IOIO_TYPE_IN) {
+ if (!ghcb_rax_is_valid(ghcb))
+ return ES_VMM_ERROR;
+ regs->ax = lower_bits(ghcb->save.rax, bits);
+ }
+ }
+
+ return ret;
+}
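To see the chunking at work, consider "rep insb" with rCX == 5000 and io_bytes == 1, assuming the 2032-byte shared buffer of the GHCB layout (an assumption taken from the GHCB specification, not from this patch); each line corresponds to one #VC exception:

	/* pass 1: exit_info_2 = min(5000, 2032) = 2032, cx -> 2968, ES_RETRY */
	/* pass 2: exit_info_2 = min(2968, 2032) = 2032, cx ->  936, ES_RETRY */
	/* pass 3: exit_info_2 = min( 936, 2032) =  936, cx ->    0, ES_OK    */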
+
+static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ struct cpuid_leaf leaf;
+ int ret;
+
+ leaf.fn = regs->ax;
+ leaf.subfn = regs->cx;
+ ret = snp_cpuid(ghcb, ctxt, &leaf);
+ if (!ret) {
+ regs->ax = leaf.eax;
+ regs->bx = leaf.ebx;
+ regs->cx = leaf.ecx;
+ regs->dx = leaf.edx;
+ }
+
+ return ret;
+}
+
+static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct pt_regs *regs = ctxt->regs;
+ u32 cr4 = native_read_cr4();
+ enum es_result ret;
+ int snp_cpuid_ret;
+
+ snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
+ if (!snp_cpuid_ret)
+ return ES_OK;
+ if (snp_cpuid_ret != -EOPNOTSUPP)
+ return ES_VMM_ERROR;
+
+ ghcb_set_rax(ghcb, regs->ax);
+ ghcb_set_rcx(ghcb, regs->cx);
+
+ if (cr4 & X86_CR4_OSXSAVE)
+ /* Safe to read xcr0 */
+ ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
+ else
+ /* xgetbv will cause #GP - use reset value for xcr0 */
+ ghcb_set_xcr0(ghcb, 1);
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) &&
+ ghcb_rbx_is_valid(ghcb) &&
+ ghcb_rcx_is_valid(ghcb) &&
+ ghcb_rdx_is_valid(ghcb)))
+ return ES_VMM_ERROR;
+
+ regs->ax = ghcb->save.rax;
+ regs->bx = ghcb->save.rbx;
+ regs->cx = ghcb->save.rcx;
+ regs->dx = ghcb->save.rdx;
+
+ return ES_OK;
+}
+
+static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ unsigned long exit_code)
+{
+ bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
+ enum es_result ret;
+
+ /*
+ * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure
+ * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP
+ * instructions are being intercepted. If this should occur and Secure
+ * TSC is enabled, guest execution should be terminated as the guest
+ * cannot rely on the TSC value provided by the hypervisor.
+ */
+ if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
+ return ES_VMM_ERROR;
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
+ (!rdtscp || ghcb_rcx_is_valid(ghcb))))
+ return ES_VMM_ERROR;
+
+ ctxt->regs->ax = ghcb->save.rax;
+ ctxt->regs->dx = ghcb->save.rdx;
+ if (rdtscp)
+ ctxt->regs->cx = ghcb->save.rcx;
+
+ return ES_OK;
+}
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 3a4f031d2f44..5fe201efc753 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added)
}
}
-static inline int is_x86_event(struct perf_event *event)
+int is_x86_event(struct perf_event *event)
{
int i;
@@ -2803,8 +2803,15 @@ static unsigned long get_segment_base(unsigned int segment)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
+ /*
+ * If we're not in a valid context with a real (not just lazy)
+ * user mm, then don't even try.
+ */
+ if (!nmi_uaccess_okay())
+ return 0;
+
/* IRQs are off, so this synchronizes with smp_store_release */
- ldt = READ_ONCE(current->active_mm->context.ldt);
+ ldt = smp_load_acquire(&current->mm->context.ldt);
if (!ldt || idx >= ldt->nr_entries)
return 0;
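The smp_load_acquire() above pairs with an smp_store_release() on the writer side when a new LDT is installed, guaranteeing that a reader who observes the new pointer also observes its fields initialized. A generic sketch of the pattern (the writer shown is hypothetical, not the actual LDT install path):

	/* Writer: publish a fully initialized object */
	new_ldt->nr_entries = n;
	smp_store_release(&mm->context.ldt, new_ldt);

	/* Reader: the acquire orders the nr_entries check after the load */
	ldt = smp_load_acquire(&current->mm->context.ldt);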
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 00dfe487bd00..c5f385413392 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
arr[pebs_enable] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_PEBS_ENABLE,
.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
- .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
+ .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
};
if (arr[pebs_enable].host) {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 18c3ab579b8b..9b20acc0e932 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
*/
intel_pmu_save_and_restart_reload(event, count);
}
- } else
- intel_pmu_save_and_restart(event);
+ } else {
+ /*
+		 * For a non-precise event, it's possible that the
+		 * counters-snapshotting records a positive value for the
+		 * overflowed event. Then the HW auto-reload mechanism
+		 * resets the counter to 0 immediately, because the
+		 * pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD
+		 * is not set. The counter may thus be observed going
+		 * backwards in a PMI handler.
+		 *
+		 * Since the event value has already been updated when
+		 * processing the counters-snapshotting record, only the new
+		 * period needs to be set for the counter.
+ */
+ if (is_pebs_counter_event_group(event))
+ static_call(x86_pmu_set_period)(event);
+ else
+ intel_pmu_save_and_restart(event);
+ }
}
static __always_inline void
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 2c0ce0e9545e..46d120597bab 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event)
return is_metric_event(event) || is_slots_event(event);
}
+int is_x86_event(struct perf_event *event);
+
+static inline bool check_leader_group(struct perf_event *leader, int flags)
+{
+ return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
+}
+
static inline bool is_branch_counters_group(struct perf_event *event)
{
- return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
+ return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
}
static inline bool is_pebs_counter_event_group(struct perf_event *event)
{
- return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
+ return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
}
struct amd_nb {
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4a37a8bd87fd..e18cdaa1573c 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -82,6 +82,12 @@ struct alt_instr {
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+extern s32 __retpoline_sites[], __retpoline_sites_end[];
+extern s32 __return_sites[], __return_sites_end[];
+extern s32 __cfi_sites[], __cfi_sites_end[];
+extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
+extern s32 __smp_locks[], __smp_locks_end[];
+
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
@@ -335,11 +341,6 @@ void nop_func(void);
__ALTERNATIVE(\oldinstr, \newinstr, \ft_flags)
.endm
-#define old_len 141b-140b
-#define new_len1 144f-143f
-#define new_len2 145f-144f
-#define new_len3 146f-145f
-
/*
* Same as ALTERNATIVE macro above but for two alternatives. If CPU
* has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index cbc6157f0b4b..b5982b94bdba 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -16,8 +16,7 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
- "call __sw_hweight32",
+ asm_inline (ALTERNATIVE("call __sw_hweight32",
"popcntl %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
@@ -46,8 +45,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
- "call __sw_hweight64",
+ asm_inline (ALTERNATIVE("call __sw_hweight64",
"popcntq %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index cc2881576c2c..eef0771512de 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -114,17 +114,12 @@
#endif
#ifndef __ASSEMBLER__
-#ifndef __pic__
static __always_inline __pure void *rip_rel_ptr(void *p)
{
asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
return p;
}
-#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
-#else
-#define RIP_REL_REF(var) (var)
-#endif
#endif
/*
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 100413aff640..eebbc8889e70 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -248,7 +248,7 @@ arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
static __always_inline unsigned long variable__ffs(unsigned long word)
{
- asm("rep; bsf %1,%0"
+ asm("tzcnt %1,%0"
: "=r" (word)
: ASM_INPUT_RM (word));
return word;
@@ -267,10 +267,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
static __always_inline unsigned long variable_ffz(unsigned long word)
{
- asm("rep; bsf %1,%0"
- : "=r" (word)
- : "r" (~word));
- return word;
+ return variable__ffs(~word);
}
/**
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 3f02ff6d333d..02b23aa78955 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -74,6 +74,11 @@
# define BOOT_STACK_SIZE 0x1000
#endif
+#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)
+
+#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
+#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0
+
#ifndef __ASSEMBLER__
extern unsigned int output_len;
extern const unsigned long kernel_text_size;
@@ -83,6 +88,11 @@ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
void (*error)(char *x));
extern struct boot_params *boot_params_ptr;
+extern unsigned long *trampoline_32bit;
+extern const u16 trampoline_ljmp_imm_offset;
+
+void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
+
#endif
#endif /* _ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index e7225452963f..e1dbf8df1b69 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -22,7 +22,7 @@ static inline u64 cc_get_mask(void)
static inline void cc_set_mask(u64 mask)
{
- RIP_REL_REF(cc_mask) = mask;
+ cc_mask = mask;
}
u64 cc_mkenc(u64 val);
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index fdbbbfec745a..719d95f1ab5e 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -23,7 +23,7 @@ DECLARE_PER_CPU(unsigned long, cpu_dr7);
static __always_inline unsigned long native_get_debugreg(int regno)
{
- unsigned long val = 0; /* Damn you, gcc! */
+ unsigned long val;
switch (regno) {
case 0:
@@ -43,7 +43,7 @@ static __always_inline unsigned long native_get_debugreg(int regno)
break;
case 7:
/*
- * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
+ * Use "asm volatile" for DR7 reads to forbid re-ordering them
* with other code.
*
* This is needed because a DR7 access can cause a #VC exception
@@ -55,7 +55,7 @@ static __always_inline unsigned long native_get_debugreg(int regno)
* re-ordered to happen before the call to sev_es_ist_enter(),
* causing stack recursion.
*/
- asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%db7, %0" : "=r" (val));
break;
default:
BUG();
@@ -83,15 +83,15 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
break;
case 7:
/*
- * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
+ * Use "asm volatile" for DR7 writes to forbid re-ordering them
* with other code.
*
	 * While it didn't happen with a DR7 write (see the DR7 read
* comment above which explains where it happened), add the
- * __FORCE_ORDER here too to avoid similar problems in the
+ * "asm volatile" here too to avoid similar problems in the
* future.
*/
- asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
+ asm volatile("mov %0, %%db7" ::"r" (value));
break;
default:
BUG();
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 53e4015242b4..97f341777db5 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -82,6 +82,7 @@
#define INAT_NO_REX2 (1 << (INAT_FLAG_OFFS + 8))
#define INAT_REX2_VARIANT (1 << (INAT_FLAG_OFFS + 9))
#define INAT_EVEX_SCALABLE (1 << (INAT_FLAG_OFFS + 10))
+#define INAT_INV64 (1 << (INAT_FLAG_OFFS + 11))
/* Attribute making macros for attribute tables */
#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
@@ -242,4 +243,9 @@ static inline int inat_evex_scalable(insn_attr_t attr)
{
return attr & INAT_EVEX_SCALABLE;
}
+
+static inline int inat_is_invalid64(insn_attr_t attr)
+{
+ return attr & INAT_INV64;
+}
#endif
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e889c3bab5a2..ca309a3227c7 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -217,7 +217,7 @@ void memset_io(volatile void __iomem *, int, size_t);
static inline void __iowrite32_copy(void __iomem *to, const void *from,
size_t count)
{
- asm volatile("rep ; movsl"
+ asm volatile("rep movsl"
: "=&c"(count), "=&D"(to), "=&S"(from)
: "0"(count), "1"(to), "2"(from)
: "memory");
@@ -282,7 +282,7 @@ static inline void outs##bwl(u16 port, const void *addr, unsigned long count) \
count--; \
} \
} else { \
- asm volatile("rep; outs" #bwl \
+ asm volatile("rep outs" #bwl \
: "+S"(addr), "+c"(count) \
: "d"(port) : "memory"); \
} \
@@ -298,7 +298,7 @@ static inline void ins##bwl(u16 port, void *addr, unsigned long count) \
count--; \
} \
} else { \
- asm volatile("rep; ins" #bwl \
+ asm volatile("rep ins" #bwl \
: "+D"(addr), "+c"(count) \
: "d"(port) : "memory"); \
} \
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 5432457d2338..f2ad77929d6e 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -8,6 +8,9 @@
# define PA_PGD 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
+#else
+/* Size of each exception handler referenced by the IDT */
+# define KEXEC_DEBUG_EXC_HANDLER_SIZE 6 /* PUSHI, PUSHI, 2-byte JMP */
#endif
# define KEXEC_CONTROL_PAGE_SIZE 4096
@@ -59,6 +62,10 @@ struct kimage;
extern unsigned long kexec_va_control_page;
extern unsigned long kexec_pa_table_page;
extern unsigned long kexec_pa_swap_page;
+extern gate_desc kexec_debug_idt[];
+extern unsigned char kexec_debug_exc_vectors[];
+extern uint16_t kexec_debug_8250_port;
+extern unsigned long kexec_debug_8250_mmio32;
#endif
/*
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index b51d8a4673f5..9d38ae744a2e 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -141,5 +141,15 @@
#define SYM_FUNC_START_WEAK_NOALIGN(name) \
SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+/*
+ * Expose 'sym' to the startup code in arch/x86/boot/startup/, by emitting an
+ * alias prefixed with __pi_
+ */
+#ifdef __ASSEMBLER__
+#define SYM_PIC_ALIAS(sym) SYM_ALIAS(__pi_ ## sym, sym, SYM_L_GLOBAL)
+#else
+#define SYM_PIC_ALIAS(sym) extern typeof(sym) __PASTE(__pi_, sym) __alias(sym)
+#endif
+
#endif /* _ASM_X86_LINKAGE_H */
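A hypothetical assembly usage of the new macro, exposing a routine to the startup code under its __pi_ alias (my_helper is an illustrative name, not an in-tree symbol):

	SYM_FUNC_START(my_helper)
		ret
	SYM_FUNC_END(my_helper)
	SYM_PIC_ALIAS(my_helper)	/* emits global __pi_my_helper */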
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 1530ee301dfe..ea6494628cb0 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -61,7 +61,7 @@ void __init sev_es_init_vc_handling(void);
static inline u64 sme_get_me_mask(void)
{
- return RIP_REL_REF(sme_me_mask);
+ return sme_me_mask;
}
#define __bss_decrypted __section(".bss..decrypted")
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 695e569159c1..be7cddc414e4 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -17,10 +17,12 @@ struct ucode_cpu_info {
void load_ucode_bsp(void);
void load_ucode_ap(void);
void microcode_bsp_resume(void);
+bool __init microcode_loader_disabled(void);
#else
static inline void load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
+static inline bool __init microcode_loader_disabled(void) { return false; }
#endif
extern unsigned long initrd_start_early;
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 8b8055a8eb9e..0fe9c569d171 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -16,6 +16,8 @@
#define MM_CONTEXT_LOCK_LAM 2
/* Allow LAM and SVA coexisting */
#define MM_CONTEXT_FORCE_TAGGED_SVA 3
+/* Do not track this mm in mm_cpumask */
+#define MM_CONTEXT_NOTRACK 4
/*
* x86 has arch-specific MMU state beyond what lives in mm_struct.
@@ -44,9 +46,7 @@ typedef struct {
struct ldt_struct *ldt;
#endif
-#ifdef CONFIG_X86_64
unsigned long flags;
-#endif
#ifdef CONFIG_ADDRESS_MASKING
/* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 2398058b6e83..73bf3b1b44e8 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -190,7 +190,7 @@ extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
#define activate_mm(prev, next) \
do { \
paravirt_enter_mmap(next); \
- switch_mm((prev), (next), NULL); \
+ switch_mm_irqs_off((prev), (next), NULL); \
} while (0);
#ifdef CONFIG_X86_32
@@ -247,6 +247,16 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
}
#endif
+static inline bool is_notrack_mm(struct mm_struct *mm)
+{
+ return test_bit(MM_CONTEXT_NOTRACK, &mm->context.flags);
+}
+
+static inline void set_notrack_mm(struct mm_struct *mm)
+{
+ set_bit(MM_CONTEXT_NOTRACK, &mm->context.flags);
+}
+
/*
* We only want to enforce protection keys on the current process
* because we effectively have no access to PKRU for other
@@ -272,4 +282,7 @@ unsigned long __get_current_cr3_fast(void);
#include <asm-generic/mmu_context.h>
+extern struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm);
+extern void unuse_temporary_mm(struct mm_struct *prev_mm);
+
#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index fd4effd7654a..51a677fe9a8d 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -561,7 +561,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
-DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
extern u16 mds_verw_sel;
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 9f77bf03d747..018a8d906ca3 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -29,9 +29,7 @@
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
/* Physical address where kernel should be loaded. */
-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
- + (CONFIG_PHYSICAL_ALIGN - 1)) \
- & ~(CONFIG_PHYSICAL_ALIGN - 1))
+#define LOAD_PHYSICAL_ADDR __ALIGN_KERNEL_MASK(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN - 1)
#define __START_KERNEL (__START_KERNEL_map + LOAD_PHYSICAL_ADDR)
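__ALIGN_KERNEL_MASK(x, mask) expands to ((x + mask) & ~mask), i.e. it rounds x up to the next (mask + 1) boundary, matching the open-coded expression being replaced. With hypothetical config values CONFIG_PHYSICAL_START = 0x100000 and CONFIG_PHYSICAL_ALIGN = 0x200000:

	/* (0x100000 + 0x1fffff) & ~0x1fffff == 0x200000 */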
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 5fe314a2e73e..b0d03b6c279b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -29,6 +29,8 @@
#ifdef CONFIG_SMP
+#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
+
#ifdef CONFIG_CC_HAS_NAMED_AS
#ifdef __CHECKER__
@@ -36,23 +38,23 @@
# define __seg_fs __attribute__((address_space(__seg_fs)))
#endif
+#define __percpu_prefix
#define __percpu_seg_override CONCATENATE(__seg_, __percpu_seg)
-#define __percpu_prefix ""
#else /* !CONFIG_CC_HAS_NAMED_AS: */
+#define __percpu_prefix __force_percpu_prefix
#define __percpu_seg_override
-#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
#endif /* CONFIG_CC_HAS_NAMED_AS */
-#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
-#define __my_cpu_offset this_cpu_read(this_cpu_off)
-
/*
* Compared to the generic __my_cpu_offset version, the following
* saves one instruction and avoids clobbering a temp register.
- *
+ */
+#define __my_cpu_offset this_cpu_read(this_cpu_off)
+
+/*
* arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
* kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
@@ -77,9 +79,9 @@
#else /* !CONFIG_SMP: */
+#define __force_percpu_prefix
+#define __percpu_prefix
#define __percpu_seg_override
-#define __percpu_prefix ""
-#define __force_percpu_prefix ""
#define PER_CPU_VAR(var) (var)__percpu_rel
@@ -97,8 +99,8 @@
# define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
#endif
-#define __percpu_arg(x) __percpu_prefix "%" #x
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
+#define __percpu_arg(x) __percpu_prefix "%" #x
/*
* For arch-specific code, we can use direct single-insn ops (they
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ad33903dc92a..50d34698036d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -733,6 +733,7 @@ void store_cpu_caps(struct cpuinfo_x86 *info);
enum l1tf_mitigations {
L1TF_MITIGATION_OFF,
+ L1TF_MITIGATION_AUTO,
L1TF_MITIGATION_FLUSH_NOWARN,
L1TF_MITIGATION_FLUSH,
L1TF_MITIGATION_FLUSH_NOSMT,
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ad9212df0ec0..6324f4c6c545 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -52,6 +52,7 @@ extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
extern void startup_64_setup_gdt_idt(void);
+extern void startup_64_load_idt(void *vc_handler);
extern void early_setup_idt(void);
extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/include/asm/sev-internal.h
new file mode 100644
index 000000000000..b7232081f8f7
--- /dev/null
+++ b/arch/x86/include/asm/sev-internal.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define DR7_RESET_VALUE 0x400
+
+extern struct ghcb boot_ghcb_page;
+extern u64 sev_hv_features;
+extern u64 sev_secrets_pa;
+
+/* #VC handler runtime per-CPU data */
+struct sev_es_runtime_data {
+ struct ghcb ghcb_page;
+
+ /*
+ * Reserve one page per CPU as backup storage for the unencrypted GHCB.
+ * It is needed when an NMI happens while the #VC handler uses the real
+ * GHCB, and the NMI handler itself causes another #VC exception. In
+ * that case the GHCB content of the first handler needs to be backed up
+ * and restored.
+ */
+ struct ghcb backup_ghcb;
+
+ /*
+ * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
+ * There is no need for it to be atomic, because nothing is written to
+ * the GHCB between the read and the write of ghcb_active. So it is safe
+ * to use it when a nested #VC exception happens before the write.
+ *
+ * This is necessary for example in the #VC->NMI->#VC case when the NMI
+ * happens while the first #VC handler uses the GHCB. When the NMI code
+ * raises a second #VC exception it might overwrite the contents of the
+ * GHCB written by the first handler. To avoid this the content of the
+ * GHCB is saved and restored when the GHCB is detected to be in use
+ * already.
+ */
+ bool ghcb_active;
+ bool backup_ghcb_active;
+
+ /*
+ * Cached DR7 value - write it on DR7 writes and return it on reads.
+ * That value will never make it to the real hardware DR7 as debugging
+ * is currently unsupported in SEV-ES guests.
+ */
+ unsigned long dr7;
+};
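A simplified sketch of the backup scheme the two flags implement; this is illustrative only, not the body of the actual __sev_get_ghcb():

	struct ghcb *get_ghcb_sketch(struct sev_es_runtime_data *data)
	{
		if (data->ghcb_active) {	/* nested #VC, e.g. via NMI */
			data->backup_ghcb = data->ghcb_page;
			data->backup_ghcb_active = true;
		}
		data->ghcb_active = true;
		return &data->ghcb_page;
	}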
+
+struct ghcb_state {
+ struct ghcb *ghcb;
+};
+
+extern struct svsm_ca boot_svsm_ca_page;
+
+struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
+void __sev_put_ghcb(struct ghcb_state *state);
+
+DECLARE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
+DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
+
+void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages, enum psc_op op);
+
+DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
+DECLARE_PER_CPU(u64, svsm_caa_pa);
+
+extern struct svsm_ca *boot_svsm_caa;
+extern u64 boot_svsm_caa_pa;
+
+static __always_inline struct svsm_ca *svsm_get_caa(void)
+{
+ if (sev_cfg.use_cas)
+ return this_cpu_read(svsm_caa);
+ else
+ return boot_svsm_caa;
+}
+
+static __always_inline u64 svsm_get_caa_pa(void)
+{
+ if (sev_cfg.use_cas)
+ return this_cpu_read(svsm_caa_pa);
+ else
+ return boot_svsm_caa_pa;
+}
+
+int svsm_perform_call_protocol(struct svsm_call *call);
+
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+ return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+}
+
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
+{
+ u32 low, high;
+
+ low = (u32)(val);
+ high = (u32)(val >> 32);
+
+ native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
+}
+
+void snp_register_ghcb_early(unsigned long paddr);
+bool sev_es_negotiate_protocol(void);
+bool sev_es_check_cpu_features(void);
+u64 get_hv_features(void);
+
+const struct snp_cpuid_table *snp_cpuid_get_table(void);
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index ba7999f66abe..6158893786d6 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -15,6 +15,7 @@
#include <asm/sev-common.h>
#include <asm/coco.h>
#include <asm/set_memory.h>
+#include <asm/svm.h>
#define GHCB_PROTOCOL_MIN 1ULL
#define GHCB_PROTOCOL_MAX 2ULL
@@ -83,6 +84,36 @@ extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
+/*
+ * Individual entries of the SNP CPUID table, as defined by the SNP
+ * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
+ */
+struct snp_cpuid_fn {
+ u32 eax_in;
+ u32 ecx_in;
+ u64 xcr0_in;
+ u64 xss_in;
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ u64 __reserved;
+} __packed;
+
+/*
+ * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
+ * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
+ * of 64 entries per CPUID table.
+ */
+#define SNP_CPUID_COUNT_MAX 64
+
+struct snp_cpuid_table {
+ u32 count;
+ u32 __reserved1;
+ u64 __reserved2;
+ struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
+} __packed;
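A lookup over this table matches on eax_in and ecx_in; a minimal sketch assuming a populated table (illustrative only - the in-tree helper also accounts for xcr0_in/xss_in and subleaf-insensitive functions):

	static const struct snp_cpuid_fn *
	snp_cpuid_find(const struct snp_cpuid_table *t, u32 eax_in, u32 ecx_in)
	{
		for (u32 i = 0; i < t->count && i < SNP_CPUID_COUNT_MAX; i++) {
			if (t->fn[i].eax_in == eax_in && t->fn[i].ecx_in == ecx_in)
				return &t->fn[i];
		}
		return NULL;
	}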
+
/* PVALIDATE return codes */
#define PVALIDATE_FAIL_SIZEMISMATCH 6
@@ -484,6 +515,34 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
+static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
+{
+ ghcb->save.sw_exit_code = 0;
+ __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+}
+
+void vc_forward_exception(struct es_em_ctxt *ctxt);
+
+/* I/O parameters for CPUID-related helpers */
+struct cpuid_leaf {
+ u32 fn;
+ u32 subfn;
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+};
+
+int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf);
+
+void __noreturn sev_es_terminate(unsigned int set, unsigned int reason);
+enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ u64 exit_code, u64 exit_info_1,
+ u64 exit_info_2);
+
+extern struct ghcb *boot_ghcb;
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define snp_vmpl 0
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 6266d6b9e0b8..ecda17efa042 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -10,30 +10,19 @@
#include <linux/irqflags.h>
#include <linux/jump_label.h>
-/*
- * The compiler should not reorder volatile asm statements with respect to each
- * other: they should execute in program order. However GCC 4.9.x and 5.x have
- * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
- * volatile asm. The write functions are not affected since they have memory
- * clobbers preventing reordering. To prevent reads from being reordered with
- * respect to writes, use a dummy memory operand.
- */
-
-#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
-
void native_write_cr0(unsigned long val);
static inline unsigned long native_read_cr0(void)
{
unsigned long val;
- asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr0,%0" : "=r" (val));
return val;
}
static __always_inline unsigned long native_read_cr2(void)
{
unsigned long val;
- asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr2,%0" : "=r" (val));
return val;
}
@@ -45,7 +34,7 @@ static __always_inline void native_write_cr2(unsigned long val)
static __always_inline unsigned long __native_read_cr3(void)
{
unsigned long val;
- asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr3,%0" : "=r" (val));
return val;
}
@@ -66,10 +55,10 @@ static inline unsigned long native_read_cr4(void)
asm volatile("1: mov %%cr4, %0\n"
"2:\n"
_ASM_EXTABLE(1b, 2b)
- : "=r" (val) : "0" (0), __FORCE_ORDER);
+ : "=r" (val) : "0" (0));
#else
/* CR4 always exists on x86_64. */
- asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ asm volatile("mov %%cr4,%0" : "=r" (val));
#endif
return val;
}
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 32c0d981a82a..e9cce169bb4c 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -33,11 +33,11 @@ extern size_t strlen(const char *s);
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
int d0, d1, d2;
- asm volatile("rep ; movsl\n\t"
+ asm volatile("rep movsl\n\t"
"movl %4,%%ecx\n\t"
"andl $3,%%ecx\n\t"
"jz 1f\n\t"
- "rep ; movsb\n\t"
+ "rep movsb\n\t"
"1:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
@@ -89,7 +89,7 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
if (n >= 5 * 4) {
/* large block: use rep prefix */
int ecx;
- asm volatile("rep ; movsl"
+ asm volatile("rep movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
: "0" (n / 4), "1" (edi), "2" (esi)
: "memory"
@@ -165,8 +165,7 @@ extern void *memchr(const void *cs, int c, size_t count);
static inline void *__memset_generic(void *s, char c, size_t count)
{
int d0, d1;
- asm volatile("rep\n\t"
- "stosb"
+ asm volatile("rep stosb"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "1" (s), "0" (count)
: "memory");
@@ -199,8 +198,7 @@ extern void *memset(void *, int, size_t);
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
int d0, d1;
- asm volatile("rep\n\t"
- "stosw"
+ asm volatile("rep stosw"
: "=&c" (d0), "=&D" (d1)
: "a" (v), "1" (s), "0" (n)
: "memory");
@@ -211,8 +209,7 @@ static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
int d0, d1;
- asm volatile("rep\n\t"
- "stosl"
+ asm volatile("rep stosl"
: "=&c" (d0), "=&D" (d1)
: "a" (v), "1" (s), "0" (n)
: "memory");
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index ab9e143ec9fe..5337f1be18f6 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -11,11 +11,11 @@
* JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
* Raise it if needed.
*/
-#define POKE_MAX_OPCODE_SIZE 5
+#define TEXT_POKE_MAX_OPCODE_SIZE 5
extern void text_poke_early(void *addr, const void *opcode, size_t len);
-extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);
+extern void text_poke_apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
@@ -32,17 +32,17 @@ extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u
* an inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void text_poke_sync(void);
+extern void smp_text_poke_sync_each_cpu(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
#define text_poke_copy text_poke_copy
extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok);
extern void *text_poke_set(void *addr, int c, size_t len);
-extern int poke_int3_handler(struct pt_regs *regs);
-extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
+extern int smp_text_poke_int3_handler(struct pt_regs *regs);
+extern void smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate);
-extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
-extern void text_poke_finish(void);
+extern void smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate);
+extern void smp_text_poke_batch_finish(void);
#define INT3_INSN_SIZE 1
#define INT3_INSN_OPCODE 0xCC
@@ -82,7 +82,7 @@ static __always_inline int text_opcode_size(u8 opcode)
}
union text_poke_insn {
- u8 text[POKE_MAX_OPCODE_SIZE];
+ u8 text[TEXT_POKE_MAX_OPCODE_SIZE];
struct {
u8 opcode;
s32 disp;
@@ -128,8 +128,8 @@ void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
}
extern int after_bootmem;
-extern __ro_after_init struct mm_struct *poking_mm;
-extern __ro_after_init unsigned long poking_addr;
+extern __ro_after_init struct mm_struct *text_poke_mm;
+extern __ro_after_init unsigned long text_poke_mm_addr;
#ifndef CONFIG_UML_X86
static __always_inline
@@ -142,13 +142,14 @@ static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
/*
- * The int3 handler in entry_64.S adds a gap between the
+ * The INT3 handler in entry_64.S adds a gap between the
* stack where the break point happened, and the saving of
* pt_regs. We can extend the original stack because of
- * this gap. See the idtentry macro's create_gap option.
+ * this gap. See the idtentry macro's X86_TRAP_BP logic.
*
- * Similarly entry_32.S will have a gap on the stack for (any) hardware
- * exception and pt_regs; see FIXUP_FRAME.
+ * Similarly, entry_32.S will have a gap on the stack for
+ * (any) hardware exception and pt_regs; see the
+ * FIXUP_FRAME macro.
*/
regs->sp -= sizeof(unsigned long);
*(unsigned long *)regs->sp = val;
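[Note: this push primitive is what makes CALL emulation possible from inside the trap. A sketch of the composition, based on the surrounding header (simplified; the exact upstream body may differ):

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/* Push the return address: the byte after the patched CALL site. */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	/* Then redirect execution to the call target. */
	int3_emulate_jmp(regs, func);
}
]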
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c52f0133425b..c8a5ae35c871 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -26,8 +26,8 @@ extern unsigned long USER_PTR_MAX;
*/
static inline unsigned long __untagged_addr(unsigned long addr)
{
- asm (ALTERNATIVE("",
- "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM)
+ asm_inline (ALTERNATIVE("", "and " __percpu_arg([mask]) ", %[addr]",
+ X86_FEATURE_LAM)
: [addr] "+r" (addr)
: [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));
@@ -54,7 +54,7 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
#endif
#define valid_user_address(x) \
- ((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
+ likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
/*
* Masking the user address is an alternative to a conditional
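[Note: to make the ALTERNATIVE above concrete: with LAM active, untagging is a single AND with a per-CPU mask that clears the tag bits while leaving bit 63 intact, so kernel addresses stay canonical. A userspace-compilable model (the LAM_U57 mask layout is an assumption for illustration, not lifted from the kernel headers):

#include <stdint.h>

/* Hypothetical LAM_U57 untag mask: clear tag bits 62:57, keep everything
 * else, including bit 63. */
#define LAM_U57_UNTAG_MASK	(~(0x3fULL << 57))

static inline uint64_t untagged_addr_model(uint64_t addr)
{
	return addr & LAM_U57_UNTAG_MASK;
}
]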
diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h
index c9b2ba7a9ec4..7000aeb59aa2 100644
--- a/arch/x86/include/asm/vdso/processor.h
+++ b/arch/x86/include/asm/vdso/processor.h
@@ -7,15 +7,15 @@
#ifndef __ASSEMBLER__
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static __always_inline void rep_nop(void)
+/* PAUSE is a good thing to insert into busy-wait loops. */
+static __always_inline void native_pause(void)
{
- asm volatile("rep; nop" ::: "memory");
+ asm volatile("pause" ::: "memory");
}
static __always_inline void cpu_relax(void)
{
- rep_nop();
+ native_pause();
}
struct getcpu_cache;
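[Note: a typical (hypothetical) caller in the style this comment describes, assuming kernel context where READ_ONCE() and cpu_relax() are available:

/* Spin until *flag becomes non-zero; PAUSE throttles speculation in the
 * spin loop and frees up resources for an SMT sibling. */
static void wait_for_flag(int *flag)
{
	while (!READ_ONCE(*flag))
		cpu_relax();
}
]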
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index bf82c6f7d690..ddbc303e41e3 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1,36 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt
-#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/mmu_context.h>
#include <linux/perf_event.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/stringify.h>
-#include <linux/highmem.h>
-#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
-#include <linux/stop_machine.h>
-#include <linux/slab.h>
-#include <linux/kdebug.h>
-#include <linux/kprobes.h>
-#include <linux/mmu_context.h>
-#include <linux/bsearch.h>
-#include <linux/sync_core.h>
+
#include <asm/text-patching.h>
-#include <asm/alternative.h>
-#include <asm/sections.h>
-#include <asm/mce.h>
-#include <asm/nmi.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <asm/insn.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/paravirt.h>
-#include <asm/asm-prototypes.h>
-#include <asm/cfi.h>
+#include <asm/nmi.h>
int __read_mostly alternatives_patched;
@@ -171,13 +149,6 @@ static void add_nop(u8 *buf, unsigned int len)
*buf = INT3_INSN_OPCODE;
}
-extern s32 __retpoline_sites[], __retpoline_sites_end[];
-extern s32 __return_sites[], __return_sites_end[];
-extern s32 __cfi_sites[], __cfi_sites_end[];
-extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
-extern s32 __smp_locks[], __smp_locks_end[];
-void text_poke_early(void *addr, const void *opcode, size_t len);
-
/*
* Matches NOP and NOPL, not any of the other possible NOPs.
*/
@@ -369,7 +340,7 @@ static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen,
}
}
-void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+void text_poke_apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
__apply_relocation(buf, instr, instrlen, repl, repl_len);
optimize_nops(instr, buf, instrlen);
@@ -525,7 +496,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
insn_buff[insn_buff_sz] = 0x90;
- apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
+ text_poke_apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
@@ -2010,7 +1981,7 @@ __visible noinline void __init __alt_reloc_selftest(void *arg)
static noinline void __init alt_reloc_selftest(void)
{
/*
- * Tests apply_relocation().
+ * Tests text_poke_apply_relocation().
*
* This has a relative immediate (CALL) in a place other than the first
* instruction and additionally on x86_64 we get a RIP-relative LEA:
@@ -2140,76 +2111,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
}
}
-typedef struct {
- struct mm_struct *mm;
-} temp_mm_state_t;
-
-/*
- * Using a temporary mm allows to set temporary mappings that are not accessible
- * by other CPUs. Such mappings are needed to perform sensitive memory writes
- * that override the kernel memory protections (e.g., W^X), without exposing the
- * temporary page-table mappings that are required for these write operations to
- * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the
- * mapping is torn down.
- *
- * Context: The temporary mm needs to be used exclusively by a single core. To
- * harden security IRQs must be disabled while the temporary mm is
- * loaded, thereby preventing interrupt handler bugs from overriding
- * the kernel memory protection.
- */
-static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
-{
- temp_mm_state_t temp_state;
-
- lockdep_assert_irqs_disabled();
-
- /*
- * Make sure not to be in TLB lazy mode, as otherwise we'll end up
- * with a stale address space WITHOUT being in lazy mode after
- * restoring the previous mm.
- */
- if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
- leave_mm();
-
- temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
- switch_mm_irqs_off(NULL, mm, current);
-
- /*
- * If breakpoints are enabled, disable them while the temporary mm is
- * used. Userspace might set up watchpoints on addresses that are used
- * in the temporary mm, which would lead to wrong signals being sent or
- * crashes.
- *
- * Note that breakpoints are not disabled selectively, which also causes
- * kernel breakpoints (e.g., perf's) to be disabled. This might be
- * undesirable, but still seems reasonable as the code that runs in the
- * temporary mm should be short.
- */
- if (hw_breakpoint_active())
- hw_breakpoint_disable();
-
- return temp_state;
-}
-
-__ro_after_init struct mm_struct *poking_mm;
-__ro_after_init unsigned long poking_addr;
-
-static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
-{
- lockdep_assert_irqs_disabled();
-
- switch_mm_irqs_off(NULL, prev_state.mm, current);
-
- /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
- cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
-
- /*
- * Restore the breakpoints if they were disabled before the temporary mm
- * was loaded.
- */
- if (hw_breakpoint_active())
- hw_breakpoint_restore();
-}
+__ro_after_init struct mm_struct *text_poke_mm;
+__ro_after_init unsigned long text_poke_mm_addr;
static void text_poke_memcpy(void *dst, const void *src, size_t len)
{
@@ -2229,7 +2132,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
{
bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
struct page *pages[2] = {NULL};
- temp_mm_state_t prev;
+ struct mm_struct *prev_mm;
unsigned long flags;
pte_t pte, *ptep;
spinlock_t *ptl;
@@ -2266,7 +2169,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
/*
* The lock is not really needed, but this allows to avoid open-coding.
*/
- ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ ptep = get_locked_pte(text_poke_mm, text_poke_mm_addr, &ptl);
/*
* This must not fail; preallocated in poking_init().
@@ -2276,21 +2179,21 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
local_irq_save(flags);
pte = mk_pte(pages[0], pgprot);
- set_pte_at(poking_mm, poking_addr, ptep, pte);
+ set_pte_at(text_poke_mm, text_poke_mm_addr, ptep, pte);
if (cross_page_boundary) {
pte = mk_pte(pages[1], pgprot);
- set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
+ set_pte_at(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1, pte);
}
/*
* Loading the temporary mm behaves as a compiler barrier, which
* guarantees that the PTE will be set at the time memcpy() is done.
*/
- prev = use_temporary_mm(poking_mm);
+ prev_mm = use_temporary_mm(text_poke_mm);
kasan_disable_current();
- func((u8 *)poking_addr + offset_in_page(addr), src, len);
+ func((u8 *)text_poke_mm_addr + offset_in_page(addr), src, len);
kasan_enable_current();
/*
@@ -2299,22 +2202,22 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
*/
barrier();
- pte_clear(poking_mm, poking_addr, ptep);
+ pte_clear(text_poke_mm, text_poke_mm_addr, ptep);
if (cross_page_boundary)
- pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
+ pte_clear(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1);
/*
* Loading the previous page-table hierarchy requires a serializing
* instruction that already allows the core to see the updated version.
* Xen-PV is assumed to serialize execution in a similar manner.
*/
- unuse_temporary_mm(prev);
+ unuse_temporary_mm(prev_mm);
/*
* Flushing the TLB might involve IPIs, which would require enabled
* IRQs, but not if the mm is not used, as it is in this point.
*/
- flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
+ flush_tlb_mm_range(text_poke_mm, text_poke_mm_addr, text_poke_mm_addr +
(cross_page_boundary ? 2 : 1) * PAGE_SIZE,
PAGE_SHIFT, false);
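[Note: stripped of locking, error handling and the cross-page case, the sequence above condenses to this outline (illustrative pseudo-code; ptep is obtained via get_locked_pte() as in the full function):

	/* 1: alias the target page at text_poke_mm_addr */
	set_pte_at(text_poke_mm, text_poke_mm_addr, ptep, pte);
	/* 2: enter the private address space, IRQs off */
	prev_mm = use_temporary_mm(text_poke_mm);
	/* 3: write through the writable alias, not the RO kernel text */
	memcpy((u8 *)text_poke_mm_addr + offset_in_page(addr), src, len);
	/* 4: tear down: unmap, leave the mm, flush only the local TLB */
	pte_clear(text_poke_mm, text_poke_mm_addr, ptep);
	unuse_temporary_mm(prev_mm);
	flush_tlb_mm_range(text_poke_mm, text_poke_mm_addr,
			   text_poke_mm_addr + PAGE_SIZE, PAGE_SHIFT, false);
]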
@@ -2450,7 +2353,7 @@ static void do_sync_core(void *info)
sync_core();
}
-void text_poke_sync(void)
+void smp_text_poke_sync_each_cpu(void)
{
on_each_cpu(do_sync_core, NULL, 1);
}
@@ -2460,64 +2363,66 @@ void text_poke_sync(void)
* this thing. When len == 6 everything is prefixed with 0x0f and we map
* opcode to Jcc.d8, using len to distinguish.
*/
-struct text_poke_loc {
+struct smp_text_poke_loc {
/* addr := _stext + rel_addr */
s32 rel_addr;
s32 disp;
u8 len;
u8 opcode;
- const u8 text[POKE_MAX_OPCODE_SIZE];
- /* see text_poke_bp_batch() */
+ const u8 text[TEXT_POKE_MAX_OPCODE_SIZE];
+ /* see smp_text_poke_batch_finish() */
u8 old;
};
-struct bp_patching_desc {
- struct text_poke_loc *vec;
+#define TEXT_POKE_ARRAY_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc))
+
+static struct smp_text_poke_array {
+ struct smp_text_poke_loc vec[TEXT_POKE_ARRAY_MAX];
int nr_entries;
- atomic_t refs;
-};
+} text_poke_array;
-static struct bp_patching_desc bp_desc;
+static DEFINE_PER_CPU(atomic_t, text_poke_array_refs);
-static __always_inline
-struct bp_patching_desc *try_get_desc(void)
+/*
+ * These four __always_inline annotations imply noinstr, necessary
+ * due to smp_text_poke_int3_handler() being noinstr:
+ */
+
+static __always_inline bool try_get_text_poke_array(void)
{
- struct bp_patching_desc *desc = &bp_desc;
+ atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
- if (!raw_atomic_inc_not_zero(&desc->refs))
- return NULL;
+ if (!raw_atomic_inc_not_zero(refs))
+ return false;
- return desc;
+ return true;
}
-static __always_inline void put_desc(void)
+static __always_inline void put_text_poke_array(void)
{
- struct bp_patching_desc *desc = &bp_desc;
+ atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
smp_mb__before_atomic();
- raw_atomic_dec(&desc->refs);
+ raw_atomic_dec(refs);
}
-static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
+static __always_inline void *text_poke_addr(const struct smp_text_poke_loc *tpl)
{
- return _stext + tp->rel_addr;
+ return _stext + tpl->rel_addr;
}
-static __always_inline int patch_cmp(const void *key, const void *elt)
+static __always_inline int patch_cmp(const void *tpl_a, const void *tpl_b)
{
- struct text_poke_loc *tp = (struct text_poke_loc *) elt;
-
- if (key < text_poke_addr(tp))
+ if (tpl_a < text_poke_addr(tpl_b))
return -1;
- if (key > text_poke_addr(tp))
+ if (tpl_a > text_poke_addr(tpl_b))
return 1;
return 0;
}
-noinstr int poke_int3_handler(struct pt_regs *regs)
+noinstr int smp_text_poke_int3_handler(struct pt_regs *regs)
{
- struct bp_patching_desc *desc;
- struct text_poke_loc *tp;
+ struct smp_text_poke_loc *tpl;
int ret = 0;
void *ip;
@@ -2526,41 +2431,40 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
/*
* Having observed our INT3 instruction, we now must observe
- * bp_desc with non-zero refcount:
+ * text_poke_array with non-zero refcount:
*
- * bp_desc.refs = 1 INT3
- * WMB RMB
- * write INT3 if (bp_desc.refs != 0)
+ * text_poke_array_refs = 1 INT3
+ * WMB RMB
+ * write INT3 if (text_poke_array_refs != 0)
*/
smp_rmb();
- desc = try_get_desc();
- if (!desc)
+ if (!try_get_text_poke_array())
return 0;
/*
- * Discount the INT3. See text_poke_bp_batch().
+ * Discount the INT3. See smp_text_poke_batch_finish().
*/
ip = (void *) regs->ip - INT3_INSN_SIZE;
/*
* Skip the binary search if there is a single member in the vector.
*/
- if (unlikely(desc->nr_entries > 1)) {
- tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
- sizeof(struct text_poke_loc),
+ if (unlikely(text_poke_array.nr_entries > 1)) {
+ tpl = __inline_bsearch(ip, text_poke_array.vec, text_poke_array.nr_entries,
+ sizeof(struct smp_text_poke_loc),
patch_cmp);
- if (!tp)
+ if (!tpl)
goto out_put;
} else {
- tp = desc->vec;
- if (text_poke_addr(tp) != ip)
+ tpl = text_poke_array.vec;
+ if (text_poke_addr(tpl) != ip)
goto out_put;
}
- ip += tp->len;
+ ip += tpl->len;
- switch (tp->opcode) {
+ switch (tpl->opcode) {
case INT3_INSN_OPCODE:
/*
* Someone poked an explicit INT3, they'll want to handle it,
@@ -2573,16 +2477,16 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
break;
case CALL_INSN_OPCODE:
- int3_emulate_call(regs, (long)ip + tp->disp);
+ int3_emulate_call(regs, (long)ip + tpl->disp);
break;
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
- int3_emulate_jmp(regs, (long)ip + tp->disp);
+ int3_emulate_jmp(regs, (long)ip + tpl->disp);
break;
case 0x70 ... 0x7f: /* Jcc */
- int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
+ int3_emulate_jcc(regs, tpl->opcode & 0xf, (long)ip, tpl->disp);
break;
default:
@@ -2592,51 +2496,50 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
ret = 1;
out_put:
- put_desc();
+ put_text_poke_array();
return ret;
}
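[Note: the WMB/RMB pairing in the comment above maps onto standard release/acquire semantics; a standalone C11 toy model (not kernel code) of the publish/consume handshake:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int refs;
static int payload;				/* stands in for text_poke_array */

static void publish(int value)
{
	payload = value;			/* write the data first... */
	atomic_store_explicit(&refs, 1, memory_order_release);
}

static bool try_get(void)
{
	int v = atomic_load_explicit(&refs, memory_order_acquire);

	while (v) {				/* atomic_inc_not_zero() */
		if (atomic_compare_exchange_weak(&refs, &v, v + 1))
			return true;		/* payload is now safe to read */
	}
	return false;
}
]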
-#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
-static struct text_poke_loc tp_vec[TP_VEC_MAX];
-static int tp_vec_nr;
-
/**
- * text_poke_bp_batch() -- update instructions on live kernel on SMP
- * @tp: vector of instructions to patch
- * @nr_entries: number of entries in the vector
+ * smp_text_poke_batch_finish() -- update instructions on live kernel on SMP
*
- * Modify multi-byte instruction by using int3 breakpoint on SMP.
- * We completely avoid stop_machine() here, and achieve the
- * synchronization using int3 breakpoint.
+ * Input state:
+ * text_poke_array.vec: vector of instructions to patch
+ * text_poke_array.nr_entries: number of entries in the vector
+ *
+ * Modify multi-byte instructions by using INT3 breakpoints on SMP.
+ * We completely avoid using stop_machine() here, and achieve the
+ * synchronization using INT3 breakpoints and SMP cross-calls.
*
* The way it is done:
* - For each entry in the vector:
- * - add a int3 trap to the address that will be patched
- * - sync cores
+ * - add an INT3 trap to the address that will be patched
+ * - SMP sync all CPUs
* - For each entry in the vector:
* - update all but the first byte of the patched range
- * - sync cores
+ * - SMP sync all CPUs
* - For each entry in the vector:
- * - replace the first byte (int3) by the first byte of
+ * - replace the first byte (INT3) by the first byte of the
* replacing opcode
- * - sync cores
+ * - SMP sync all CPUs
*/
-static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
+void smp_text_poke_batch_finish(void)
{
unsigned char int3 = INT3_INSN_OPCODE;
unsigned int i;
int do_sync;
- lockdep_assert_held(&text_mutex);
+ if (!text_poke_array.nr_entries)
+ return;
- bp_desc.vec = tp;
- bp_desc.nr_entries = nr_entries;
+ lockdep_assert_held(&text_mutex);
/*
- * Corresponds to the implicit memory barrier in try_get_desc() to
- * ensure reading a non-zero refcount provides up to date bp_desc data.
+	 * Corresponds to the implicit memory barrier in try_get_text_poke_array() to
+	 * ensure that reading a non-zero refcount provides up-to-date text_poke_array data.
*/
- atomic_set_release(&bp_desc.refs, 1);
+ for_each_possible_cpu(i)
+ atomic_set_release(per_cpu_ptr(&text_poke_array_refs, i), 1);
/*
* Function tracing can enable thousands of places that need to be
@@ -2649,33 +2552,33 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
cond_resched();
/*
- * Corresponding read barrier in int3 notifier for making sure the
- * nr_entries and handler are correctly ordered wrt. patching.
+ * Corresponding read barrier in INT3 notifier for making sure the
+ * text_poke_array.nr_entries and handler are correctly ordered wrt. patching.
*/
smp_wmb();
/*
- * First step: add a int3 trap to the address that will be patched.
+	 * First step: add an INT3 trap to the address that will be patched.
*/
- for (i = 0; i < nr_entries; i++) {
- tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
- text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
+ for (i = 0; i < text_poke_array.nr_entries; i++) {
+ text_poke_array.vec[i].old = *(u8 *)text_poke_addr(&text_poke_array.vec[i]);
+ text_poke(text_poke_addr(&text_poke_array.vec[i]), &int3, INT3_INSN_SIZE);
}
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
/*
* Second step: update all but the first byte of the patched range.
*/
- for (do_sync = 0, i = 0; i < nr_entries; i++) {
- u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
- u8 _new[POKE_MAX_OPCODE_SIZE+1];
- const u8 *new = tp[i].text;
- int len = tp[i].len;
+ for (do_sync = 0, i = 0; i < text_poke_array.nr_entries; i++) {
+ u8 old[TEXT_POKE_MAX_OPCODE_SIZE+1] = { text_poke_array.vec[i].old, };
+ u8 _new[TEXT_POKE_MAX_OPCODE_SIZE+1];
+ const u8 *new = text_poke_array.vec[i].text;
+ int len = text_poke_array.vec[i].len;
if (len - INT3_INSN_SIZE > 0) {
memcpy(old + INT3_INSN_SIZE,
- text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ text_poke_addr(&text_poke_array.vec[i]) + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
if (len == 6) {
@@ -2684,7 +2587,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
new = _new;
}
- text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ text_poke(text_poke_addr(&text_poke_array.vec[i]) + INT3_INSN_SIZE,
new + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
@@ -2715,7 +2618,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* The old instruction is recorded so that the event can be
* processed forwards or backwards.
*/
- perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
+ perf_event_text_poke(text_poke_addr(&text_poke_array.vec[i]), old, len, new, len);
}
if (do_sync) {
@@ -2724,63 +2627,79 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* not necessary and we'd be safe even without it. But
* better safe than sorry (plus there's not only Intel).
*/
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
}
/*
- * Third step: replace the first byte (int3) by the first byte of
+ * Third step: replace the first byte (INT3) by the first byte of the
* replacing opcode.
*/
- for (do_sync = 0, i = 0; i < nr_entries; i++) {
- u8 byte = tp[i].text[0];
+ for (do_sync = 0, i = 0; i < text_poke_array.nr_entries; i++) {
+ u8 byte = text_poke_array.vec[i].text[0];
- if (tp[i].len == 6)
+ if (text_poke_array.vec[i].len == 6)
byte = 0x0f;
if (byte == INT3_INSN_OPCODE)
continue;
- text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
+ text_poke(text_poke_addr(&text_poke_array.vec[i]), &byte, INT3_INSN_SIZE);
do_sync++;
}
if (do_sync)
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
/*
* Remove and wait for refs to be zero.
+ *
+	 * Notably, if the INT3 got removed in step 3 above, then
+	 * smp_text_poke_sync_each_cpu() will have serialized against any running
+	 * INT3 handlers and the spin-wait below will not happen.
+	 *
+	 * IOW, unless the replacement instruction is INT3, this case goes
+	 * unused.
*/
- if (!atomic_dec_and_test(&bp_desc.refs))
- atomic_cond_read_acquire(&bp_desc.refs, !VAL);
+ for_each_possible_cpu(i) {
+ atomic_t *refs = per_cpu_ptr(&text_poke_array_refs, i);
+
+ if (unlikely(!atomic_dec_and_test(refs)))
+ atomic_cond_read_acquire(refs, !VAL);
+ }
+
+ /* They are all completed: */
+ text_poke_array.nr_entries = 0;
}
-static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
- const void *opcode, size_t len, const void *emulate)
+static void __smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate)
{
+ struct smp_text_poke_loc *tpl;
struct insn insn;
int ret, i = 0;
+ tpl = &text_poke_array.vec[text_poke_array.nr_entries++];
+
if (len == 6)
i = 1;
- memcpy((void *)tp->text, opcode+i, len-i);
+ memcpy((void *)tpl->text, opcode+i, len-i);
if (!emulate)
emulate = opcode;
ret = insn_decode_kernel(&insn, emulate);
BUG_ON(ret < 0);
- tp->rel_addr = addr - (void *)_stext;
- tp->len = len;
- tp->opcode = insn.opcode.bytes[0];
+ tpl->rel_addr = addr - (void *)_stext;
+ tpl->len = len;
+ tpl->opcode = insn.opcode.bytes[0];
if (is_jcc32(&insn)) {
/*
* Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
*/
- tp->opcode = insn.opcode.bytes[1] - 0x10;
+ tpl->opcode = insn.opcode.bytes[1] - 0x10;
}
- switch (tp->opcode) {
+ switch (tpl->opcode) {
case RET_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
@@ -2789,14 +2708,14 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
* next instruction can be padded with INT3.
*/
for (i = insn.length; i < len; i++)
- BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
+ BUG_ON(tpl->text[i] != INT3_INSN_OPCODE);
break;
default:
BUG_ON(len != insn.length);
}
- switch (tp->opcode) {
+ switch (tpl->opcode) {
case INT3_INSN_OPCODE:
case RET_INSN_OPCODE:
break;
@@ -2805,21 +2724,21 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
case 0x70 ... 0x7f: /* Jcc */
- tp->disp = insn.immediate.value;
+ tpl->disp = insn.immediate.value;
break;
default: /* assume NOP */
switch (len) {
case 2: /* NOP2 -- emulate as JMP8+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
- tp->opcode = JMP8_INSN_OPCODE;
- tp->disp = 0;
+ tpl->opcode = JMP8_INSN_OPCODE;
+ tpl->disp = 0;
break;
case 5: /* NOP5 -- emulate as JMP32+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
- tp->opcode = JMP32_INSN_OPCODE;
- tp->disp = 0;
+ tpl->opcode = JMP32_INSN_OPCODE;
+ tpl->disp = 0;
break;
default: /* unknown instruction */
@@ -2830,51 +2749,50 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
}
/*
- * We hard rely on the tp_vec being ordered; ensure this is so by flushing
+ * We rely hard on text_poke_array.vec being ordered; ensure this is so by flushing
* early if needed.
*/
-static bool tp_order_fail(void *addr)
+static bool text_poke_addr_ordered(void *addr)
{
- struct text_poke_loc *tp;
-
- if (!tp_vec_nr)
- return false;
+ WARN_ON_ONCE(!addr);
- if (!addr) /* force */
+ if (!text_poke_array.nr_entries)
return true;
- tp = &tp_vec[tp_vec_nr - 1];
- if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
- return true;
-
- return false;
-}
-
-static void text_poke_flush(void *addr)
-{
- if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
- text_poke_bp_batch(tp_vec, tp_vec_nr);
- tp_vec_nr = 0;
- }
-}
+ /*
+	 * If the address of the last entry currently queued is higher than
+	 * the address of the new entry we'd like to add, then ordering is
+	 * violated and we must first flush all pending patching requests:
+ */
+ if (text_poke_addr(text_poke_array.vec + text_poke_array.nr_entries-1) > addr)
+ return false;
-void text_poke_finish(void)
-{
- text_poke_flush(NULL);
+ return true;
}
-void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
+/**
+ * smp_text_poke_batch_add() -- update instruction on live kernel on SMP, batched
+ * @addr: address to patch
+ * @opcode: opcode of new instruction
+ * @len: length to copy
+ * @emulate: instruction to be emulated
+ *
+ * Add a new instruction to the current queue of to-be-patched instructions
+ * the kernel maintains. The patching request will not be executed immediately,
+ * but becomes part of an array of patching requests, optimized for batched
+ * execution. All pending patching requests will be executed on the next
+ * smp_text_poke_batch_finish() call.
+ */
+void __ref smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate)
{
- struct text_poke_loc *tp;
-
- text_poke_flush(addr);
-
- tp = &tp_vec[tp_vec_nr++];
- text_poke_loc_init(tp, addr, opcode, len, emulate);
+ if (text_poke_array.nr_entries == TEXT_POKE_ARRAY_MAX || !text_poke_addr_ordered(addr))
+ smp_text_poke_batch_finish();
+ __smp_text_poke_batch_add(addr, opcode, len, emulate);
}
/**
- * text_poke_bp() -- update instructions on live kernel on SMP
+ * smp_text_poke_single() -- update instruction on live kernel on SMP immediately
* @addr: address to patch
* @opcode: opcode of new instruction
* @len: length to copy
@@ -2882,12 +2800,11 @@ void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const voi
*
* Update a single instruction with the vector in the stack, avoiding
* dynamically allocated memory. This function should be used when it is
- * not possible to allocate memory.
+ * not possible to allocate memory for a vector. The single instruction
+ * is patched in immediately.
*/
-void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
+void __ref smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate)
{
- struct text_poke_loc tp;
-
- text_poke_loc_init(&tp, addr, opcode, len, emulate);
- text_poke_bp_batch(&tp, 1);
+ __smp_text_poke_batch_add(addr, opcode, len, emulate);
+ smp_text_poke_batch_finish();
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index eebc360ed1bb..ba5a4ccda37a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1486,7 +1486,7 @@ static void __init delay_with_tsc(void)
* 1 GHz == 40 jiffies
*/
do {
- rep_nop();
+ native_pause();
now = rdtsc();
} while ((now - start) < 40000000000ULL / HZ && time_before_eq(jiffies, end));
}
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index d86d7d6e750c..a951333c5995 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -185,7 +185,7 @@ static void *patch_dest(void *dest, bool direct)
u8 *pad = dest - tsize;
memcpy(insn_buff, skl_call_thunk_template, tsize);
- apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);
+ text_poke_apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);
/* Already patched? */
if (!bcmp(pad, insn_buff, tsize))
@@ -294,7 +294,7 @@ static bool is_callthunk(void *addr)
pad = (void *)(dest - tmpl_size);
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);
+ text_poke_apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);
return !bcmp(pad, insn_buff, tmpl_size);
}
@@ -312,7 +312,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
return 0;
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);
+ text_poke_apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);
memcpy(*pprog, insn_buff, tmpl_size);
*pprog += tmpl_size;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 606c0cb97e86..246a76ddbf46 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -34,21 +34,63 @@
#include "cpu.h"
+/*
+ * Speculation Vulnerability Handling
+ *
+ * Each vulnerability is handled with the following functions:
+ * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
+ * take into account all relevant command line
+ * options.
+ * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
+ * selected a mitigation, in case the selection
+ * may want to change based on other choices
+ * made. This function is optional.
+ * <vuln>_apply_mitigation() -- Enable the selected mitigation.
+ *
+ * The compile-time mitigation in all cases should be AUTO. An explicit
+ * command-line option can override AUTO. If no such option is
+ * provided, <vuln>_select_mitigation() will override AUTO to the best
+ * mitigation option.
+ */
+
static void __init spectre_v1_select_mitigation(void);
+static void __init spectre_v1_apply_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
+static void __init spectre_v2_update_mitigation(void);
+static void __init spectre_v2_apply_mitigation(void);
static void __init retbleed_select_mitigation(void);
+static void __init retbleed_update_mitigation(void);
+static void __init retbleed_apply_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
+static void __init spectre_v2_user_update_mitigation(void);
+static void __init spectre_v2_user_apply_mitigation(void);
static void __init ssb_select_mitigation(void);
+static void __init ssb_apply_mitigation(void);
static void __init l1tf_select_mitigation(void);
+static void __init l1tf_apply_mitigation(void);
static void __init mds_select_mitigation(void);
-static void __init md_clear_update_mitigation(void);
-static void __init md_clear_select_mitigation(void);
+static void __init mds_update_mitigation(void);
+static void __init mds_apply_mitigation(void);
static void __init taa_select_mitigation(void);
+static void __init taa_update_mitigation(void);
+static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
+static void __init mmio_update_mitigation(void);
+static void __init mmio_apply_mitigation(void);
+static void __init rfds_select_mitigation(void);
+static void __init rfds_update_mitigation(void);
+static void __init rfds_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
+static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
+static void __init srso_update_mitigation(void);
+static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
+static void __init gds_apply_mitigation(void);
+static void __init bhi_select_mitigation(void);
+static void __init bhi_update_mitigation(void);
+static void __init bhi_apply_mitigation(void);
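[Note: a skeleton for a hypothetical vulnerability "foo" following the three-phase convention described above (every identifier here is invented for illustration):

static void __init foo_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_FOO) || cpu_mitigations_off()) {
		foo_mitigation = FOO_MITIGATION_OFF;
		return;
	}
	if (foo_mitigation == FOO_MITIGATION_AUTO)
		foo_mitigation = FOO_MITIGATION_FULL;
}

static void __init foo_update_mitigation(void)
{
	/* Optional: revisit the choice based on what other bugs selected. */
}

static void __init foo_apply_mitigation(void)
{
	if (foo_mitigation == FOO_MITIGATION_FULL)
		setup_force_cpu_cap(X86_FEATURE_FOO_MITIGATED);
}
]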
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@ -127,9 +169,13 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
*/
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
-/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
-DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
-EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
+/*
+ * Controls CPU Fill buffer clear before VMenter. This is a subset of
+ * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
+ * mitigation is required.
+ */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
void __init cpu_select_mitigations(void)
{
@@ -154,30 +200,60 @@ void __init cpu_select_mitigations(void)
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
- /*
- * retbleed_select_mitigation() relies on the state set by
- * spectre_v2_select_mitigation(); specifically it wants to know about
- * spectre_v2=ibrs.
- */
retbleed_select_mitigation();
- /*
- * spectre_v2_user_select_mitigation() relies on the state set by
- * retbleed_select_mitigation(); specifically the STIBP selection is
- * forced for UNRET or IBPB.
- */
spectre_v2_user_select_mitigation();
ssb_select_mitigation();
l1tf_select_mitigation();
- md_clear_select_mitigation();
+ mds_select_mitigation();
+ taa_select_mitigation();
+ mmio_select_mitigation();
+ rfds_select_mitigation();
srbds_select_mitigation();
l1d_flush_select_mitigation();
+ srso_select_mitigation();
+ gds_select_mitigation();
+ bhi_select_mitigation();
/*
- * srso_select_mitigation() depends and must run after
- * retbleed_select_mitigation().
+ * After mitigations are selected, some may need to update their
+ * choices.
*/
- srso_select_mitigation();
- gds_select_mitigation();
+ spectre_v2_update_mitigation();
+ /*
+ * retbleed_update_mitigation() relies on the state set by
+ * spectre_v2_update_mitigation(); specifically it wants to know about
+ * spectre_v2=ibrs.
+ */
+ retbleed_update_mitigation();
+
+ /*
+ * spectre_v2_user_update_mitigation() depends on
+ * retbleed_update_mitigation(), specifically the STIBP
+ * selection is forced for UNRET or IBPB.
+ */
+ spectre_v2_user_update_mitigation();
+ mds_update_mitigation();
+ taa_update_mitigation();
+ mmio_update_mitigation();
+ rfds_update_mitigation();
+ bhi_update_mitigation();
+ /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
+ srso_update_mitigation();
+
+ spectre_v1_apply_mitigation();
+ spectre_v2_apply_mitigation();
+ retbleed_apply_mitigation();
+ spectre_v2_user_apply_mitigation();
+ ssb_apply_mitigation();
+ l1tf_apply_mitigation();
+ mds_apply_mitigation();
+ taa_apply_mitigation();
+ mmio_apply_mitigation();
+ rfds_apply_mitigation();
+ srbds_apply_mitigation();
+ srso_apply_mitigation();
+ gds_apply_mitigation();
+ bhi_apply_mitigation();
}
/*
@@ -280,6 +356,12 @@ enum rfds_mitigations {
static enum rfds_mitigations rfds_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
+/*
+ * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
+ * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
+ */
+static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
+
static void __init mds_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
@@ -290,12 +372,34 @@ static void __init mds_select_mitigation(void)
if (mds_mitigation == MDS_MITIGATION_AUTO)
mds_mitigation = MDS_MITIGATION_FULL;
+ if (mds_mitigation == MDS_MITIGATION_OFF)
+ return;
+
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
+static void __init mds_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
+ return;
+
+ /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
+ if (verw_clear_cpu_buf_mitigation_selected)
+ mds_mitigation = MDS_MITIGATION_FULL;
+
if (mds_mitigation == MDS_MITIGATION_FULL) {
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV;
+ }
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+static void __init mds_apply_mitigation(void)
+{
+ if (mds_mitigation == MDS_MITIGATION_FULL ||
+ mds_mitigation == MDS_MITIGATION_VMWERV) {
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
(mds_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false);
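[Note: the coordination through verw_clear_cpu_buf_mitigation_selected can be modeled in miniature (standalone toy code, not the kernel logic verbatim): any bug opting in during its select phase upgrades the others during their update phase.

#include <stdbool.h>

enum state { OFF, AUTO, FULL };
static bool verw_selected;

static void select_phase(enum state *s)
{
	if (*s == AUTO)
		*s = FULL;
	if (*s != OFF)
		verw_selected = true;	/* one VERW serves all four bugs */
}

static void update_phase(enum state *s)
{
	if (verw_selected)
		*s = FULL;		/* piggy-back on the shared VERW */
}
]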
@@ -335,6 +439,11 @@ static const char * const taa_strings[] = {
[TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
};
+static bool __init taa_vulnerable(void)
+{
+ return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
+}
+
static void __init taa_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
@@ -348,48 +457,63 @@ static void __init taa_select_mitigation(void)
return;
}
- if (cpu_mitigations_off()) {
+ if (cpu_mitigations_off())
taa_mitigation = TAA_MITIGATION_OFF;
- return;
- }
- /*
- * TAA mitigation via VERW is turned off if both
- * tsx_async_abort=off and mds=off are specified.
- */
- if (taa_mitigation == TAA_MITIGATION_OFF &&
- mds_mitigation == MDS_MITIGATION_OFF)
+ /* Microcode will be checked in taa_update_mitigation(). */
+ if (taa_mitigation == TAA_MITIGATION_AUTO)
+ taa_mitigation = TAA_MITIGATION_VERW;
+
+ if (taa_mitigation != TAA_MITIGATION_OFF)
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
+static void __init taa_update_mitigation(void)
+{
+ if (!taa_vulnerable() || cpu_mitigations_off())
return;
- if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ if (verw_clear_cpu_buf_mitigation_selected)
taa_mitigation = TAA_MITIGATION_VERW;
- else
- taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
- /*
- * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
- * A microcode update fixes this behavior to clear CPU buffers. It also
- * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
- * ARCH_CAP_TSX_CTRL_MSR bit.
- *
- * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
- * update is required.
- */
- if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
- !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
- taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+ if (taa_mitigation == TAA_MITIGATION_VERW) {
+ /* Check if the requisite ucode is available. */
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
- /*
- * TSX is enabled, select alternate mitigation for TAA which is
- * the same as MDS. Enable MDS static branch to clear CPU buffers.
- *
- * For guests that can't determine whether the correct microcode is
- * present on host, enable the mitigation for UCODE_NEEDED as well.
- */
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ /*
+ * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+ * A microcode update fixes this behavior to clear CPU buffers. It also
+ * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+ * ARCH_CAP_TSX_CTRL_MSR bit.
+ *
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+ }
- if (taa_nosmt || cpu_mitigations_auto_nosmt())
- cpu_smt_disable(false);
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static void __init taa_apply_mitigation(void)
+{
+ if (taa_mitigation == TAA_MITIGATION_VERW ||
+ taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
+ /*
+ * TSX is enabled, select alternate mitigation for TAA which is
+ * the same as MDS. Enable MDS static branch to clear CPU buffers.
+ *
+ * For guests that can't determine whether the correct microcode is
+ * present on host, enable the mitigation for UCODE_NEEDED as well.
+ */
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+ }
}
static int __init tsx_async_abort_parse_cmdline(char *str)
@@ -432,25 +556,62 @@ static void __init mmio_select_mitigation(void)
return;
}
+ /* Microcode will be checked in mmio_update_mitigation(). */
+ if (mmio_mitigation == MMIO_MITIGATION_AUTO)
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
- * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+ * by MDS or TAA.
*/
- if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
- boot_cpu_has(X86_FEATURE_RTM)))
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
+static void __init mmio_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off())
+ return;
+
+ if (verw_clear_cpu_buf_mitigation_selected)
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+
+ if (mmio_mitigation == MMIO_MITIGATION_VERW) {
+ /*
+ * Check if the system has the right microcode.
+ *
+ * CPU Fill buffer clear mitigation is enumerated by either an explicit
+ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ * affected systems.
+ */
+ if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
+ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+ }
+
+ pr_info("%s\n", mmio_strings[mmio_mitigation]);
+}
+
+static void __init mmio_apply_mitigation(void)
+{
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return;
/*
- * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
- * mitigations, disable KVM-only mitigation in that case.
+ * Only enable the VMM mitigation if the CPU buffer clear mitigation is
+ * not being used.
*/
- if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
- static_branch_disable(&mmio_stale_data_clear);
- else
- static_branch_enable(&mmio_stale_data_clear);
+ if (verw_clear_cpu_buf_mitigation_selected) {
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ static_branch_disable(&cpu_buf_vm_clear);
+ } else {
+ static_branch_enable(&cpu_buf_vm_clear);
+ }
/*
* If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
@@ -460,21 +621,6 @@ static void __init mmio_select_mitigation(void)
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
- /*
- * Check if the system has the right microcode.
- *
- * CPU Fill buffer clear mitigation is enumerated by either an explicit
- * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
- * affected systems.
- */
- if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
- (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
- boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
- !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
- mmio_mitigation = MMIO_MITIGATION_VERW;
- else
- mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
-
if (mmio_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
}
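[Note: the microcode enumeration check above, restated as a standalone predicate (the MSR bit positions are assumptions for illustration, not copied from msr-index.h):

#include <stdbool.h>
#include <stdint.h>

#define ARCH_CAP_MDS_NO		(1ULL << 5)
#define ARCH_CAP_FB_CLEAR	(1ULL << 17)

static bool mmio_verw_ucode_present(uint64_t arch_cap,
				    bool md_clear, bool flush_l1d)
{
	return (arch_cap & ARCH_CAP_FB_CLEAR) ||
	       (md_clear && flush_l1d && !(arch_cap & ARCH_CAP_MDS_NO));
}
]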
@@ -509,22 +655,48 @@ static const char * const rfds_strings[] = {
[RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
};
+static inline bool __init verw_clears_cpu_reg_file(void)
+{
+ return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
+}
+
static void __init rfds_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
rfds_mitigation = RFDS_MITIGATION_OFF;
return;
}
+
+ if (rfds_mitigation == RFDS_MITIGATION_AUTO)
+ rfds_mitigation = RFDS_MITIGATION_VERW;
+
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;
- if (rfds_mitigation == RFDS_MITIGATION_AUTO)
+ if (verw_clears_cpu_reg_file())
+ verw_clear_cpu_buf_mitigation_selected = true;
+}
+
+static void __init rfds_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off())
+ return;
+
+ if (verw_clear_cpu_buf_mitigation_selected)
rfds_mitigation = RFDS_MITIGATION_VERW;
- if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ if (rfds_mitigation == RFDS_MITIGATION_VERW) {
+ if (!verw_clears_cpu_reg_file())
+ rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+ }
+
+ pr_info("%s\n", rfds_strings[rfds_mitigation]);
+}
+
+static void __init rfds_apply_mitigation(void)
+{
+ if (rfds_mitigation == RFDS_MITIGATION_VERW)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
- else
- rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
}
static __init int rfds_parse_cmdline(char *str)
@@ -545,74 +717,11 @@ static __init int rfds_parse_cmdline(char *str)
early_param("reg_file_data_sampling", rfds_parse_cmdline);
#undef pr_fmt
-#define pr_fmt(fmt) "" fmt
-
-static void __init md_clear_update_mitigation(void)
-{
- if (cpu_mitigations_off())
- return;
-
- if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
- goto out;
-
- /*
- * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
- * Stale Data mitigation, if necessary.
- */
- if (mds_mitigation == MDS_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_MDS)) {
- mds_mitigation = MDS_MITIGATION_FULL;
- mds_select_mitigation();
- }
- if (taa_mitigation == TAA_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_TAA)) {
- taa_mitigation = TAA_MITIGATION_VERW;
- taa_select_mitigation();
- }
- /*
- * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
- * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
- */
- if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
- mmio_mitigation = MMIO_MITIGATION_VERW;
- mmio_select_mitigation();
- }
- if (rfds_mitigation == RFDS_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_RFDS)) {
- rfds_mitigation = RFDS_MITIGATION_VERW;
- rfds_select_mitigation();
- }
-out:
- if (boot_cpu_has_bug(X86_BUG_MDS))
- pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
- if (boot_cpu_has_bug(X86_BUG_TAA))
- pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
- if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
- pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
- if (boot_cpu_has_bug(X86_BUG_RFDS))
- pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
-}
-
-static void __init md_clear_select_mitigation(void)
-{
- mds_select_mitigation();
- taa_select_mitigation();
- mmio_select_mitigation();
- rfds_select_mitigation();
-
- /*
- * As these mitigations are inter-related and rely on VERW instruction
- * to clear the microarchitural buffers, update and print their status
- * after mitigation selection is done for each of these vulnerabilities.
- */
- md_clear_update_mitigation();
-}
-
-#undef pr_fmt
#define pr_fmt(fmt) "SRBDS: " fmt
enum srbds_mitigations {
SRBDS_MITIGATION_OFF,
+ SRBDS_MITIGATION_AUTO,
SRBDS_MITIGATION_UCODE_NEEDED,
SRBDS_MITIGATION_FULL,
SRBDS_MITIGATION_TSX_OFF,
@@ -620,7 +729,7 @@ enum srbds_mitigations {
};
static enum srbds_mitigations srbds_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_FULL : SRBDS_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
static const char * const srbds_strings[] = {
[SRBDS_MITIGATION_OFF] = "Vulnerable",
@@ -671,8 +780,13 @@ void update_srbds_msr(void)
static void __init srbds_select_mitigation(void)
{
- if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) {
+ srbds_mitigation = SRBDS_MITIGATION_OFF;
return;
+ }
+
+ if (srbds_mitigation == SRBDS_MITIGATION_AUTO)
+ srbds_mitigation = SRBDS_MITIGATION_FULL;
/*
* Check to see if this is one of the MDS_NO systems supporting TSX that
@@ -686,13 +800,17 @@ static void __init srbds_select_mitigation(void)
srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
- else if (cpu_mitigations_off() || srbds_off)
+ else if (srbds_off)
srbds_mitigation = SRBDS_MITIGATION_OFF;
- update_srbds_msr();
pr_info("%s\n", srbds_strings[srbds_mitigation]);
}
+static void __init srbds_apply_mitigation(void)
+{
+ update_srbds_msr();
+}
+
static int __init srbds_parse_cmdline(char *str)
{
if (!str)
@@ -739,6 +857,7 @@ early_param("l1d_flush", l1d_flush_parse_cmdline);
enum gds_mitigations {
GDS_MITIGATION_OFF,
+ GDS_MITIGATION_AUTO,
GDS_MITIGATION_UCODE_NEEDED,
GDS_MITIGATION_FORCE,
GDS_MITIGATION_FULL,
@@ -747,7 +866,7 @@ enum gds_mitigations {
};
static enum gds_mitigations gds_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_FULL : GDS_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
static const char * const gds_strings[] = {
[GDS_MITIGATION_OFF] = "Vulnerable",
@@ -788,6 +907,7 @@ void update_gds_msr(void)
case GDS_MITIGATION_FORCE:
case GDS_MITIGATION_UCODE_NEEDED:
case GDS_MITIGATION_HYPERVISOR:
+ case GDS_MITIGATION_AUTO:
return;
}
@@ -811,26 +931,21 @@ static void __init gds_select_mitigation(void)
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
gds_mitigation = GDS_MITIGATION_HYPERVISOR;
- goto out;
+ return;
}
if (cpu_mitigations_off())
gds_mitigation = GDS_MITIGATION_OFF;
/* Will verify below that mitigation _can_ be disabled */
+ if (gds_mitigation == GDS_MITIGATION_AUTO)
+ gds_mitigation = GDS_MITIGATION_FULL;
+
/* No microcode */
if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
- if (gds_mitigation == GDS_MITIGATION_FORCE) {
- /*
- * This only needs to be done on the boot CPU so do it
- * here rather than in update_gds_msr()
- */
- setup_clear_cpu_cap(X86_FEATURE_AVX);
- pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
- } else {
+ if (gds_mitigation != GDS_MITIGATION_FORCE)
gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
- }
- goto out;
+ return;
}
/* Microcode has mitigation, use it */
@@ -851,9 +966,25 @@ static void __init gds_select_mitigation(void)
*/
gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
}
+}
+
+static void __init gds_apply_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_GDS))
+ return;
+
+ /* Microcode is present */
+ if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
+ update_gds_msr();
+ else if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ /*
+ * This only needs to be done on the boot CPU so do it
+ * here rather than in update_gds_msr()
+ */
+ setup_clear_cpu_cap(X86_FEATURE_AVX);
+ pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
+ }
- update_gds_msr();
-out:
pr_info("%s\n", gds_strings[gds_mitigation]);
}
@@ -914,10 +1045,14 @@ static bool smap_works_speculatively(void)
static void __init spectre_v1_select_mitigation(void)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+}
+
+static void __init spectre_v1_apply_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
return;
- }
if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
/*
@@ -972,6 +1107,7 @@ enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
enum retbleed_mitigation {
RETBLEED_MITIGATION_NONE,
+ RETBLEED_MITIGATION_AUTO,
RETBLEED_MITIGATION_UNRET,
RETBLEED_MITIGATION_IBPB,
RETBLEED_MITIGATION_IBRS,
@@ -979,14 +1115,6 @@ enum retbleed_mitigation {
RETBLEED_MITIGATION_STUFF,
};
-enum retbleed_mitigation_cmd {
- RETBLEED_CMD_OFF,
- RETBLEED_CMD_AUTO,
- RETBLEED_CMD_UNRET,
- RETBLEED_CMD_IBPB,
- RETBLEED_CMD_STUFF,
-};
-
static const char * const retbleed_strings[] = {
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
@@ -997,9 +1125,7 @@ static const char * const retbleed_strings[] = {
};
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
- RETBLEED_MITIGATION_NONE;
-static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_CMD_AUTO : RETBLEED_CMD_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
static int __ro_after_init retbleed_nosmt = false;
@@ -1016,15 +1142,15 @@ static int __init retbleed_parse_cmdline(char *str)
}
if (!strcmp(str, "off")) {
- retbleed_cmd = RETBLEED_CMD_OFF;
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
} else if (!strcmp(str, "auto")) {
- retbleed_cmd = RETBLEED_CMD_AUTO;
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
} else if (!strcmp(str, "unret")) {
- retbleed_cmd = RETBLEED_CMD_UNRET;
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
} else if (!strcmp(str, "ibpb")) {
- retbleed_cmd = RETBLEED_CMD_IBPB;
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
} else if (!strcmp(str, "stuff")) {
- retbleed_cmd = RETBLEED_CMD_STUFF;
+ retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
} else if (!strcmp(str, "nosmt")) {
retbleed_nosmt = true;
} else if (!strcmp(str, "force")) {
@@ -1045,72 +1171,109 @@ early_param("retbleed", retbleed_parse_cmdline);
static void __init retbleed_select_mitigation(void)
{
- bool mitigate_smt = false;
-
- if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
- return;
-
- switch (retbleed_cmd) {
- case RETBLEED_CMD_OFF:
+ if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) {
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
return;
+ }
- case RETBLEED_CMD_UNRET:
- if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
- } else {
+ switch (retbleed_mitigation) {
+ case RETBLEED_MITIGATION_UNRET:
+ if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
- goto do_cmd_auto;
}
break;
-
- case RETBLEED_CMD_IBPB:
+ case RETBLEED_MITIGATION_IBPB:
if (!boot_cpu_has(X86_FEATURE_IBPB)) {
pr_err("WARNING: CPU does not support IBPB.\n");
- goto do_cmd_auto;
- } else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
- } else {
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
+ } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
- goto do_cmd_auto;
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
}
break;
+ case RETBLEED_MITIGATION_STUFF:
+ if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
+ pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
+ } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+ pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
+ }
+ break;
+ default:
+ break;
+ }
- case RETBLEED_CMD_STUFF:
- if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) &&
- spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
- retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
+ if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
+ return;
- } else {
- if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))
- pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
- else
- pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
+ /* Intel mitigation selected in retbleed_update_mitigation() */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
+ boot_cpu_has(X86_FEATURE_IBPB))
+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ else
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
+ }
+}
- goto do_cmd_auto;
- }
- break;
+static void __init retbleed_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+ return;
+
+ if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
+ goto out;
-do_cmd_auto:
- case RETBLEED_CMD_AUTO:
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
- else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
- boot_cpu_has(X86_FEATURE_IBPB))
- retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+ /*
+ * retbleed=stuff is only allowed on Intel. If stuffing can't be used
+ * then a different mitigation will be selected below.
+ */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) {
+ if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
+ pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
+ retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
}
+ }
+ /*
+ * Let IBRS trump all on Intel without affecting the effects of the
+	 * retbleed= cmdline option except for call-depth-based stuffing.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ switch (spectre_v2_enabled) {
+ case SPECTRE_V2_IBRS:
+ retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+ break;
+ case SPECTRE_V2_EIBRS:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+ break;
+ default:
+ if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
+ pr_err(RETBLEED_INTEL_MSG);
+ }
+ /* If nothing has set the mitigation yet, default to NONE. */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
+ retbleed_mitigation = RETBLEED_MITIGATION_NONE;
+ }
+out:
+ pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
- /*
- * The Intel mitigation (IBRS or eIBRS) was already selected in
- * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
- * be set accordingly below.
- */
- break;
- }
+static void __init retbleed_apply_mitigation(void)
+{
+ bool mitigate_smt = false;
switch (retbleed_mitigation) {
+ case RETBLEED_MITIGATION_NONE:
+ return;
+
case RETBLEED_MITIGATION_UNRET:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
@@ -1160,28 +1323,6 @@ do_cmd_auto:
if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
(retbleed_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false);
-
- /*
- * Let IBRS trump all on Intel without affecting the effects of the
- * retbleed= cmdline option except for call depth based stuffing
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- switch (spectre_v2_enabled) {
- case SPECTRE_V2_IBRS:
- retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
- break;
- case SPECTRE_V2_EIBRS:
- case SPECTRE_V2_EIBRS_RETPOLINE:
- case SPECTRE_V2_EIBRS_LFENCE:
- retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
- break;
- default:
- if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
- pr_err(RETBLEED_INTEL_MSG);
- }
- }
-
- pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
#undef pr_fmt
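
The bugs.c rework above splits each mitigation into three phases: a select step that parses the cmdline and picks a tentative mode, an update step that reconciles that choice against the other mitigations (e.g. IBRS trumping retbleed on Intel), and an apply step that sets feature caps and programs MSRs. A minimal sketch of the pattern with hypothetical names; the real functions are the retbleed_*/spectre_v2_*/srso_* variants in these hunks:

/* Illustrative sketch only -- names are hypothetical, not kernel API. */
static void __init foo_select_mitigation(void) { /* cmdline -> tentative mode */ }
static void __init foo_update_mitigation(void) { /* reconcile with other bugs */ }
static void __init foo_apply_mitigation(void)  { /* set caps, program MSRs */ }

static void __init foo_mitigate(void)
{
	foo_select_mitigation();	/* every select runs first */
	foo_update_mitigation();	/* then cross-mitigation fixups */
	foo_apply_mitigation();		/* side effects only once settled */
}
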
@@ -1261,6 +1402,8 @@ enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_IBRS,
};
+static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;
+
enum spectre_v2_user_cmd {
SPECTRE_V2_USER_CMD_NONE,
SPECTRE_V2_USER_CMD_AUTO,
@@ -1299,31 +1442,18 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
-static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
-
-static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(void)
+static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
{
- enum spectre_v2_user_cmd mode;
char arg[20];
int ret, i;
- mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
- SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
-
- switch (spectre_v2_cmd) {
- case SPECTRE_V2_CMD_NONE:
+ if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
return SPECTRE_V2_USER_CMD_NONE;
- case SPECTRE_V2_CMD_FORCE:
- return SPECTRE_V2_USER_CMD_FORCE;
- default:
- break;
- }
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg));
if (ret < 0)
- return mode;
+ return SPECTRE_V2_USER_CMD_AUTO;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
if (match_option(arg, ret, v2_user_options[i].option)) {
@@ -1334,7 +1464,7 @@ spectre_v2_parse_user_cmdline(void)
}
pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
- return mode;
+ return SPECTRE_V2_USER_CMD_AUTO;
}
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
@@ -1342,60 +1472,72 @@ static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}
-static void __init
-spectre_v2_user_select_mitigation(void)
+static void __init spectre_v2_user_select_mitigation(void)
{
- enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
- enum spectre_v2_user_cmd cmd;
-
if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
return;
- cmd = spectre_v2_parse_user_cmdline();
- switch (cmd) {
+ switch (spectre_v2_parse_user_cmdline()) {
case SPECTRE_V2_USER_CMD_NONE:
- goto set_mode;
+ return;
case SPECTRE_V2_USER_CMD_FORCE:
- mode = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
break;
case SPECTRE_V2_USER_CMD_AUTO:
case SPECTRE_V2_USER_CMD_PRCTL:
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
+ break;
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
- mode = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
break;
case SPECTRE_V2_USER_CMD_SECCOMP:
+ if (IS_ENABLED(CONFIG_SECCOMP))
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
+ else
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_stibp = spectre_v2_user_ibpb;
+ break;
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
if (IS_ENABLED(CONFIG_SECCOMP))
- mode = SPECTRE_V2_USER_SECCOMP;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
else
- mode = SPECTRE_V2_USER_PRCTL;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
break;
}
- /* Initialize Indirect Branch Prediction Barrier */
- if (boot_cpu_has(X86_FEATURE_IBPB)) {
- static_branch_enable(&switch_vcpu_ibpb);
+ /*
+ * At this point, an STIBP mode other than "off" has been set.
+ * If STIBP support is not being forced, check if STIBP always-on
+ * is preferred.
+ */
+ if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
+ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
- spectre_v2_user_ibpb = mode;
- switch (cmd) {
- case SPECTRE_V2_USER_CMD_NONE:
- break;
- case SPECTRE_V2_USER_CMD_FORCE:
- case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
- case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
- static_branch_enable(&switch_mm_always_ibpb);
- spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
- break;
- case SPECTRE_V2_USER_CMD_PRCTL:
- case SPECTRE_V2_USER_CMD_AUTO:
- case SPECTRE_V2_USER_CMD_SECCOMP:
- static_branch_enable(&switch_mm_cond_ibpb);
- break;
- }
+ if (!boot_cpu_has(X86_FEATURE_IBPB))
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
- pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
- static_key_enabled(&switch_mm_always_ibpb) ?
- "always-on" : "conditional");
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
+ spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
+}
+
+static void __init spectre_v2_user_update_mitigation(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+ return;
+
+ /* The spectre_v2 cmd line can override spectre_v2_user options */
+ if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
+ } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
}
/*
@@ -1413,30 +1555,44 @@ spectre_v2_user_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!cpu_smt_possible() ||
(spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
- !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
+ !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
+ spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
return;
+ }
- /*
- * At this point, an STIBP mode other than "off" has been set.
- * If STIBP support is not being forced, check if STIBP always-on
- * is preferred.
- */
- if (mode != SPECTRE_V2_USER_STRICT &&
- boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
-
- if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
- retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- if (mode != SPECTRE_V2_USER_STRICT &&
- mode != SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
+ (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+ retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
+ if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
+ spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+ spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
}
+ pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
+}
+
+static void __init spectre_v2_user_apply_mitigation(void)
+{
+ /* Initialize Indirect Branch Prediction Barrier */
+ if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
+ static_branch_enable(&switch_vcpu_ibpb);
- spectre_v2_user_stibp = mode;
+ switch (spectre_v2_user_ibpb) {
+ case SPECTRE_V2_USER_STRICT:
+ static_branch_enable(&switch_mm_always_ibpb);
+ break;
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ static_branch_enable(&switch_mm_cond_ibpb);
+ break;
+ default:
+ break;
+ }
-set_mode:
- pr_info("%s\n", spectre_v2_user_strings[mode]);
+ pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+ static_key_enabled(&switch_mm_always_ibpb) ?
+ "always-on" : "conditional");
+ }
}
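
Under the PRCTL and SECCOMP modes selected above, a task opts into the indirect-branch mitigation itself. A minimal user-space sketch against the existing speculation-control prctl() interface (the fallback #defines cover an old <sys/prctl.h>; error handling reduced to perror):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_SET_SPECULATION_CTRL	53
#endif
#ifndef PR_SPEC_INDIRECT_BRANCH
#define PR_SPEC_INDIRECT_BRANCH	1
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/*
	 * Ask the kernel to disable indirect branch speculation for this
	 * task, i.e. turn on STIBP (and the conditional IBPB barrier).
	 */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}
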
static const char * const spectre_v2_strings[] = {
@@ -1656,12 +1812,13 @@ static bool __init spec_ctrl_bhi_dis(void)
enum bhi_mitigations {
BHI_MITIGATION_OFF,
+ BHI_MITIGATION_AUTO,
BHI_MITIGATION_ON,
BHI_MITIGATION_VMEXIT_ONLY,
};
static enum bhi_mitigations bhi_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
static int __init spectre_bhi_parse_cmdline(char *str)
{
@@ -1683,6 +1840,25 @@ early_param("spectre_bhi", spectre_bhi_parse_cmdline);
static void __init bhi_select_mitigation(void)
{
+ if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off())
+ bhi_mitigation = BHI_MITIGATION_OFF;
+
+ if (bhi_mitigation == BHI_MITIGATION_AUTO)
+ bhi_mitigation = BHI_MITIGATION_ON;
+}
+
+static void __init bhi_update_mitigation(void)
+{
+ if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
+ bhi_mitigation = BHI_MITIGATION_OFF;
+
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+ spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)
+ bhi_mitigation = BHI_MITIGATION_OFF;
+}
+
+static void __init bhi_apply_mitigation(void)
+{
if (bhi_mitigation == BHI_MITIGATION_OFF)
return;
@@ -1714,75 +1890,80 @@ static void __init bhi_select_mitigation(void)
static void __init spectre_v2_select_mitigation(void)
{
- enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
- enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+ spectre_v2_cmd = spectre_v2_parse_cmdline();
- /*
- * If the CPU is not affected and the command line mode is NONE or AUTO
- * then nothing to do.
- */
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
- (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+ (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO))
return;
- switch (cmd) {
+ switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return;
case SPECTRE_V2_CMD_FORCE:
case SPECTRE_V2_CMD_AUTO:
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
- mode = SPECTRE_V2_EIBRS;
- break;
- }
-
- if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
- boot_cpu_has_bug(X86_BUG_RETBLEED) &&
- retbleed_cmd != RETBLEED_CMD_OFF &&
- retbleed_cmd != RETBLEED_CMD_STUFF &&
- boot_cpu_has(X86_FEATURE_IBRS) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- mode = SPECTRE_V2_IBRS;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS;
break;
}
- mode = spectre_v2_select_retpoline();
+ spectre_v2_enabled = spectre_v2_select_retpoline();
break;
case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
pr_err(SPECTRE_V2_LFENCE_MSG);
- mode = SPECTRE_V2_LFENCE;
+ spectre_v2_enabled = SPECTRE_V2_LFENCE;
break;
case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
- mode = SPECTRE_V2_RETPOLINE;
+ spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
break;
case SPECTRE_V2_CMD_RETPOLINE:
- mode = spectre_v2_select_retpoline();
+ spectre_v2_enabled = spectre_v2_select_retpoline();
break;
case SPECTRE_V2_CMD_IBRS:
- mode = SPECTRE_V2_IBRS;
+ spectre_v2_enabled = SPECTRE_V2_IBRS;
break;
case SPECTRE_V2_CMD_EIBRS:
- mode = SPECTRE_V2_EIBRS;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS;
break;
case SPECTRE_V2_CMD_EIBRS_LFENCE:
- mode = SPECTRE_V2_EIBRS_LFENCE;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
break;
case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
- mode = SPECTRE_V2_EIBRS_RETPOLINE;
+ spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
break;
}
+}
+
+static void __init spectre_v2_update_mitigation(void)
+{
+ if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO) {
+ if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
+ boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+ retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
+ retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
+ boot_cpu_has(X86_FEATURE_IBRS) &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ spectre_v2_enabled = SPECTRE_V2_IBRS;
+ }
+ }
- if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off())
+ pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
+}
+
+static void __init spectre_v2_apply_mitigation(void)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
- if (spectre_v2_in_ibrs_mode(mode)) {
+ if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
} else {
@@ -1791,8 +1972,10 @@ static void __init spectre_v2_select_mitigation(void)
}
}
- switch (mode) {
+ switch (spectre_v2_enabled) {
case SPECTRE_V2_NONE:
+ return;
+
case SPECTRE_V2_EIBRS:
break;
@@ -1818,18 +2001,12 @@ static void __init spectre_v2_select_mitigation(void)
* JMPs gets protection against BHI and Intramode-BTI, but RET
* prediction from a non-RSB predictor is still a risk.
*/
- if (mode == SPECTRE_V2_EIBRS_LFENCE ||
- mode == SPECTRE_V2_EIBRS_RETPOLINE ||
- mode == SPECTRE_V2_RETPOLINE)
+ if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
+ spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
+ spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
spec_ctrl_disable_kernel_rrsba();
- if (boot_cpu_has(X86_BUG_BHI))
- bhi_select_mitigation();
-
- spectre_v2_enabled = mode;
- pr_info("%s\n", spectre_v2_strings[mode]);
-
- spectre_v2_select_rsb_mitigation(mode);
+ spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -1837,28 +2014,26 @@ static void __init spectre_v2_select_mitigation(void)
* firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
* otherwise enabled.
*
- * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
- * the user might select retpoline on the kernel command line and if
- * the CPU supports Enhanced IBRS, kernel might un-intentionally not
- * enable IBRS around firmware calls.
+ * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
+ * boot_cpu_has(), because the user might select retpoline on the kernel
+	 * command line and if the CPU supports Enhanced IBRS, the kernel might
+	 * unintentionally not enable IBRS around firmware calls.
*/
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
boot_cpu_has(X86_FEATURE_IBPB) &&
(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
- if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+ if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
pr_info("Enabling Speculation Barrier for firmware calls\n");
}
- } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+ } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
+ !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
-
- /* Set up IBPB and STIBP depending on the general spectre V2 command */
- spectre_v2_cmd = cmd;
}
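
X86_FEATURE_USE_IBRS_FW, set just above, only matters around firmware calls; callers bracket those with helpers that already exist in <asm/nospec-branch.h>. A sketch of the usage pattern (the two helpers are real, the wrapper and fw_entry are illustrative):

/* Illustrative wrapper: fw_entry stands in for a real firmware entry point. */
static unsigned long guarded_firmware_call(unsigned long (*fw_entry)(void))
{
	unsigned long ret;

	firmware_restrict_branch_speculation_start(); /* IBRS on, if USE_IBRS_FW */
	ret = fw_entry();
	firmware_restrict_branch_speculation_end();   /* IBRS back off */

	return ret;
}
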
static void update_stibp_msr(void * __unused)
@@ -2047,19 +2222,18 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
return cmd;
}
-static enum ssb_mitigation __init __ssb_select_mitigation(void)
+static void __init ssb_select_mitigation(void)
{
- enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;
if (!boot_cpu_has(X86_FEATURE_SSBD))
- return mode;
+ goto out;
cmd = ssb_parse_cmdline();
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
(cmd == SPEC_STORE_BYPASS_CMD_NONE ||
cmd == SPEC_STORE_BYPASS_CMD_AUTO))
- return mode;
+ return;
switch (cmd) {
case SPEC_STORE_BYPASS_CMD_SECCOMP:
@@ -2068,28 +2242,35 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
* enabled.
*/
if (IS_ENABLED(CONFIG_SECCOMP))
- mode = SPEC_STORE_BYPASS_SECCOMP;
+ ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
else
- mode = SPEC_STORE_BYPASS_PRCTL;
+ ssb_mode = SPEC_STORE_BYPASS_PRCTL;
break;
case SPEC_STORE_BYPASS_CMD_ON:
- mode = SPEC_STORE_BYPASS_DISABLE;
+ ssb_mode = SPEC_STORE_BYPASS_DISABLE;
break;
case SPEC_STORE_BYPASS_CMD_AUTO:
case SPEC_STORE_BYPASS_CMD_PRCTL:
- mode = SPEC_STORE_BYPASS_PRCTL;
+ ssb_mode = SPEC_STORE_BYPASS_PRCTL;
break;
case SPEC_STORE_BYPASS_CMD_NONE:
break;
}
+out:
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
+static void __init ssb_apply_mitigation(void)
+{
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
*/
- if (mode == SPEC_STORE_BYPASS_DISABLE) {
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
/*
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
@@ -2103,16 +2284,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
update_spec_ctrl(x86_spec_ctrl_base);
}
}
-
- return mode;
-}
-
-static void ssb_select_mitigation(void)
-{
- ssb_mode = __ssb_select_mitigation();
-
- if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
- pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt
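
On the SPEC_STORE_BYPASS_DISABLE path above, Intel-style implementations set SPEC_CTRL_SSBD (bit 2) in the base SPEC_CTRL value. Reduced to its essentials as a sketch, not the kernel's exact code path (needs <asm/msr.h> and <asm/msr-index.h>):

/* Sketch: force SSBD on via IA32_SPEC_CTRL. */
static void ssbd_force_on_sketch(void)
{
	u64 val;

	rdmsrl(MSR_IA32_SPEC_CTRL, val);
	val |= SPEC_CTRL_SSBD;		/* BIT(2): disable store bypass */
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
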
@@ -2368,7 +2539,7 @@ EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_FLUSH : L1TF_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
@@ -2416,22 +2587,33 @@ static void override_cache_bits(struct cpuinfo_x86 *c)
static void __init l1tf_select_mitigation(void)
{
+ if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) {
+ l1tf_mitigation = L1TF_MITIGATION_OFF;
+ return;
+ }
+
+ if (l1tf_mitigation == L1TF_MITIGATION_AUTO) {
+ if (cpu_mitigations_auto_nosmt())
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+ else
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH;
+ }
+}
+
+static void __init l1tf_apply_mitigation(void)
+{
u64 half_pa;
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
- if (cpu_mitigations_off())
- l1tf_mitigation = L1TF_MITIGATION_OFF;
- else if (cpu_mitigations_auto_nosmt())
- l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
-
override_cache_bits(&boot_cpu_data);
switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF:
case L1TF_MITIGATION_FLUSH_NOWARN:
case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_AUTO:
break;
case L1TF_MITIGATION_FLUSH_NOSMT:
case L1TF_MITIGATION_FULL:
@@ -2491,6 +2673,7 @@ early_param("l1tf", l1tf_cmdline);
enum srso_mitigation {
SRSO_MITIGATION_NONE,
+ SRSO_MITIGATION_AUTO,
SRSO_MITIGATION_UCODE_NEEDED,
SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
SRSO_MITIGATION_MICROCODE,
@@ -2500,14 +2683,6 @@ enum srso_mitigation {
SRSO_MITIGATION_BP_SPEC_REDUCE,
};
-enum srso_mitigation_cmd {
- SRSO_CMD_OFF,
- SRSO_CMD_MICROCODE,
- SRSO_CMD_SAFE_RET,
- SRSO_CMD_IBPB,
- SRSO_CMD_IBPB_ON_VMEXIT,
-};
-
static const char * const srso_strings[] = {
[SRSO_MITIGATION_NONE] = "Vulnerable",
[SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
@@ -2519,8 +2694,7 @@ static const char * const srso_strings[] = {
[SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
};
-static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
-static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
static int __init srso_parse_cmdline(char *str)
{
@@ -2528,15 +2702,15 @@ static int __init srso_parse_cmdline(char *str)
return -EINVAL;
if (!strcmp(str, "off"))
- srso_cmd = SRSO_CMD_OFF;
+ srso_mitigation = SRSO_MITIGATION_NONE;
else if (!strcmp(str, "microcode"))
- srso_cmd = SRSO_CMD_MICROCODE;
+ srso_mitigation = SRSO_MITIGATION_MICROCODE;
else if (!strcmp(str, "safe-ret"))
- srso_cmd = SRSO_CMD_SAFE_RET;
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
else if (!strcmp(str, "ibpb"))
- srso_cmd = SRSO_CMD_IBPB;
+ srso_mitigation = SRSO_MITIGATION_IBPB;
else if (!strcmp(str, "ibpb-vmexit"))
- srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
else
pr_err("Ignoring unknown SRSO option (%s).", str);
@@ -2548,132 +2722,83 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
static void __init srso_select_mitigation(void)
{
- bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
+ bool has_microcode;
- if (!boot_cpu_has_bug(X86_BUG_SRSO) ||
- cpu_mitigations_off() ||
- srso_cmd == SRSO_CMD_OFF) {
- if (boot_cpu_has(X86_FEATURE_SBPB))
- x86_pred_cmd = PRED_CMD_SBPB;
- goto out;
- }
+ if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+ srso_mitigation = SRSO_MITIGATION_NONE;
+
+ if (srso_mitigation == SRSO_MITIGATION_NONE)
+ return;
+ if (srso_mitigation == SRSO_MITIGATION_AUTO)
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+
+ has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
if (has_microcode) {
/*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
- *
- * Zen1/2 don't have SBPB, no need to try to enable it here.
*/
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
- goto out;
- }
-
- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- srso_mitigation = SRSO_MITIGATION_IBPB;
- goto out;
+ srso_mitigation = SRSO_MITIGATION_NONE;
+ return;
}
} else {
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
-
- /* may be overwritten by SRSO_CMD_SAFE_RET below */
- srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
}
- switch (srso_cmd) {
- case SRSO_CMD_MICROCODE:
- if (has_microcode) {
- srso_mitigation = SRSO_MITIGATION_MICROCODE;
- pr_warn(SRSO_NOTICE);
- }
- break;
-
- case SRSO_CMD_SAFE_RET:
- if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
+ switch (srso_mitigation) {
+ case SRSO_MITIGATION_SAFE_RET:
+ if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
goto ibpb_on_vmexit;
+ }
- if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
- /*
- * Enable the return thunk for generated code
- * like ftrace, static_call, etc.
- */
- setup_force_cpu_cap(X86_FEATURE_RETHUNK);
- setup_force_cpu_cap(X86_FEATURE_UNRET);
-
- if (boot_cpu_data.x86 == 0x19) {
- setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
- x86_return_thunk = srso_alias_return_thunk;
- } else {
- setup_force_cpu_cap(X86_FEATURE_SRSO);
- x86_return_thunk = srso_return_thunk;
- }
- if (has_microcode)
- srso_mitigation = SRSO_MITIGATION_SAFE_RET;
- else
- srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
- } else {
+ if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
+ srso_mitigation = SRSO_MITIGATION_NONE;
}
- break;
- case SRSO_CMD_IBPB:
- if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- if (has_microcode) {
- setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
- srso_mitigation = SRSO_MITIGATION_IBPB;
-
- /*
- * IBPB on entry already obviates the need for
- * software-based untraining so clear those in case some
- * other mitigation like Retbleed has selected them.
- */
- setup_clear_cpu_cap(X86_FEATURE_UNRET);
- setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
-
- /*
- * There is no need for RSB filling: write_ibpb() ensures
- * all predictions, including the RSB, are invalidated,
- * regardless of IBPB implementation.
- */
- setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- }
- } else {
- pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
- }
+ if (!has_microcode)
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
break;
-
ibpb_on_vmexit:
- case SRSO_CMD_IBPB_ON_VMEXIT:
+ case SRSO_MITIGATION_IBPB_ON_VMEXIT:
if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
break;
}
-
- if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- if (has_microcode) {
- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
- srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
-
- /*
- * There is no need for RSB filling: write_ibpb() ensures
- * all predictions, including the RSB, are invalidated,
- * regardless of IBPB implementation.
- */
- setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- }
- } else {
+ fallthrough;
+ case SRSO_MITIGATION_IBPB:
+ if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
+ srso_mitigation = SRSO_MITIGATION_NONE;
}
+
+ if (!has_microcode)
+ srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
break;
default:
break;
}
+}
-out:
+static void __init srso_update_mitigation(void)
+{
+ /* If retbleed is using IBPB, that works for SRSO as well */
+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
+ boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
+ srso_mitigation = SRSO_MITIGATION_IBPB;
+
+ if (boot_cpu_has_bug(X86_BUG_SRSO) && !cpu_mitigations_off())
+ pr_info("%s\n", srso_strings[srso_mitigation]);
+}
+
+static void __init srso_apply_mitigation(void)
+{
/*
* Clear the feature flag if this mitigation is not selected as that
* feature flag controls the BpSpecReduce MSR bit toggling in KVM.
@@ -2681,8 +2806,52 @@ out:
if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
- if (srso_mitigation != SRSO_MITIGATION_NONE)
- pr_info("%s\n", srso_strings[srso_mitigation]);
+ if (srso_mitigation == SRSO_MITIGATION_NONE) {
+ if (boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ return;
+ }
+
+ switch (srso_mitigation) {
+ case SRSO_MITIGATION_SAFE_RET:
+ case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+ /*
+ * Enable the return thunk for generated code
+ * like ftrace, static_call, etc.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+ x86_return_thunk = srso_alias_return_thunk;
+ } else {
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+ x86_return_thunk = srso_return_thunk;
+ }
+ break;
+ case SRSO_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ /*
+ * IBPB on entry already obviates the need for
+ * software-based untraining so clear those in case some
+ * other mitigation like Retbleed has selected them.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ fallthrough;
+ case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ /*
+	 * There is no need for RSB filling: write_ibpb() ensures
+ * all predictions, including the RSB, are invalidated,
+ * regardless of IBPB implementation.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ break;
+ default:
+ break;
+ }
}
#undef pr_fmt
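
The strings these *_update_mitigation() helpers print are also what the vulnerabilities files under sysfs report. A small user-space check of the end result (the sysfs path is standard; the program itself is illustrative):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow", "r");

	if (f && fgets(line, sizeof(line), f))
		printf("SRSO state: %s", line);	/* e.g. "Mitigation: Safe RET" */
	if (f)
		fclose(f);
	return 0;
}
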
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ada55f126ae..8a1f9a889eb4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -242,6 +242,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+SYM_PIC_ALIAS(gdt_page);
#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
@@ -1005,17 +1006,18 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_capability[CPUID_D_1_EAX] = eax;
}
- /* AMD-defined flags: level 0x80000001 */
+ /*
+	 * Check if extended CPUID leaves are implemented: the maximum
+	 * extended CPUID leaf must be in the 0x80000001-0x8000ffff range.
+ */
eax = cpuid_eax(0x80000000);
- c->extended_cpuid_level = eax;
+ c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
- if ((eax & 0xffff0000) == 0x80000000) {
- if (eax >= 0x80000001) {
- cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+ if (c->extended_cpuid_level >= 0x80000001) {
+ cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
- c->x86_capability[CPUID_8000_0001_ECX] = ecx;
- c->x86_capability[CPUID_8000_0001_EDX] = edx;
- }
+ c->x86_capability[CPUID_8000_0001_ECX] = ecx;
+ c->x86_capability[CPUID_8000_0001_EDX] = edx;
}
if (c->extended_cpuid_level >= 0x80000007) {
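
The get_cpu_cap() change only trusts the extended-leaf maximum when it reads back in the 0x80000001-0x8000ffff range. The same sanity check can be reproduced from user space with the compiler's <cpuid.h>, as a sketch:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_ext = __get_cpuid_max(0x80000000, NULL);

	/* Same check as above: the top 16 bits must read back 0x8000. */
	if ((max_ext & 0xffff0000) == 0x80000000)
		printf("extended leaves up to %#x\n", max_ext);
	else
		printf("no extended CPUID leaves\n");
	return 0;
}
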
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 4a10d35e70aa..96cb992d50ef 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -1098,15 +1098,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
static int __init save_microcode_in_initrd(void)
{
- unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
+ unsigned int cpuid_1_eax;
enum ucode_state ret;
struct cpio_data cp;
- if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
+ cpuid_1_eax = native_cpuid_eax(1);
+
if (!find_blobs_in_containers(&cp))
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b3658d11e7b6..079f046ee26d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -41,8 +41,8 @@
#include "internal.h"
-static struct microcode_ops *microcode_ops;
-bool dis_ucode_ldr = true;
+static struct microcode_ops *microcode_ops;
+static bool dis_ucode_ldr = false;
bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
@@ -84,6 +84,9 @@ static bool amd_check_current_patch_level(void)
u32 lvl, dummy, i;
u32 *levels;
+ if (x86_cpuid_vendor() != X86_VENDOR_AMD)
+ return false;
+
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
levels = final_levels;
@@ -95,27 +98,29 @@ static bool amd_check_current_patch_level(void)
return false;
}
-static bool __init check_loader_disabled_bsp(void)
+bool __init microcode_loader_disabled(void)
{
- static const char *__dis_opt_str = "dis_ucode_ldr";
- const char *cmdline = boot_command_line;
- const char *option = __dis_opt_str;
+ if (dis_ucode_ldr)
+ return true;
/*
- * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
- * completely accurate as xen pv guests don't see that CPUID bit set but
- * that's good enough as they don't land on the BSP path anyway.
+ * Disable when:
+ *
+ * 1) The CPU does not support CPUID.
+ *
+	 * 2) Bit 31 in CPUID[1]:ECX is set
+ * The bit is reserved for hypervisor use. This is still not
+ * completely accurate as XEN PV guests don't see that CPUID bit
+ * set, but that's good enough as they don't land on the BSP
+ * path anyway.
+ *
+ * 3) Certain AMD patch levels are not allowed to be
+ * overwritten.
*/
- if (native_cpuid_ecx(1) & BIT(31))
- return true;
-
- if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
- if (amd_check_current_patch_level())
- return true;
- }
-
- if (cmdline_find_option_bool(cmdline, option) <= 0)
- dis_ucode_ldr = false;
+ if (!have_cpuid_p() ||
+ native_cpuid_ecx(1) & BIT(31) ||
+ amd_check_current_patch_level())
+ dis_ucode_ldr = true;
return dis_ucode_ldr;
}
@@ -125,7 +130,10 @@ void __init load_ucode_bsp(void)
unsigned int cpuid_1_eax;
bool intel = true;
- if (!have_cpuid_p())
+ if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
+ dis_ucode_ldr = true;
+
+ if (microcode_loader_disabled())
return;
cpuid_1_eax = native_cpuid_eax(1);
@@ -146,9 +154,6 @@ void __init load_ucode_bsp(void)
return;
}
- if (check_loader_disabled_bsp())
- return;
-
if (intel)
load_ucode_intel_bsp(&early_data);
else
@@ -159,6 +164,11 @@ void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;
+ /*
+ * Can't use microcode_loader_disabled() here - .init section
+ * hell. It doesn't have to either - the BSP variant must've
+ * parsed cmdline already anyway.
+ */
if (dis_ucode_ldr)
return;
@@ -810,7 +820,7 @@ static int __init microcode_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
int error;
- if (dis_ucode_ldr)
+ if (microcode_loader_disabled())
return -EINVAL;
if (c->x86_vendor == X86_VENDOR_INTEL)
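
microcode_loader_disabled() keys off CPUID(1).ECX bit 31, which is reserved for hypervisor use. The same probe from user space, as a sketch:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 31)))
		puts("running under a hypervisor - ucode loading pointless");
	else
		puts("bare metal (or Xen PV, which hides the bit)");
	return 0;
}
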
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 819199bc0119..2a397da43923 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void)
if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
return 0;
- if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
uci.mc = get_microcode_blob(&uci, true);
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index 5df621752fef..50a9702ae4e2 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax);
}
-extern bool dis_ucode_ldr;
extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 3aad78bfcb26..cba75306e5b6 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/console.h>
#include <linux/kernel.h>
+#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/screen_info.h>
@@ -144,6 +145,11 @@ static __init void early_serial_hw_init(unsigned divisor)
static_call(serial_out)(early_serial_base, DLL, divisor & 0xff);
static_call(serial_out)(early_serial_base, DLH, (divisor >> 8) & 0xff);
static_call(serial_out)(early_serial_base, LCR, c & ~DLAB);
+
+#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_X86_64)
+ if (static_call_query(serial_in) == io_serial_in)
+ kexec_debug_8250_port = early_serial_base;
+#endif
}
#define DEFAULT_BAUD 9600
@@ -327,6 +333,9 @@ static __init void early_pci_serial_init(char *s)
/* WARNING! assuming the address is always in the first 4G */
early_serial_base =
(unsigned long)early_ioremap(bar0 & PCI_BASE_ADDRESS_MEM_MASK, 0x10);
+#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_X86_64)
+ kexec_debug_8250_mmio32 = bar0 & PCI_BASE_ADDRESS_MEM_MASK;
+#endif
write_pci_config(bus, slot, func, PCI_COMMAND,
cmdreg|PCI_COMMAND_MEMORY);
}
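
The DLL/DLH writes in early_serial_hw_init() program the 16550 baud-rate divisor, derived from the standard 115200 Hz base clock. The arithmetic, as a one-line sketch:

/* Sketch: 16550 divisor for a requested baud rate (115200 base clock). */
static unsigned int serial_divisor(unsigned int baud)
{
	return 115200 / baud;	/* 115200 baud -> divisor 1, 9600 -> 12 */
}
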
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cace6e8d7cc7..0853ba3fd04a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -55,10 +55,10 @@ void ftrace_arch_code_modify_post_process(void)
{
/*
* ftrace_make_{call,nop}() may be called during
- * module load, and we need to finish the text_poke_queue()
+ * module load, and we need to finish the smp_text_poke_batch_add()
* that they do, here.
*/
- text_poke_finish();
+ smp_text_poke_batch_finish();
ftrace_poke_late = 0;
mutex_unlock(&text_mutex);
}
@@ -119,7 +119,7 @@ ftrace_modify_code_direct(unsigned long ip, const char *old_code,
/* replace the text with the new text */
if (ftrace_poke_late)
- text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_batch_add((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
else
text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
return 0;
@@ -186,11 +186,11 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ip = (unsigned long)(&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
ip = (unsigned long)(&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
return 0;
}
@@ -247,10 +247,10 @@ void ftrace_replace_code(int enable)
break;
}
- text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_batch_add((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
ftrace_update_record(rec, enable);
}
- text_poke_finish();
+ smp_text_poke_batch_finish();
}
void arch_ftrace_update_code(int command)
@@ -492,7 +492,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
mutex_lock(&text_mutex);
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
mutex_unlock(&text_mutex);
}
@@ -586,7 +586,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
const char *new;
new = ftrace_jmp_replace(ip, (unsigned long)func);
- text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+ smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
return 0;
}
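
The ftrace conversion is a straight rename onto the batched text-poking API: queue call-site rewrites, then flush them together in one INT3 round-trip, all under text_mutex. The usage pattern condensed from the hunks above (a fragment, not a standalone function):

	mutex_lock(&text_mutex);

	/* Queue any number of call-site rewrites... */
	smp_text_poke_batch_add((void *)ip, new_insn, MCOUNT_INSN_SIZE, NULL);

	/* ...then emit them together via a single INT3 dance. */
	smp_text_poke_batch_finish();

	mutex_unlock(&text_mutex);
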
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index de001b2146ab..375f2d7f1762 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
*ptr = (unsigned long)ptep + PAGE_OFFSET;
#ifdef CONFIG_MICROCODE_INITRD32
- /* Running on a hypervisor? */
- if (native_cpuid_ecx(1) & BIT(31))
- return;
-
params = (struct boot_params *)__pa_nodebug(&boot_params);
if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
return;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index fa9b6339975f..510fb41f55fc 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -47,7 +47,8 @@
* Manage page tables very early on.
*/
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
-static unsigned int __initdata next_early_pgt;
+unsigned int __initdata next_early_pgt;
+SYM_PIC_ALIAS(next_early_pgt);
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
#ifdef CONFIG_X86_5LEVEL
@@ -61,221 +62,15 @@ EXPORT_SYMBOL(ptrs_per_p4d);
#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
+SYM_PIC_ALIAS(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
+SYM_PIC_ALIAS(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
+SYM_PIC_ALIAS(vmemmap_base);
#endif
-static inline bool check_la57_support(void)
-{
- if (!IS_ENABLED(CONFIG_X86_5LEVEL))
- return false;
-
- /*
- * 5-level paging is detected and enabled at kernel decompression
- * stage. Only check if it has been enabled there.
- */
- if (!(native_read_cr4() & X86_CR4_LA57))
- return false;
-
- RIP_REL_REF(__pgtable_l5_enabled) = 1;
- RIP_REL_REF(pgdir_shift) = 48;
- RIP_REL_REF(ptrs_per_p4d) = 512;
- RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5;
- RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5;
- RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5;
-
- return true;
-}
-
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
- pmdval_t *pmd,
- unsigned long p2v_offset)
-{
- unsigned long paddr, paddr_end;
- int i;
-
- /* Encrypt the kernel and related (if SME is active) */
- sme_encrypt_kernel(bp);
-
- /*
- * Clear the memory encryption mask from the .bss..decrypted section.
- * The bss section will be memset to zero later in the initialization so
- * there is no need to zero it after changing the memory encryption
- * attribute.
- */
- if (sme_get_me_mask()) {
- paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted);
- paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted);
-
- for (; paddr < paddr_end; paddr += PMD_SIZE) {
- /*
- * On SNP, transition the page to shared in the RMP table so that
- * it is consistent with the page table attribute change.
- *
- * __start_bss_decrypted has a virtual address in the high range
- * mapping (kernel .text). PVALIDATE, by way of
- * early_snp_set_memory_shared(), requires a valid virtual
- * address but the kernel is currently running off of the identity
- * mapping so use the PA to get a *currently* valid virtual address.
- */
- early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);
-
- i = pmd_index(paddr - p2v_offset);
- pmd[i] -= sme_get_me_mask();
- }
- }
-
- /*
- * Return the SME encryption mask (if SME is active) to be used as a
- * modifier for the initial pgdir entry programmed into CR3.
- */
- return sme_get_me_mask();
-}
-
-/* Code in __startup_64() can be relocated during execution, but the compiler
- * doesn't have to generate PC-relative relocations when accessing globals from
- * that function. Clang actually does not generate them, which leads to
- * boot-time crashes. To work around this problem, every global pointer must
- * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined
- * by subtracting p2v_offset from the RIP-relative address.
- */
-unsigned long __head __startup_64(unsigned long p2v_offset,
- struct boot_params *bp)
-{
- pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
- unsigned long physaddr = (unsigned long)&RIP_REL_REF(_text);
- unsigned long va_text, va_end;
- unsigned long pgtable_flags;
- unsigned long load_delta;
- pgdval_t *pgd;
- p4dval_t *p4d;
- pudval_t *pud;
- pmdval_t *pmd, pmd_entry;
- bool la57;
- int i;
-
- la57 = check_la57_support();
-
- /* Is the address too large? */
- if (physaddr >> MAX_PHYSMEM_BITS)
- for (;;);
-
- /*
- * Compute the delta between the address I am compiled to run at
- * and the address I am actually running at.
- */
- load_delta = __START_KERNEL_map + p2v_offset;
- RIP_REL_REF(phys_base) = load_delta;
-
- /* Is the address not 2M aligned? */
- if (load_delta & ~PMD_MASK)
- for (;;);
-
- va_text = physaddr - p2v_offset;
- va_end = (unsigned long)&RIP_REL_REF(_end) - p2v_offset;
-
- /* Include the SME encryption mask in the fixup value */
- load_delta += sme_get_me_mask();
-
- /* Fixup the physical addresses in the page table */
-
- pgd = &RIP_REL_REF(early_top_pgt)->pgd;
- pgd[pgd_index(__START_KERNEL_map)] += load_delta;
-
- if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) {
- p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt);
- p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
-
- pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
- }
-
- RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
- RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta;
-
- for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
- RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta;
-
- /*
- * Set up the identity mapping for the switchover. These
- * entries should *NOT* have the global bit set! This also
- * creates a bunch of nonsense entries but that is fine --
- * it avoids problems around wraparound.
- */
-
- pud = &early_pgts[0]->pmd;
- pmd = &early_pgts[1]->pmd;
- RIP_REL_REF(next_early_pgt) = 2;
-
- pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
-
- if (la57) {
- p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd;
-
- i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
- pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
- pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
-
- i = physaddr >> P4D_SHIFT;
- p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
- p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
- } else {
- i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
- pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
- pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
- }
-
- i = physaddr >> PUD_SHIFT;
- pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
- pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
-
- pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
- /* Filter out unsupported __PAGE_KERNEL_* bits: */
- pmd_entry &= RIP_REL_REF(__supported_pte_mask);
- pmd_entry += sme_get_me_mask();
- pmd_entry += physaddr;
-
- for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
- int idx = i + (physaddr >> PMD_SHIFT);
-
- pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
- }
-
- /*
- * Fixup the kernel text+data virtual addresses. Note that
- * we might write invalid pmds, when the kernel is relocated
- * cleanup_highmap() fixes this up along with the mappings
- * beyond _end.
- *
- * Only the region occupied by the kernel image has so far
- * been checked against the table of usable memory regions
- * provided by the firmware, so invalidate pages outside that
- * region. A page table entry that maps to a reserved area of
- * memory would allow processor speculation into that area,
- * and on some hardware (particularly the UV platform) even
- * speculative access to some reserved areas is caught as an
- * error, causing the BIOS to halt the system.
- */
-
- pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;
-
- /* invalidate pages before the kernel image */
- for (i = 0; i < pmd_index(va_text); i++)
- pmd[i] &= ~_PAGE_PRESENT;
-
- /* fixup pages that are part of the kernel image */
- for (; i <= pmd_index(va_end); i++)
- if (pmd[i] & _PAGE_PRESENT)
- pmd[i] += load_delta;
-
- /* invalidate pages after the kernel image */
- for (; i < PTRS_PER_PMD; i++)
- pmd[i] &= ~_PAGE_PRESENT;
-
- return sme_postprocess_startup(bp, pmd, p2v_offset);
-}
-
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
@@ -513,41 +308,6 @@ void __init __noreturn x86_64_start_reservations(char *real_mode_data)
start_kernel();
}
-/*
- * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
- * used until the idt_table takes over. On the boot CPU this happens in
- * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
- * this happens in the functions called from head_64.S.
- *
- * The idt_table can't be used that early because all the code modifying it is
- * in idt.c and can be instrumented by tracing or KASAN, which both don't work
- * during early CPU bringup. Also the idt_table has the runtime vectors
- * configured which require certain CPU state to be setup already (like TSS),
- * which also hasn't happened yet in early CPU bringup.
- */
-static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
-
-/* This may run while still in the direct mapping */
-static void __head startup_64_load_idt(void *vc_handler)
-{
- struct desc_ptr desc = {
- .address = (unsigned long)&RIP_REL_REF(bringup_idt_table),
- .size = sizeof(bringup_idt_table) - 1,
- };
- struct idt_data data;
- gate_desc idt_desc;
-
- /* @vc_handler is set only for a VMM Communication Exception */
- if (vc_handler) {
- init_idt_data(&data, X86_TRAP_VC, vc_handler);
- idt_init_desc(&idt_desc, &data);
- native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
- }
-
- native_load_idt(&desc);
-}
-
-/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
void *handler = NULL;
@@ -559,30 +319,3 @@ void early_setup_idt(void)
startup_64_load_idt(handler);
}
-
-/*
- * Setup boot CPU state needed before kernel switches to virtual addresses.
- */
-void __head startup_64_setup_gdt_idt(void)
-{
- struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt;
- void *handler = NULL;
-
- struct desc_ptr startup_gdt_descr = {
- .address = (unsigned long)&RIP_REL_REF(*gdt),
- .size = GDT_SIZE - 1,
- };
-
- /* Load GDT */
- native_load_gdt(&startup_gdt_descr);
-
- /* New GDT is live - reload data segment registers */
- asm volatile("movl %%eax, %%ds\n"
- "movl %%eax, %%ss\n"
- "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
-
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
- handler = &RIP_REL_REF(vc_no_ghcb);
-
- startup_64_load_idt(handler);
-}
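
The removed __startup_64() (relocated to arch/x86/boot/startup/ per the file list) pivots on two related offsets: p2v_offset converts a RIP-relative physical address into its kernel virtual address, and load_delta is what gets added to the fixed page-table entries. Their relationship, written out as a commented sketch:

/*
 * Sketch of the address fixup math from __startup_64():
 *
 *   physaddr   = &_text                      (where we actually run)
 *   va_text    = physaddr - p2v_offset       (kernel VA of _text)
 *   load_delta = __START_KERNEL_map + p2v_offset
 *              = physaddr - (va_text - __START_KERNEL_map)
 *
 * i.e. load_delta is how far the image moved from its link-time
 * physical address, and is added to every fixed page-table entry.
 */
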
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2e42056d2306..76743dfad6ab 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -86,7 +86,7 @@ SYM_CODE_START(startup_32)
movl $pa(__bss_stop),%ecx
subl %edi,%ecx
shrl $2,%ecx
- rep ; stosl
+ rep stosl
/*
* Copy bootup parameters out of the way.
* Note: %esi still has the pointer to the real-mode data.
@@ -98,15 +98,13 @@ SYM_CODE_START(startup_32)
movl $pa(boot_params),%edi
movl $(PARAM_SIZE/4),%ecx
cld
- rep
- movsl
+ rep movsl
movl pa(boot_params) + NEW_CL_POINTER,%esi
andl %esi,%esi
jz 1f # No command line
movl $pa(boot_command_line),%edi
movl $(COMMAND_LINE_SIZE/4),%ecx
- rep
- movsl
+ rep movsl
1:
#ifdef CONFIG_OLPC
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index fefe2a25cf02..069420853304 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -573,6 +573,7 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
/* Pure iret required here - don't use INTERRUPT_RETURN */
iretq
SYM_CODE_END(vc_no_ghcb)
+SYM_PIC_ALIAS(vc_no_ghcb);
#endif
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
@@ -604,10 +605,12 @@ SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
.fill PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)
+SYM_PIC_ALIAS(early_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)
+SYM_PIC_ALIAS(early_dynamic_pgts);
SYM_DATA(early_recursion_flag, .long 0)
@@ -651,6 +654,7 @@ SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
.fill 511,8,0
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
+SYM_PIC_ALIAS(level4_kernel_pgt)
#endif
SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
@@ -659,6 +663,7 @@ SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)
+SYM_PIC_ALIAS(level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
/*
@@ -676,6 +681,7 @@ SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
*/
PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
+SYM_PIC_ALIAS(level2_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
.fill (512 - 4 - FIXMAP_PMD_NUM),8,0
@@ -688,6 +694,7 @@ SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
/* 6 MB reserved space + a 2MB hole */
.fill 4,8,0
SYM_DATA_END(level2_fixmap_pgt)
+SYM_PIC_ALIAS(level2_fixmap_pgt)
SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
.rept (FIXMAP_PMD_NUM)
@@ -703,6 +710,7 @@ SYM_DATA(smpboot_control, .long 0)
.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
+SYM_PIC_ALIAS(phys_base);
EXPORT_SYMBOL(phys_base)
#include "../xen/xen-head.S"
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index f5b8ef02d172..a7949a54a0ff 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -102,7 +102,7 @@ __jump_label_transform(struct jump_entry *entry,
return;
}
- text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
+ smp_text_poke_single((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
static void __ref jump_label_transform(struct jump_entry *entry,
@@ -135,7 +135,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
mutex_lock(&text_mutex);
jlp = __jump_label_patch(entry, type);
- text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
+ smp_text_poke_batch_add((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
@@ -143,6 +143,6 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
void arch_jump_label_transform_apply(void)
{
mutex_lock(&text_mutex);
- text_poke_finish();
+ smp_text_poke_batch_finish();
mutex_unlock(&text_mutex);
}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 09608fd93687..47cb8eb138ba 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -808,7 +808,7 @@ void arch_arm_kprobe(struct kprobe *p)
u8 int3 = INT3_INSN_OPCODE;
text_poke(p->addr, &int3, 1);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}
@@ -818,7 +818,7 @@ void arch_disarm_kprobe(struct kprobe *p)
perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
text_poke(p->addr, &p->opcode, 1);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
}
void arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 36d6809c6c9e..0aabd4c4e2c4 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -488,7 +488,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
insn_buff[0] = JMP32_INSN_OPCODE;
*(s32 *)(&insn_buff[1]) = rel;
- text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
+ smp_text_poke_single(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
list_del_init(&op->list);
}
@@ -513,11 +513,11 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
JMP32_INSN_SIZE - INT3_INSN_SIZE);
text_poke(addr, new, INT3_INSN_SIZE);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
text_poke(addr + INT3_INSN_SIZE,
new + INT3_INSN_SIZE,
JMP32_INSN_SIZE - INT3_INSN_SIZE);
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index a68f5a0a9f37..949c9e4bfad2 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -76,6 +76,19 @@ map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p)
static int map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) { return 0; }
#endif
+static int map_mmio_serial(struct x86_mapping_info *info, pgd_t *level4p)
+{
+ unsigned long mstart, mend;
+
+ if (!kexec_debug_8250_mmio32)
+ return 0;
+
+ mstart = kexec_debug_8250_mmio32 & PAGE_MASK;
+ mend = (kexec_debug_8250_mmio32 + PAGE_SIZE + 23) & PAGE_MASK;
+ pr_info("Map PCI serial at %lx - %lx\n", mstart, mend);
+ return kernel_ident_mapping_init(info, level4p, mstart, mend);
+}
+
#ifdef CONFIG_KEXEC_FILE
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_bzImage64_ops,
@@ -285,6 +298,10 @@ static int init_pgtable(struct kimage *image, unsigned long control_page)
if (result)
return result;
+ result = map_mmio_serial(&info, image->arch.pgd);
+ if (result)
+ return result;
+
/*
* This must be last because the intermediate page table pages it
* allocates will not be control pages and may overlap the image.
@@ -304,6 +321,24 @@ static void load_segments(void)
);
}
+static void prepare_debug_idt(unsigned long control_page, unsigned long vec_ofs)
+{
+ gate_desc idtentry = { 0 };
+ int i;
+
+ idtentry.bits.p = 1;
+ idtentry.bits.type = GATE_TRAP;
+ idtentry.segment = __KERNEL_CS;
+ idtentry.offset_low = (control_page & 0xFFFF) + vec_ofs;
+ idtentry.offset_middle = (control_page >> 16) & 0xFFFF;
+ idtentry.offset_high = control_page >> 32;
+
+ for (i = 0; i < 16; i++) {
+ kexec_debug_idt[i] = idtentry;
+ idtentry.offset_low += KEXEC_DEBUG_EXC_HANDLER_SIZE;
+ }
+}
+
int machine_kexec_prepare(struct kimage *image)
{
void *control_page = page_address(image->control_code_page);
@@ -321,6 +356,9 @@ int machine_kexec_prepare(struct kimage *image)
if (image->type == KEXEC_TYPE_DEFAULT)
kexec_pa_swap_page = page_to_pfn(image->swap_page) << PAGE_SHIFT;
+ prepare_debug_idt((unsigned long)__pa(control_page),
+ (unsigned long)kexec_debug_exc_vectors - reloc_start);
+
__memcpy(control_page, __relocate_kernel_start, reloc_end - reloc_start);
set_memory_rox((unsigned long)control_page, 1);
@@ -396,16 +434,10 @@ void __nocfi machine_kexec(struct kimage *image)
* with from a table in memory. At no other time is the
* descriptor table in memory accessed.
*
- * I take advantage of this here by force loading the
- * segments, before I zap the gdt with an invalid value.
+ * Take advantage of this here by force loading the segments,
+ * before the GDT is zapped with an invalid value.
*/
load_segments();
- /*
- * The gdt & idt are now invalid.
- * If you want to load them you must set up your own idt & gdt.
- */
- native_idt_invalidate();
- native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,
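
prepare_debug_idt() above scatters the handler address across the three offset fields of an x86-64 gate descriptor. The packing spelled out, as a sketch:

/* Sketch: how a 64-bit handler address maps into an IDT gate. */
static void pack_idt_offset(gate_desc *g, unsigned long handler)
{
	g->offset_low    = handler & 0xFFFF;		/* bits  0..15 */
	g->offset_middle = (handler >> 16) & 0xFFFF;	/* bits 16..31 */
	g->offset_high   = handler >> 32;		/* bits 32..63 */
}
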
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index a7998f351701..231d6326d1fd 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -206,7 +206,7 @@ static int write_relocate_add(Elf64_Shdr *sechdrs,
write, apply);
if (!early) {
- text_poke_sync();
+ smp_text_poke_sync_each_cpu();
mutex_unlock(&text_mutex);
}
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index c7c4b1917336..57276f134d12 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -263,17 +263,17 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
movl %edx, %edi
movl $1024, %ecx
- rep ; movsl
+ rep movsl
movl %ebp, %edi
movl %eax, %esi
movl $1024, %ecx
- rep ; movsl
+ rep movsl
movl %eax, %edi
movl %edx, %esi
movl $1024, %ecx
- rep ; movsl
+ rep movsl
lea PAGE_SIZE(%ebp), %esi
jmp 0b
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index ac058971a382..ea604f4d0b52 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -39,6 +39,8 @@ SYM_DATA(kexec_va_control_page, .quad 0)
SYM_DATA(kexec_pa_table_page, .quad 0)
SYM_DATA(kexec_pa_swap_page, .quad 0)
SYM_DATA_LOCAL(pa_backup_pages_map, .quad 0)
+SYM_DATA(kexec_debug_8250_mmio32, .quad 0)
+SYM_DATA(kexec_debug_8250_port, .word 0)
.balign 16
SYM_DATA_START_LOCAL(kexec_debug_gdt)
@@ -50,6 +52,11 @@ SYM_DATA_START_LOCAL(kexec_debug_gdt)
.quad 0x00cf92000000ffff /* __KERNEL_DS */
SYM_DATA_END_LABEL(kexec_debug_gdt, SYM_L_LOCAL, kexec_debug_gdt_end)
+ .balign 8
+SYM_DATA_START(kexec_debug_idt)
+ .skip 0x100, 0x00
+SYM_DATA_END(kexec_debug_idt)
+
.section .text..relocate_kernel,"ax";
.code64
SYM_CODE_START_NOALIGN(relocate_kernel)
@@ -72,8 +79,13 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
pushq %r15
pushf
- /* zero out flags, and disable interrupts */
- pushq $0
+ /* Invalidate GDT/IDT, zero out flags */
+ pushq $0
+ pushq $0
+
+ lidt (%rsp)
+ lgdt (%rsp)
+ addq $8, %rsp
popfq
/* Switch to the identity mapped page tables */
@@ -139,6 +151,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
movq %ds, %rax
movq %rax, %ds
+ /* Now build an IDTR on the stack to load the IDT the kernel created */
+ leaq kexec_debug_idt(%rip), %rsi
+ pushq %rsi
+ pushw $0xff
+ lidt (%rsp)
+ addq $10, %rsp
+
+ //int3
+
/*
* Clear X86_CR4_CET (if it was set) such that we can clear CR0_WP
* below.
@@ -342,20 +363,20 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
/* copy source page to swap page */
movq kexec_pa_swap_page(%rip), %rdi
movl $512, %ecx
- rep ; movsq
+ rep movsq
/* copy destination page to source page */
movq %rax, %rdi
movq %rdx, %rsi
movl $512, %ecx
- rep ; movsq
+ rep movsq
/* copy swap page to destination page */
movq %rdx, %rdi
movq kexec_pa_swap_page(%rip), %rsi
.Lnoswap:
movl $512, %ecx
- rep ; movsq
+ rep movsq
lea PAGE_SIZE(%rax), %rsi
jmp .Lloop
@@ -364,3 +385,222 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
ret
int3
SYM_CODE_END(swap_pages)
+
+/*
+ * Generic 'print character' routine
+ * - %al: Character to be printed (may clobber %rax)
+ * - %rdx: MMIO address or port.
+ */
+#define XMTRDY 0x20
+
+#define TXR 0 /* Transmit register (WRITE) */
+#define LSR 5 /* Line Status */
+
+SYM_CODE_START_LOCAL_NOALIGN(pr_char_8250)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ addw $LSR, %dx
+ xchg %al, %ah
+.Lxmtrdy_loop:
+ inb %dx, %al
+ testb $XMTRDY, %al
+ jnz .Lready
+ pause
+ jmp .Lxmtrdy_loop
+
+.Lready:
+ subw $LSR, %dx
+ xchg %al, %ah
+ outb %al, %dx
+pr_char_null:
+ ANNOTATE_NOENDBR
+
+ ANNOTATE_UNRET_SAFE
+ ret
+SYM_CODE_END(pr_char_8250)
+
+SYM_CODE_START_LOCAL_NOALIGN(pr_char_8250_mmio32)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+.Lxmtrdy_loop_mmio:
+ movb (LSR*4)(%rdx), %ah
+ testb $XMTRDY, %ah
+ jnz .Lready_mmio
+ pause
+ jmp .Lxmtrdy_loop_mmio
+
+.Lready_mmio:
+ movb %al, (%rdx)
+ ANNOTATE_UNRET_SAFE
+ ret
+SYM_CODE_END(pr_char_8250_mmio32)
+
+/*
+ * Load pr_char function pointer into %rsi and load %rdx with whatever
+ * that function wants to see there (typically port/MMIO address).
+ */
+.macro pr_setup
+ leaq pr_char_8250(%rip), %rsi
+ movw kexec_debug_8250_port(%rip), %dx
+ testw %dx, %dx
+ jnz 1f
+
+ leaq pr_char_8250_mmio32(%rip), %rsi
+ movq kexec_debug_8250_mmio32(%rip), %rdx
+ testq %rdx, %rdx
+ jnz 1f
+
+ leaq pr_char_null(%rip), %rsi
+1:
+.endm
+
+/* Print the nybble in %bl, clobber %rax */
+SYM_CODE_START_LOCAL_NOALIGN(pr_nybble)
+ UNWIND_HINT_FUNC
+ movb %bl, %al
+ nop
+ andb $0x0f, %al
+ addb $0x30, %al
+ cmpb $0x3a, %al
+ jb 1f
+ addb $('a' - '0' - 10), %al
+ ANNOTATE_RETPOLINE_SAFE
+1: jmp *%rsi
+SYM_CODE_END(pr_nybble)
+
+SYM_CODE_START_LOCAL_NOALIGN(pr_qword)
+ UNWIND_HINT_FUNC
+ movq $16, %rcx
+1: rolq $4, %rbx
+ call pr_nybble
+ loop 1b
+ movb $'\n', %al
+ ANNOTATE_RETPOLINE_SAFE
+ jmp *%rsi
+SYM_CODE_END(pr_qword)
+
+.macro print_reg a, b, c, d, r
+ movb $\a, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movb $\b, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movb $\c, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movb $\d, %al
+ ANNOTATE_RETPOLINE_SAFE
+ call *%rsi
+ movq \r, %rbx
+ call pr_qword
+.endm
+
+SYM_CODE_START_NOALIGN(kexec_debug_exc_vectors)
+ /* Each of these is 6 bytes. */
+.macro vec_err exc
+ UNWIND_HINT_ENTRY
+ . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE)
+ nop
+ nop
+ pushq $\exc
+ jmp exc_handler
+.endm
+
+.macro vec_noerr exc
+ UNWIND_HINT_ENTRY
+ . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE)
+ pushq $0
+ pushq $\exc
+ jmp exc_handler
+.endm
+
+ ANNOTATE_NOENDBR
+ vec_noerr 0 // #DE
+ vec_noerr 1 // #DB
+ vec_noerr 2 // #NMI
+ vec_noerr 3 // #BP
+ vec_noerr 4 // #OF
+ vec_noerr 5 // #BR
+ vec_noerr 6 // #UD
+ vec_noerr 7 // #NM
+ vec_err 8 // #DF
+ vec_noerr 9
+ vec_err 10 // #TS
+ vec_err 11 // #NP
+ vec_err 12 // #SS
+ vec_err 13 // #GP
+ vec_err 14 // #PF
+ vec_noerr 15
+SYM_CODE_END(kexec_debug_exc_vectors)
+
+SYM_CODE_START_LOCAL_NOALIGN(exc_handler)
+ /* No need for RET mitigations during kexec */
+ VALIDATE_UNRET_END
+
+ pushq %rax
+ pushq %rbx
+ pushq %rcx
+ pushq %rdx
+ pushq %rsi
+
+ /* Stack frame */
+#define EXC_SS 0x58 /* Architectural... */
+#define EXC_RSP 0x50
+#define EXC_EFLAGS 0x48
+#define EXC_CS 0x40
+#define EXC_RIP 0x38
+#define EXC_ERRORCODE 0x30 /* Either architectural or zero pushed by handler */
+#define EXC_EXCEPTION 0x28 /* Pushed by handler entry point */
+#define EXC_RAX 0x20 /* Pushed just above in exc_handler */
+#define EXC_RBX 0x18
+#define EXC_RCX 0x10
+#define EXC_RDX 0x08
+#define EXC_RSI 0x00
+
+ /* Set up %rdx/%rsi for debug output */
+ pr_setup
+
+ /* rip and exception info */
+ print_reg 'E', 'x', 'c', ':', EXC_EXCEPTION(%rsp)
+ print_reg 'E', 'r', 'r', ':', EXC_ERRORCODE(%rsp)
+ print_reg 'r', 'i', 'p', ':', EXC_RIP(%rsp)
+ print_reg 'r', 's', 'p', ':', EXC_RSP(%rsp)
+
+ /* We spilled these to the stack */
+ print_reg 'r', 'a', 'x', ':', EXC_RAX(%rsp)
+ print_reg 'r', 'b', 'x', ':', EXC_RBX(%rsp)
+ print_reg 'r', 'c', 'x', ':', EXC_RCX(%rsp)
+ print_reg 'r', 'd', 'x', ':', EXC_RDX(%rsp)
+ print_reg 'r', 's', 'i', ':', EXC_RSI(%rsp)
+
+ /* Other registers untouched */
+ print_reg 'r', 'd', 'i', ':', %rdi
+ print_reg 'r', '8', ' ', ':', %r8
+ print_reg 'r', '9', ' ', ':', %r9
+ print_reg 'r', '1', '0', ':', %r10
+ print_reg 'r', '1', '1', ':', %r11
+ print_reg 'r', '1', '2', ':', %r12
+ print_reg 'r', '1', '3', ':', %r13
+ print_reg 'r', '1', '4', ':', %r14
+ print_reg 'r', '1', '5', ':', %r15
+ print_reg 'c', 'r', '2', ':', %cr2
+
+ /* Only return from INT3 */
+ cmpq $3, EXC_EXCEPTION(%rsp)
+ jne .Ldie
+
+ popq %rsi
+ popq %rdx
+ popq %rcx
+ popq %rbx
+ popq %rax
+
+ addq $16, %rsp
+ iretq
+
+.Ldie:
+ hlt
+ jmp .Ldie
+
+SYM_CODE_END(exc_handler)
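The two pr_char variants above both gate each byte on the XMTRDY bit in the line status register. A hedged C equivalent of the MMIO path, assuming the 4-byte register stride that (LSR*4)(%rdx) encodes:

#include <stdint.h>

#define TXR	0	/* transmit register (write) */
#define LSR	5	/* line status register */
#define XMTRDY	0x20	/* transmitter ready */

/* Sketch of pr_char_8250_mmio32: 32-bit register spacing, so the LSR
 * byte is read at base[LSR * 4]; the TX register sits at the base. */
static void putc_8250_mmio32(volatile uint8_t *base, uint8_t c)
{
	while (!(base[LSR * 4] & XMTRDY))
		;	/* spin until the UART can take another byte */
	base[TXR] = c;
}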
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d2a13b37833..e0cf1595a0ab 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -134,6 +134,7 @@ struct ist_info ist_info;
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
+SYM_PIC_ALIAS(boot_cpu_data);
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index a59c72e77645..8164a7323c17 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -108,7 +108,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
if (system_state == SYSTEM_BOOTING || modinit)
return text_poke_early(insn, code, size);
- text_poke_bp(insn, code, size, emulate);
+ smp_text_poke_single(insn, code, size, emulate);
}
static void __static_call_validate(u8 *insn, bool tail, bool tramp)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index f48325dfaa01..42e1d6cc48e9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -882,16 +882,16 @@ static void do_int3_user(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW(exc_int3)
{
/*
- * poke_int3_handler() is completely self contained code; it does (and
+ * smp_text_poke_int3_handler() is completely self contained code; it does (and
* must) *NOT* call out to anything, lest it hits upon yet another
* INT3.
*/
- if (poke_int3_handler(regs))
+ if (smp_text_poke_int3_handler(regs))
return;
/*
* irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
- * and therefore can trigger INT3, hence poke_int3_handler() must
+ * and therefore can trigger INT3, hence smp_text_poke_int3_handler() must
* be done before. If the entry came from kernel mode, then use
* nmi_enter() because the INT3 could have been hit in any context
* including NMI.
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index ccdc45e5b759..d813f64a89d6 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -79,11 +79,13 @@ const_cpu_current_top_of_stack = cpu_current_top_of_stack;
#define BSS_DECRYPTED \
. = ALIGN(PMD_SIZE); \
__start_bss_decrypted = .; \
+ __pi___start_bss_decrypted = .; \
*(.bss..decrypted); \
. = ALIGN(PAGE_SIZE); \
__start_bss_decrypted_unused = .; \
. = ALIGN(PMD_SIZE); \
__end_bss_decrypted = .; \
+ __pi___end_bss_decrypted = .; \
#else
@@ -128,6 +130,7 @@ SECTIONS
/* Text and read-only data */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ __pi__text = .;
_stext = .;
ALIGN_ENTRY_TEXT_BEGIN
*(.text..__x86.rethunk_untrain)
@@ -391,6 +394,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */
_end = .;
+ __pi__end = .;
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
@@ -466,10 +470,18 @@ SECTIONS
}
/*
- * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+ * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
+ * this. Let's assume that nobody will be running a COMPILE_TEST kernel and
+ * assert that fuller build coverage is more valuable than being able to
+ * run a COMPILE_TEST kernel.
+ */
+#ifndef CONFIG_COMPILE_TEST
+/*
+ * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
*/
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
+#endif
/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 050a0e229a4d..f2b36d32ef40 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -104,6 +104,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
+ if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
+ kvm_mmu_free_obsolete_roots(vcpu);
+
/*
* Checking root.hpa is sufficient even when KVM has mirror root.
* We can have either:
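kvm_check_request() above is KVM's test-and-clear request pattern: consume a pending-work bit, then do the deferred work before reloading. A generic standalone sketch with C11 atomics (the real helper operates on vcpu->requests with stronger ordering guarantees):

#include <stdatomic.h>
#include <stdbool.h>

/* Atomically clear 'bit' and report whether it was pending. */
static bool check_request(atomic_uint *requests, unsigned int bit)
{
	unsigned int mask = 1u << bit;

	return atomic_fetch_and(requests, ~mask) & mask;
}

If it returns true, the caller performs the deferred work (here, freeing obsolete roots) before touching root.hpa.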
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 63bb77ee1bb1..8d1b632e33d2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5974,6 +5974,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
@@ -7669,9 +7670,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
}
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+{
+ return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+{
+ lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+{
+ lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+}
+
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
+ struct kvm_memory_slot *slot = range->slot;
+ int level;
+
/*
* Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
* supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
@@ -7686,6 +7708,38 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;
+ if (WARN_ON_ONCE(range->end <= range->start))
+ return false;
+
+ /*
+ * If the head and tail pages of the range currently allow a hugepage,
+ * i.e. reside fully in the slot and don't have mixed attributes, then
+ * add each corresponding hugepage range to the ongoing invalidation,
+ * e.g. to prevent KVM from creating a hugepage in response to a fault
+ * for a gfn whose attributes aren't changing. Note, only the range
+ * of gfns whose attributes are being modified needs to be explicitly
+ * unmapped, as that will unmap any existing hugepages.
+ */
+ for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+ gfn_t start = gfn_round_for_level(range->start, level);
+ gfn_t end = gfn_round_for_level(range->end - 1, level);
+ gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+ if ((start != range->start || start + nr_pages > range->end) &&
+ start >= slot->base_gfn &&
+ start + nr_pages <= slot->base_gfn + slot->npages &&
+ !hugepage_test_mixed(slot, start, level))
+ kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+ if (end == start)
+ continue;
+
+ if ((end + nr_pages) > range->end &&
+ (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+ !hugepage_test_mixed(slot, end, level))
+ kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+ }
+
/* Unmap the old attribute page. */
if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
range->attr_filter = KVM_FILTER_SHARED;
@@ -7695,23 +7749,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
return kvm_unmap_gfn_range(kvm, range);
}
-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
-{
- return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
-}
-
-static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
-{
- lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
-}
-static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
-{
- lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
-}
static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, int level, unsigned long attrs)
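A self-contained sketch of the head-page rounding used above: round the range start down to a hugepage boundary, then test whether that block only partially overlaps the range (the tail side is symmetric). KVM_PAGES_PER_HPAGE is modeled as a power-of-two page count.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* gfn_round_for_level() equivalent for a power-of-two block size */
static gfn_t gfn_round(gfn_t gfn, gfn_t pages_per_hpage)
{
	return gfn & ~(pages_per_hpage - 1);
}

/*
 * True if the hugepage block containing range_start spills outside
 * [range_start, range_end) - i.e. the case where the invalidation
 * above is widened to cover the whole block.
 */
static bool head_block_is_partial(gfn_t range_start, gfn_t range_end,
				  gfn_t pages_per_hpage)
{
	gfn_t start = gfn_round(range_start, pages_per_hpage);

	return start != range_start || start + pages_per_hpage > range_end;
}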
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 699e551ec93b..9864c057187d 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
kvm_mmu_reset_context(vcpu);
}
+EXPORT_SYMBOL_GPL(kvm_smm_changed);
void process_smi(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0bc708ee2788..a7a7dc507336 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3173,9 +3173,14 @@ skip_vmsa_free:
kvfree(svm->sev_es.ghcb_sa);
}
+static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+{
+ return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+}
+
static void dump_ghcb(struct vcpu_svm *svm)
{
- struct ghcb *ghcb = svm->sev_es.ghcb;
+ struct vmcb_control_area *control = &svm->vmcb->control;
unsigned int nbits;
/* Re-use the dump_invalid_vmcb module parameter */
@@ -3184,18 +3189,24 @@ static void dump_ghcb(struct vcpu_svm *svm)
return;
}
- nbits = sizeof(ghcb->save.valid_bitmap) * 8;
+ nbits = sizeof(svm->sev_es.valid_bitmap) * 8;
- pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
+ /*
+ * Print KVM's snapshot of the GHCB values that were (unsuccessfully)
+ * used to handle the exit. If the guest has since modified the GHCB
+ * itself, dumping the raw GHCB won't help debug why KVM was unable to
+ * handle the VMGEXIT that KVM observed.
+ */
+ pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
- ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
+ kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
- ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
+ control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
- ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
+ control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
- ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
- pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
+ svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
+ pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
}
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
@@ -3266,11 +3277,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
-static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
-{
- return (((u64)control->exit_code_hi) << 32) | control->exit_code;
-}
-
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d5d0c5c3300b..a89c271a1951 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void)
kvm_cpu_svm_disable();
amd_pmu_disable_virt();
-
- if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
- msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
}
static int svm_enable_virtualization_cpu(void)
@@ -687,9 +684,6 @@ static int svm_enable_virtualization_cpu(void)
rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
}
- if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
- msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
-
return 0;
}
@@ -1518,6 +1512,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
+#ifdef CONFIG_CPU_MITIGATIONS
+static DEFINE_SPINLOCK(srso_lock);
+static atomic_t srso_nr_vms;
+
+static void svm_srso_clear_bp_spec_reduce(void *ign)
+{
+ struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
+
+ if (!sd->bp_spec_reduce_set)
+ return;
+
+ msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+ sd->bp_spec_reduce_set = false;
+}
+
+static void svm_srso_vm_destroy(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+ return;
+
+ if (atomic_dec_return(&srso_nr_vms))
+ return;
+
+ guard(spinlock)(&srso_lock);
+
+ /*
+ * Verify a new VM didn't come along, acquire the lock, and increment
+ * the count before this task acquired the lock.
+ */
+ if (atomic_read(&srso_nr_vms))
+ return;
+
+ on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
+}
+
+static void svm_srso_vm_init(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+ return;
+
+ /*
+ * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
+ * transition, i.e. destroying the last VM, is fully complete, e.g. so
+ * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
+ */
+ if (atomic_inc_not_zero(&srso_nr_vms))
+ return;
+
+ guard(spinlock)(&srso_lock);
+
+ atomic_inc(&srso_nr_vms);
+}
+#else
+static void svm_srso_vm_init(void) { }
+static void svm_srso_vm_destroy(void) { }
+#endif
+
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1550,6 +1601,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
(!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
+ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
+ !sd->bp_spec_reduce_set) {
+ sd->bp_spec_reduce_set = true;
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+ }
svm->guest_state_loaded = true;
}
@@ -2231,6 +2287,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
*/
if (!sev_es_guest(vcpu->kvm)) {
clear_page(svm->vmcb);
+#ifdef CONFIG_KVM_SMM
+ if (is_smm(vcpu))
+ kvm_smm_changed(vcpu, false);
+#endif
kvm_vcpu_reset(vcpu, true);
}
@@ -5036,6 +5096,8 @@ static void svm_vm_destroy(struct kvm *kvm)
{
avic_vm_destroy(kvm);
sev_vm_destroy(kvm);
+
+ svm_srso_vm_destroy();
}
static int svm_vm_init(struct kvm *kvm)
@@ -5061,6 +5123,7 @@ static int svm_vm_init(struct kvm *kvm)
return ret;
}
+ svm_srso_vm_init();
return 0;
}
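The srso_nr_vms scheme above takes the spinlock only on the 0 => 1 and 1 => 0 transitions, keeping VM creation and teardown lock-free in the common case. A userspace-flavored sketch of the same pattern, with pthread primitives standing in for the kernel's spinlock and atomics, and disable_mitigation() as a placeholder for the on_each_cpu() IPI:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t srso_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int nr_vms;

static void disable_mitigation(void) { /* stand-in for the IPI broadcast */ }

static void vm_init(void)
{
	int old = atomic_load(&nr_vms);

	/* atomic_inc_not_zero(): fast path when VMs already exist */
	while (old && !atomic_compare_exchange_weak(&nr_vms, &old, old + 1))
		;
	if (old)
		return;

	/* 0 => 1: wait out any in-flight last-VM teardown */
	pthread_mutex_lock(&srso_lock);
	atomic_fetch_add(&nr_vms, 1);
	pthread_mutex_unlock(&srso_lock);
}

static void vm_destroy(void)
{
	if (atomic_fetch_sub(&nr_vms, 1) != 1)
		return;

	pthread_mutex_lock(&srso_lock);
	/* a new VM may have raced in before we took the lock */
	if (!atomic_load(&nr_vms))
		disable_mitigation();
	pthread_mutex_unlock(&srso_lock);
}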
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d4490eaed55d..f16b068c4228 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -335,6 +335,8 @@ struct svm_cpu_data {
u32 next_asid;
u32 min_asid;
+ bool bp_spec_reduce_set;
+
struct vmcb *save_area;
unsigned long save_area_pa;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c5766467a61..0aba4719ae0a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -273,6 +273,7 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
case L1TF_MITIGATION_OFF:
l1tf = VMENTER_L1D_FLUSH_NEVER;
break;
+ case L1TF_MITIGATION_AUTO:
case L1TF_MITIGATION_FLUSH_NOWARN:
case L1TF_MITIGATION_FLUSH:
case L1TF_MITIGATION_FLUSH_NOSMT:
@@ -7358,10 +7359,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
* mitigation for MDS is done late in VMentry and is still
* executed in spite of L1D Flush. This is because an extra VERW
* should not matter much after the big hammer L1D Flush.
+ *
+ * cpu_buf_vm_clear is used when the system is not vulnerable to MDS/TAA,
+ * and is affected by MMIO Stale Data. In such cases mitigation is only
+ * needed against an MMIO capable guest.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
- else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+ else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();
@@ -7700,6 +7705,7 @@ int vmx_vm_init(struct kvm *kvm)
case L1TF_MITIGATION_FLUSH_NOWARN:
/* 'I explicitly don't care' is set */
break;
+ case L1TF_MITIGATION_AUTO:
case L1TF_MITIGATION_FLUSH:
case L1TF_MITIGATION_FLUSH_NOSMT:
case L1TF_MITIGATION_FULL:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index df5b99ea1f18..9896fd574bfc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4597,7 +4597,7 @@ static bool kvm_is_vm_type_supported(unsigned long type)
return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}
-static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
+static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
{
return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
}
@@ -11493,7 +11493,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_queued_exception *ex = &vcpu->arch.exception;
struct kvm_run *kvm_run = vcpu->run;
- u32 sync_valid_fields;
+ u64 sync_valid_fields;
int r;
r = kvm_mmu_post_init_vm(vcpu->kvm);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index e86eda2c0b04..eb2d2e1cbddd 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -75,7 +75,7 @@ static void delay_tsc(u64 cycles)
/* Allow RT tasks to run */
preempt_enable();
- rep_nop();
+ native_pause();
preempt_disable();
/*
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 98631c0e7a11..f786401ac15d 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -631,14 +631,21 @@ static bool get_desc(struct desc_struct *out, unsigned short sel)
/* Bits [15:3] contain the index of the desired entry. */
sel >>= 3;
- mutex_lock(&current->active_mm->context.lock);
- ldt = current->active_mm->context.ldt;
+ /*
+ * If we're not in a valid context with a real (not just lazy)
+ * user mm, then don't even try.
+ */
+ if (!nmi_uaccess_okay())
+ return false;
+
+ mutex_lock(&current->mm->context.lock);
+ ldt = current->mm->context.ldt;
if (ldt && sel < ldt->nr_entries) {
*out = ldt->entries[sel];
success = true;
}
- mutex_unlock(&current->active_mm->context.lock);
+ mutex_unlock(&current->mm->context.lock);
return success;
}
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 6ffb931b9fb1..149a57e334ab 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -324,6 +324,11 @@ int insn_get_opcode(struct insn *insn)
}
insn->attr = inat_get_opcode_attribute(op);
+ if (insn->x86_64 && inat_is_invalid64(insn->attr)) {
+ /* This instruction is invalid, like UD2. Stop decoding. */
+ insn->attr &= INAT_INV64;
+ }
+
while (inat_is_escape(insn->attr)) {
/* Get escaped opcode */
op = get_next(insn_byte_t, insn);
@@ -337,6 +342,7 @@ int insn_get_opcode(struct insn *insn)
insn->attr = 0;
return -EINVAL;
}
+
end:
opcode->got = 1;
return 0;
@@ -658,7 +664,6 @@ int insn_get_immediate(struct insn *insn)
}
if (!inat_has_immediate(insn->attr))
- /* no immediates */
goto done;
switch (inat_immediate_size(insn->attr)) {
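The effect of the new INAT_INV64 check above: when decoding 64-bit code and the opcode is marked invalid (an (i64) entry with no (o64) variant in the opcode map), every attribute bit except the invalid marker is stripped, so the escape/prefix loop that follows never runs. A sketch with illustrative bit values, not the real INAT encoding:

#include <stdbool.h>
#include <stdint.h>

#define INAT_INV64	(1u << 0)	/* illustrative positions only */
#define INAT_ESCAPE	(1u << 1)

static uint32_t filter_opcode_attr(uint32_t attr, bool x86_64)
{
	/* dead instruction in 64-bit mode: keep only the INV64 marker */
	if (x86_64 && (attr & INAT_INV64))
		return attr & INAT_INV64;

	return attr;	/* otherwise escape/immediate handling proceeds */
}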
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 5eecb45d05d5..c20e04764edc 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -10,7 +10,7 @@
static __always_inline void rep_movs(void *to, const void *from, size_t n)
{
unsigned long d0, d1, d2;
- asm volatile("rep ; movsl\n\t"
+ asm volatile("rep movsl\n\t"
"testb $2,%b4\n\t"
"je 1f\n\t"
"movsw\n"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 0ae2e1712e2e..12a23fa7c44c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -41,6 +41,7 @@ SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
+SYM_PIC_ALIAS(memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_START_LOCAL(memcpy_orig)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index d66b710d628f..fb5a03cf5ab7 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -42,6 +42,7 @@ SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
+SYM_PIC_ALIAS(memset)
EXPORT_SYMBOL(memset)
SYM_FUNC_START_LOCAL(memset_orig)
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index a26c43abd47d..9f3116609c8c 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -40,6 +40,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
__stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)
+SYM_PIC_ALIAS(__x86_indirect_thunk_\reg)
.endm
@@ -394,6 +395,7 @@ SYM_CODE_START(__x86_return_thunk)
#endif
int3
SYM_CODE_END(__x86_return_thunk)
+SYM_PIC_ALIAS(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
#endif /* CONFIG_MITIGATION_RETHUNK */
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 53b3f202267c..f87ec24fa579 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -40,8 +40,7 @@ char *strncpy(char *dest, const char *src, size_t count)
"stosb\n\t"
"testb %%al,%%al\n\t"
"jne 1b\n\t"
- "rep\n\t"
- "stosb\n"
+ "rep stosb\n"
"2:"
: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
: "0" (src), "1" (dest), "2" (count) : "memory");
@@ -54,8 +53,7 @@ EXPORT_SYMBOL(strncpy);
char *strcat(char *dest, const char *src)
{
int d0, d1, d2, d3;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"decl %1\n"
"1:\tlodsb\n\t"
"stosb\n\t"
@@ -72,8 +70,7 @@ EXPORT_SYMBOL(strcat);
char *strncat(char *dest, const char *src, size_t count)
{
int d0, d1, d2, d3;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"decl %1\n\t"
"movl %8,%3\n"
"1:\tdecl %3\n\t"
@@ -167,8 +164,7 @@ size_t strlen(const char *s)
{
int d0;
size_t res;
- asm volatile("repne\n\t"
- "scasb"
+ asm volatile("repne scasb"
: "=c" (res), "=&D" (d0)
: "1" (s), "a" (0), "0" (0xffffffffu)
: "memory");
@@ -184,8 +180,7 @@ void *memchr(const void *cs, int c, size_t count)
void *res;
if (!count)
return NULL;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"je 1f\n\t"
"movl $1,%0\n"
"1:\tdecl %0"
@@ -202,7 +197,7 @@ void *memscan(void *addr, int c, size_t size)
{
if (!size)
return addr;
- asm volatile("repnz; scasb\n\t"
+ asm volatile("repnz scasb\n\t"
"jnz 1f\n\t"
"dec %%edi\n"
"1:"
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 38f37df056f7..28267985e85f 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -8,16 +8,14 @@ int d0, d1;
register char *__res;
__asm__ __volatile__(
"movl %6,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
+ "repne scasb\n\t"
"notl %%ecx\n\t"
"decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
"movl %%ecx,%%edx\n"
"1:\tmovl %6,%%edi\n\t"
"movl %%esi,%%eax\n\t"
"movl %%edx,%%ecx\n\t"
- "repe\n\t"
- "cmpsb\n\t"
+ "repe cmpsb\n\t"
"je 2f\n\t" /* also works for empty string, see above */
"xchgl %%eax,%%esi\n\t"
"incl %%esi\n\t"
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 422257c350c6..f6f436f1d573 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -38,9 +38,9 @@ do { \
might_fault(); \
__asm__ __volatile__( \
ASM_STAC "\n" \
- "0: rep; stosl\n" \
+ "0: rep stosl\n" \
" movl %2,%0\n" \
- "1: rep; stosb\n" \
+ "1: rep stosb\n" \
"2: " ASM_CLAC "\n" \
_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %2) \
_ASM_EXTABLE_UA(1b, 2b) \
@@ -140,9 +140,9 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
- "99: rep; movsl\n"
+ "99: rep movsl\n"
"36: movl %%eax, %0\n"
- "37: rep; movsb\n"
+ "37: rep movsb\n"
"100:\n"
_ASM_EXTABLE_UA(1b, 100b)
_ASM_EXTABLE_UA(2b, 100b)
@@ -242,9 +242,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
- "6: rep; movsl\n"
+ "6: rep movsl\n"
" movl %%eax,%0\n"
- "7: rep; movsb\n"
+ "7: rep movsb\n"
"8:\n"
_ASM_EXTABLE_UA(0b, 8b)
_ASM_EXTABLE_UA(1b, 8b)
@@ -293,14 +293,14 @@ do { \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
- "4: rep; movsb\n" \
+ "4: rep movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
- "0: rep; movsl\n" \
+ "0: rep movsl\n" \
" movl %3,%0\n" \
- "1: rep; movsb\n" \
+ "1: rep movsb\n" \
"2:\n" \
_ASM_EXTABLE_TYPE_REG(4b, 2b, EX_TYPE_UCOPY_LEN1, %3) \
_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %3) \
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index f5dd84eb55dc..262f7ca1fb95 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -35,7 +35,7 @@
# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
#
-# REX2 Prefix
+# REX2 Prefix Superscripts
# - (!REX2): REX2 is not allowed
# - (REX2): REX2 variant e.g. JMPABS
@@ -147,7 +147,7 @@ AVXcode:
# 0x60 - 0x6f
60: PUSHA/PUSHAD (i64)
61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix),(o64)
63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
64: SEG=FS (Prefix)
65: SEG=GS (Prefix)
@@ -253,8 +253,8 @@ c0: Grp2 Eb,Ib (1A)
c1: Grp2 Ev,Ib (1A)
c2: RETN Iw (f64)
c3: RETN
-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix),(o64)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix),(o64)
c6: Grp11A Eb,Ib (1A)
c7: Grp11B Ev,Iz (1A)
c8: ENTER Iw,Ib
@@ -286,10 +286,10 @@ df: ESC
# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64) (!REX2)
-e1: LOOPE/LOOPZ Jb (f64) (!REX2)
-e2: LOOP Jb (f64) (!REX2)
-e3: JrCXZ Jb (f64) (!REX2)
+e0: LOOPNE/LOOPNZ Jb (f64),(!REX2)
+e1: LOOPE/LOOPZ Jb (f64),(!REX2)
+e2: LOOP Jb (f64),(!REX2)
+e3: JrCXZ Jb (f64),(!REX2)
e4: IN AL,Ib (!REX2)
e5: IN eAX,Ib (!REX2)
e6: OUT Ib,AL (!REX2)
@@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2)
# in "near" jumps and calls is 16-bit. For CALL,
# push of return address is 16-bit wide, RSP is decremented by 2
# but is not truncated to 16 bits, unlike RIP.
-e8: CALL Jz (f64) (!REX2)
-e9: JMP-near Jz (f64) (!REX2)
-ea: JMP-far Ap (i64) (!REX2)
-eb: JMP-short Jb (f64) (!REX2)
+e8: CALL Jz (f64),(!REX2)
+e9: JMP-near Jz (f64),(!REX2)
+ea: JMP-far Ap (i64),(!REX2)
+eb: JMP-short Jb (f64),(!REX2)
ec: IN AL,DX (!REX2)
ed: IN eAX,DX (!REX2)
ee: OUT DX,AL (!REX2)
@@ -478,22 +478,22 @@ AVXcode: 1
7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
# 0x0f 0x80-0x8f
# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64) (!REX2)
-81: JNO Jz (f64) (!REX2)
-82: JB/JC/JNAE Jz (f64) (!REX2)
-83: JAE/JNB/JNC Jz (f64) (!REX2)
-84: JE/JZ Jz (f64) (!REX2)
-85: JNE/JNZ Jz (f64) (!REX2)
-86: JBE/JNA Jz (f64) (!REX2)
-87: JA/JNBE Jz (f64) (!REX2)
-88: JS Jz (f64) (!REX2)
-89: JNS Jz (f64) (!REX2)
-8a: JP/JPE Jz (f64) (!REX2)
-8b: JNP/JPO Jz (f64) (!REX2)
-8c: JL/JNGE Jz (f64) (!REX2)
-8d: JNL/JGE Jz (f64) (!REX2)
-8e: JLE/JNG Jz (f64) (!REX2)
-8f: JNLE/JG Jz (f64) (!REX2)
+80: JO Jz (f64),(!REX2)
+81: JNO Jz (f64),(!REX2)
+82: JB/JC/JNAE Jz (f64),(!REX2)
+83: JAE/JNB/JNC Jz (f64),(!REX2)
+84: JE/JZ Jz (f64),(!REX2)
+85: JNE/JNZ Jz (f64),(!REX2)
+86: JBE/JNA Jz (f64),(!REX2)
+87: JA/JNBE Jz (f64),(!REX2)
+88: JS Jz (f64),(!REX2)
+89: JNS Jz (f64),(!REX2)
+8a: JP/JPE Jz (f64),(!REX2)
+8b: JNP/JPO Jz (f64),(!REX2)
+8c: JL/JNGE Jz (f64),(!REX2)
+8d: JNL/JGE Jz (f64),(!REX2)
+8e: JLE/JNG Jz (f64),(!REX2)
+8f: JNLE/JG Jz (f64),(!REX2)
# 0x0f 0x90-0x9f
90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 32035d5be5a0..3faa60f13a61 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -3,12 +3,10 @@
KCOV_INSTRUMENT_tlb.o := n
KCOV_INSTRUMENT_mem_encrypt.o := n
KCOV_INSTRUMENT_mem_encrypt_amd.o := n
-KCOV_INSTRUMENT_mem_encrypt_identity.o := n
KCOV_INSTRUMENT_pgprot.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_amd.o := n
-KASAN_SANITIZE_mem_encrypt_identity.o := n
KASAN_SANITIZE_pgprot.o := n
# Disable KCSAN entirely, because otherwise we get warnings that some functions
@@ -16,12 +14,10 @@ KASAN_SANITIZE_pgprot.o := n
KCSAN_SANITIZE := n
# Avoid recursion by not calling KMSAN hooks for CEA code.
KMSAN_SANITIZE_cpu_entry_area.o := n
-KMSAN_SANITIZE_mem_encrypt_identity.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_amd.o = -pg
-CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
CFLAGS_REMOVE_pgprot.o = -pg
endif
@@ -32,7 +28,6 @@ obj-y += pat/
# Make sure __phys_addr has no stackprotector
CFLAGS_physaddr.o := -fno-stack-protector
-CFLAGS_mem_encrypt_identity.o := -fno-stack-protector
CFLAGS_fault.o := -I $(src)/../include/asm/trace
@@ -63,5 +58,4 @@ obj-$(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION) += pti.o
obj-$(CONFIG_X86_MEM_ENCRYPT) += mem_encrypt.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o
-obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index bfa444a7dbb0..aa56d9ac0b8f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -28,6 +28,7 @@
#include <asm/text-patching.h>
#include <asm/memtype.h>
#include <asm/paravirt.h>
+#include <asm/mmu_context.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -824,31 +825,33 @@ void __init poking_init(void)
spinlock_t *ptl;
pte_t *ptep;
- poking_mm = mm_alloc();
- BUG_ON(!poking_mm);
+ text_poke_mm = mm_alloc();
+ BUG_ON(!text_poke_mm);
/* Xen PV guests need the PGD to be pinned. */
- paravirt_enter_mmap(poking_mm);
+ paravirt_enter_mmap(text_poke_mm);
+
+ set_notrack_mm(text_poke_mm);
/*
* Randomize the poking address, but make sure that the following page
* will be mapped at the same PMD. We need 2 pages, so find space for 3,
* and adjust the address if the PMD ends after the first one.
*/
- poking_addr = TASK_UNMAPPED_BASE;
+ text_poke_mm_addr = TASK_UNMAPPED_BASE;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
+ text_poke_mm_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
- if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
- poking_addr += PAGE_SIZE;
+ if (((text_poke_mm_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
+ text_poke_mm_addr += PAGE_SIZE;
/*
* We need to trigger the allocation of the page-tables that will be
* needed for poking now. Later, poking may be performed in an atomic
* section, which might cause allocation to fail.
*/
- ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ ptep = get_locked_pte(text_poke_mm, text_poke_mm_addr, &ptl);
BUG_ON(!ptep);
pte_unmap_unlock(ptep, ptl);
}
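The PMD check above ensures both text-poking pages land in the same PMD. The same arithmetic as a standalone sketch, assuming 4 KiB pages and 2 MiB PMDs:

#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(2UL << 20)
#define PMD_MASK	(~(PMD_SIZE - 1))

/*
 * If the page after 'addr' starts a new PMD, the two poking pages would
 * straddle PMDs; slide the base forward one page so they share one.
 */
static uintptr_t fixup_text_poke_addr(uintptr_t addr)
{
	if (((addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		addr += PAGE_SIZE;
	return addr;
}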
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 7490ff6d83b1..faf3a13fb6ba 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -40,7 +40,9 @@
* section is later cleared.
*/
u64 sme_me_mask __section(".data") = 0;
+SYM_PIC_ALIAS(sme_me_mask);
u64 sev_status __section(".data") = 0;
+SYM_PIC_ALIAS(sev_status);
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index f7ae44d3dd9e..e62af72923e8 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -10,6 +10,7 @@
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
+SYM_PIC_ALIAS(physical_mask);
#endif
pgtable_t pte_alloc_one(struct mm_struct *mm)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index eb83348f9305..2ea8cec6cd49 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -847,7 +847,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* mm_cpumask. The TLB shootdown code can figure out from
* cpu_tlbstate_shared.is_lazy whether or not to send an IPI.
*/
- if (IS_ENABLED(CONFIG_DEBUG_VM) && WARN_ON_ONCE(prev != &init_mm &&
+ if (IS_ENABLED(CONFIG_DEBUG_VM) &&
+ WARN_ON_ONCE(prev != &init_mm && !is_notrack_mm(prev) &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
@@ -899,20 +900,13 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
cond_mitigation(tsk);
/*
- * Let nmi_uaccess_okay() and finish_asid_transition()
- * know that CR3 is changing.
+ * Indicate that CR3 is about to change. nmi_uaccess_okay()
+ * and others are sensitive to the window where mm_cpumask(),
+ * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier();
- /*
- * Leave this CPU in prev's mm_cpumask. Atomic writes to
- * mm_cpumask can be expensive under contention. The CPU
- * will be removed lazily at TLB flush time.
- */
- VM_WARN_ON_ONCE(prev != &init_mm && !cpumask_test_cpu(cpu,
- mm_cpumask(prev)));
-
/* Start receiving IPIs and then read tlb_gen (and LAM below) */
if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpumask_set_cpu(cpu, mm_cpumask(next));
@@ -972,6 +966,77 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
}
/*
+ * Using a temporary mm allows setting temporary mappings that are not accessible
+ * by other CPUs. Such mappings are needed to perform sensitive memory writes
+ * that override the kernel memory protections (e.g., W^X), without exposing the
+ * temporary page-table mappings that are required for these write operations to
+ * other CPUs. Using a temporary mm also allows avoiding TLB shootdowns when the
+ * mapping is torn down. Temporary mms can also be used for EFI runtime service
+ * calls or similar functionality.
+ *
+ * It is illegal to schedule while using a temporary mm -- the context switch
+ * code is unaware of the temporary mm and does not know how to context switch.
+ * Use a real (non-temporary) mm in a kernel thread if you need to sleep.
+ *
+ * Note: For sensitive memory writes, the temporary mm needs to be used
+ * exclusively by a single core, and IRQs should be disabled while the
+ * temporary mm is loaded, thereby preventing interrupt handler bugs from
+ * overriding the kernel memory protection.
+ */
+struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm)
+{
+ struct mm_struct *prev_mm;
+
+ lockdep_assert_preemption_disabled();
+ guard(irqsave)();
+
+ /*
+ * Make sure not to be in TLB lazy mode, as otherwise we'll end up
+ * with a stale address space WITHOUT being in lazy mode after
+ * restoring the previous mm.
+ */
+ if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
+ leave_mm();
+
+ prev_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ switch_mm_irqs_off(NULL, temp_mm, current);
+
+ /*
+ * If breakpoints are enabled, disable them while the temporary mm is
+ * used. Userspace might set up watchpoints on addresses that are used
+ * in the temporary mm, which would lead to wrong signals being sent or
+ * crashes.
+ *
+ * Note that breakpoints are not disabled selectively, which also causes
+ * kernel breakpoints (e.g., perf's) to be disabled. This might be
+ * undesirable, but still seems reasonable as the code that runs in the
+ * temporary mm should be short.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_disable();
+
+ return prev_mm;
+}
+
+void unuse_temporary_mm(struct mm_struct *prev_mm)
+{
+ lockdep_assert_preemption_disabled();
+ guard(irqsave)();
+
+ /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
+ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(this_cpu_read(cpu_tlbstate.loaded_mm)));
+
+ switch_mm_irqs_off(NULL, prev_mm, current);
+
+ /*
+ * Restore the breakpoints if they were disabled before the temporary mm
+ * was loaded.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_restore();
+}
+
+/*
* Call this when reinitializing a CPU. It fixes the following potential
* problems:
*
@@ -1204,8 +1269,16 @@ done:
static bool should_flush_tlb(int cpu, void *data)
{
+ struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
struct flush_tlb_info *info = data;
+ /*
+ * Order the reads of 'loaded_mm' and 'is_lazy' against their
+ * write ordering in switch_mm_irqs_off(). Ensure
+ * 'is_lazy' is at least as new as 'loaded_mm'.
+ */
+ smp_rmb();
+
/* Lazy TLB will get flushed at the next context switch. */
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
return false;
@@ -1214,8 +1287,15 @@ static bool should_flush_tlb(int cpu, void *data)
if (!info->mm)
return true;
+ /*
+ * While switching, the remote CPU could have state from
+ * either the prev or next mm. Assume the worst and flush.
+ */
+ if (loaded_mm == LOADED_MM_SWITCHING)
+ return true;
+
/* The target mm is loaded, and the CPU is not lazy. */
- if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
+ if (loaded_mm == info->mm)
return true;
/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
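The contract for the temporary-mm API added above: preemption stays disabled for the whole window and nothing may sleep in between. A hedged kernel-style usage sketch; patch_one_page() is a hypothetical placeholder for the write through the CPU-local mapping:

/* Usage sketch only; patch_one_page() is a made-up placeholder. */
static void write_via_temporary_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *prev;

	preempt_disable();			/* scheduling is illegal here */
	prev = use_temporary_mm(temp_mm);	/* also disables breakpoints */

	patch_one_page();			/* CPU-local, W^X-bypassing write */

	unuse_temporary_mm(prev);		/* restores breakpoints */
	preempt_enable();
}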
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9e5fe2ba858f..e2b9991c3326 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -629,7 +629,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
goto out;
ret = 1;
if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
- text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
+ smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL);
ret = 0;
}
out:
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index a4b4ebd41b8f..e7e8f77f77f8 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -89,6 +89,7 @@ int __init efi_alloc_page_tables(void)
efi_mm.pgd = efi_pgd;
mm_init_cpumask(&efi_mm);
init_new_context(NULL, &efi_mm);
+ set_notrack_mm(&efi_mm);
return 0;
@@ -434,15 +435,12 @@ void __init efi_dump_pagetable(void)
*/
static void efi_enter_mm(void)
{
- efi_prev_mm = current->active_mm;
- current->active_mm = &efi_mm;
- switch_mm(efi_prev_mm, &efi_mm, NULL);
+ efi_prev_mm = use_temporary_mm(&efi_mm);
}
static void efi_leave_mm(void)
{
- current->active_mm = efi_prev_mm;
- switch_mm(&efi_mm, efi_prev_mm, NULL);
+ unuse_temporary_mm(efi_prev_mm);
}
void arch_efi_call_virt_setup(void)
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index cfa18ec7d55f..1d78e5631bb8 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -87,8 +87,7 @@ SYM_CODE_START(pvh_start_xen)
mov %ebx, %esi
movl rva(pvh_start_info_sz)(%ebp), %ecx
shr $2,%ecx
- rep
- movsl
+ rep movsl
leal rva(early_stack_end)(%ebp), %esp
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 5606a15cf9a1..fb910d9f8471 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -69,8 +69,7 @@ copy_loop:
movl pbe_orig_address(%edx), %edi
movl $(PAGE_SIZE >> 2), %ecx
- rep
- movsl
+ rep movsl
movl pbe_next(%edx), %edx
jmp copy_loop
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 66f066b8feda..c73be0a02a6c 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -138,8 +138,7 @@ SYM_FUNC_START(core_restore_code)
movq pbe_address(%rdx), %rsi
movq pbe_orig_address(%rdx), %rdi
movq $(PAGE_SIZE >> 3), %rcx
- rep
- movsq
+ rep movsq
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 5770c8097f32..2c19d7fc8a85 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -64,6 +64,8 @@ BEGIN {
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
+ invalid64_expr = "\\(i64\\)"
+ only64_expr = "\\(o64\\)"
rex_expr = "^((REX(\\.[XRWB]+)+)|(REX$))"
rex2_expr = "\\(REX2\\)"
no_rex2_expr = "\\(!REX2\\)"
@@ -319,6 +321,11 @@ function convert_operands(count,opnd, i,j,imm,mod)
if (match(ext, force64_expr))
flags = add_flags(flags, "INAT_FORCE64")
+ # check invalid in 64-bit (and no only64)
+ if (match(ext, invalid64_expr) &&
+ !match($0, only64_expr))
+ flags = add_flags(flags, "INAT_INV64")
+
# check REX2 not allowed
if (match(ext, no_rex2_expr))
flags = add_flags(flags, "INAT_NO_REX2")
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index ab5c8e47049c..9193a7790a71 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -31,8 +31,8 @@ struct faultinfo {
#define ___backtrack_faulted(_faulted) \
asm volatile ( \
- "mov $0, %0\n" \
"movl $__get_kernel_nofault_faulted_%=,%1\n" \
+ "mov $0, %0\n" \
"jmp _end_%=\n" \
"__get_kernel_nofault_faulted_%=:\n" \
"mov $1, %0;" \
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index 26fb4835d3e9..61e4ca1e0ab5 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -31,8 +31,8 @@ struct faultinfo {
#define ___backtrack_faulted(_faulted) \
asm volatile ( \
- "mov $0, %0\n" \
"movq $__get_kernel_nofault_faulted_%=,%1\n" \
+ "mov $0, %0\n" \
"jmp _end_%=\n" \
"__get_kernel_nofault_faulted_%=:\n" \
"mov $1, %0;" \
diff --git a/block/blk.h b/block/blk.h
index 328075787814..594eeba7b949 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -480,7 +480,8 @@ static inline void blk_zone_update_request_bio(struct request *rq,
* the original BIO sector so that blk_zone_write_plug_bio_endio() can
* lookup the zone write plug.
*/
- if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
+ if (req_op(rq) == REQ_OP_ZONE_APPEND ||
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
diff --git a/block/ioprio.c b/block/ioprio.c
index 73301a261429..f0ee2798539c 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -46,12 +46,8 @@ int ioprio_check_cap(int ioprio)
*/
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
return -EPERM;
- fallthrough;
- /* rt has prio field too */
- case IOPRIO_CLASS_BE:
- if (level >= IOPRIO_NR_LEVELS)
- return -EINVAL;
break;
+ case IOPRIO_CLASS_BE:
case IOPRIO_CLASS_IDLE:
break;
case IOPRIO_CLASS_NONE:
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 36934c78d127..ffeedcf20b0f 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -163,11 +163,10 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
if (ret)
goto unlock;
}
- if (!scomp_scratch_users) {
+ if (!scomp_scratch_users++) {
ret = crypto_scomp_alloc_scratches();
if (ret)
- goto unlock;
- scomp_scratch_users++;
+ scomp_scratch_users--;
}
unlock:
mutex_unlock(&scomp_lock);
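The scompress fix above merges the user-count increment into the first-user test and rolls it back if allocation fails, so the count can never sit at a nonzero value without scratch buffers. The pattern in isolation, assuming the caller holds the lock protecting 'users':

/* alloc() stands in for crypto_scomp_alloc_scratches(). */
static int get_scratch_ref(unsigned int *users, int (*alloc)(void))
{
	int ret = 0;

	if (!(*users)++) {		/* 0 -> 1: we are the first user */
		ret = alloc();
		if (ret)
			(*users)--;	/* undo: never became a user */
	}
	return ret;
}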
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 5e1d709c6a46..ccaaf6c100c0 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -544,7 +544,7 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_entry_vpu_ts);
ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
boot_params->system_time_us);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n",
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
boot_params->power_profile);
}
@@ -646,7 +646,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
if (IVPU_WA(disable_d0i2))
- boot_params->power_profile = 1;
+ boot_params->power_profile |= BIT(1);
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index ec9a3629da3a..633160470c93 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev)
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
- vdev->timeout.state_dump_msg = 10;
+ vdev->timeout.state_dump_msg = 100;
}
}
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 300f749971d4..d2d82651976d 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -14,7 +14,7 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
-#define DCT_DEFAULT_ACTIVE_PERCENT 15u
+#define DCT_DEFAULT_ACTIVE_PERCENT 30u
#define DCT_PERIOD_US 35300u
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 863e3cd6ace5..b28da35c30b6 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
err_erase_xa:
xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock:
- mutex_unlock(&vdev->submitted_jobs_lock);
mutex_unlock(&file_priv->lock);
+ mutex_unlock(&vdev->submitted_jobs_lock);
ivpu_rpm_put(vdev);
return ret;
}
@@ -874,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_create *args = data;
struct ivpu_cmdq *cmdq;
+ int ret;
- if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
@@ -891,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
mutex_unlock(&file_priv->lock);
+ ivpu_rpm_put(vdev);
+
return cmdq ? 0 : -ENOMEM;
}
@@ -900,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_destroy *args = data;
struct ivpu_cmdq *cmdq;
- u32 cmdq_id;
+ u32 cmdq_id = 0;
int ret;
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->lock);
cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
if (!cmdq || cmdq->is_legacy) {
ret = -ENOENT;
- goto err_unlock;
+ } else {
+ cmdq_id = cmdq->id;
+ ivpu_cmdq_destroy(file_priv, cmdq);
+ ret = 0;
}
- cmdq_id = cmdq->id;
- ivpu_cmdq_destroy(file_priv, cmdq);
mutex_unlock(&file_priv->lock);
- ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
- return 0;
-err_unlock:
- mutex_unlock(&file_priv->lock);
+ /* Abort any pending jobs only if cmdq was destroyed */
+ if (!ret)
+ ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+
+ ivpu_rpm_put(vdev);
+
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index b5891e91f7ab..ac0e22454596 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -428,16 +428,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
active_us = (DCT_PERIOD_US * active_percent) / 100;
inactive_us = DCT_PERIOD_US - active_us;
+ vdev->pm->dct_active_percent = active_percent;
+
+ ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
+ active_percent, active_us, inactive_us);
+
ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = active_percent;
-
- ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
- active_percent, active_us, inactive_us);
return 0;
}
@@ -445,15 +446,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
{
int ret;
+ vdev->pm->dct_active_percent = 0;
+
+ ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
+
ret = ivpu_jsm_dct_disable(vdev);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = 0;
-
- ivpu_dbg(vdev, PM, "DCT disabled\n");
return 0;
}
@@ -466,7 +468,7 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
return;
- if (vdev->pm->dct_active_percent)
+ if (enable)
ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
else
ret = ivpu_pm_dct_disable(vdev);
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 5bc71bea883a..218aaa096455 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(module_kset, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
+ /* Lookup or create built-in module entry in /sys/modules */
+ mk = lookup_or_create_module_kobject(drv->mod_name);
+ if (mk) {
/* remember our module structure */
drv->p->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
+ /* lookup_or_create_module_kobject took a reference */
+ kobject_put(&mk->kobj);
}
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1813cfd0c4bd..cfccf3ff36e7 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev)
static int platform_dma_configure(struct device *dev)
{
- struct platform_driver *drv = to_platform_driver(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr;
int ret = 0;
@@ -1451,8 +1451,8 @@ static int platform_dma_configure(struct device *dev)
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
- /* @drv may not be valid when we're called from the IOMMU layer */
- if (ret || !dev->driver || drv->driver_managed_dma)
+ /* @dev->driver may not be valid when we're called from the IOMMU layer */
+ if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
return ret;
ret = iommu_device_use_default_domain(dev);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 46cba261075f..b8ba7de08753 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -505,6 +505,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
lo->lo_min_dio_size = loop_query_min_dio_size(lo);
}
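+/* Backing files must provide read_iter, and write_iter when opened for writing */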
+static int loop_check_backing_file(struct file *file)
+{
+ if (!file->f_op->read_iter)
+ return -EINVAL;
+
+ if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -526,6 +537,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
return -EBADF;
+ error = loop_check_backing_file(file);
+ if (error)
+ return error;
+
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -963,6 +978,14 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (!file)
return -EBADF;
+
+ if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
+ return -EINVAL;
+
+ error = loop_check_backing_file(file);
+ if (error)
+ return error;
+
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 40f971a66d3e..f9032076bc06 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -201,15 +201,10 @@ struct ublk_params_header {
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset);
+ const struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag);
-static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
-{
- return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
-}
-
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
{
return ub->dev_info.flags & UBLK_F_ZONED;
@@ -609,14 +604,19 @@ static void ublk_apply_params(struct ublk_device *ub)
ublk_dev_param_zoned_apply(ub);
}
+static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
- return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
+ return ubq->flags & UBLK_F_USER_COPY;
}
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
- return !ublk_support_user_copy(ubq);
+ return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq);
}
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -624,8 +624,11 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
/*
* read()/write() is involved in user copy, so request reference
* has to be grabbed
+ *
+	 * for zero copy, the request buffer needs to be registered in the
+	 * io_uring buffer table, so a reference is needed as well
*/
- return ublk_support_user_copy(ubq);
+ return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq);
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
@@ -1946,13 +1949,20 @@ static void ublk_io_release(void *priv)
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
- struct ublk_queue *ubq, unsigned int tag,
+ const struct ublk_queue *ubq, unsigned int tag,
unsigned int index, unsigned int issue_flags)
{
struct ublk_device *ub = cmd->file->private_data;
+ const struct ublk_io *io = &ubq->ios[tag];
struct request *req;
int ret;
+ if (!ublk_support_zero_copy(ubq))
+ return -EINVAL;
+
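+	/* The I/O must currently be owned by the ublk server */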
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ return -EINVAL;
+
req = __ublk_check_and_get_req(ub, ubq, tag, 0);
if (!req)
return -EINVAL;
@@ -1968,8 +1978,17 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
}
static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
+ const struct ublk_queue *ubq, unsigned int tag,
unsigned int index, unsigned int issue_flags)
{
+ const struct ublk_io *io = &ubq->ios[tag];
+
+ if (!ublk_support_zero_copy(ubq))
+ return -EINVAL;
+
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ return -EINVAL;
+
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
@@ -2073,7 +2092,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_IO_REGISTER_IO_BUF:
return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
case UBLK_IO_UNREGISTER_IO_BUF:
- return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
+ return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
case UBLK_IO_FETCH_REQ:
ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
if (ret)
@@ -2125,13 +2144,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- struct ublk_queue *ubq, int tag, size_t offset)
+ const struct ublk_queue *ubq, int tag, size_t offset)
{
struct request *req;
- if (!ublk_need_req_ref(ubq))
- return NULL;
-
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
if (!req)
return NULL;
@@ -2245,6 +2261,9 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
if (!ubq)
return ERR_PTR(-EINVAL);
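+	/* Char-device read()/write() access to request data requires user copy */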
+ if (!ublk_support_user_copy(ubq))
+ return ERR_PTR(-EACCES);
+
if (tag >= ubq->q_depth)
return ERR_PTR(-EINVAL);
@@ -2783,13 +2802,18 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
UBLK_F_URING_CMD_COMP_IN_TASK;
- /* GET_DATA isn't needed any more with USER_COPY */
- if (ublk_dev_is_user_copy(ub))
+	/* GET_DATA isn't needed anymore with USER_COPY or ZERO_COPY */
+ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
- /* Zoned storage support requires user copy feature */
+ /*
+	 * Zoned storage support requires reusing `ublksrv_io_cmd->addr` for
+	 * returning the write_append_lba, which is only allowed with
+	 * user copy or zero copy
+ */
if (ublk_dev_is_zoned(ub) &&
- (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
+ (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags &
+ (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) {
ret = -EINVAL;
goto out_free_dev_number;
}
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index c1e69fcc9c4f..0a759ea26fd3 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -957,8 +957,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
/* This is a debug event that comes from IML and OP image when it
	 * starts execution. There is no need to pass this event to the stack.
*/
- if (skb->data[2] == 0x97)
+ if (skb->data[2] == 0x97) {
+ hci_recv_diag(hdev, skb);
return 0;
+ }
}
return hci_recv_frame(hdev, skb);
@@ -974,7 +976,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type;
- struct sk_buff *new_skb;
void *pdata;
struct hci_dev *hdev = data->hdev;
@@ -1051,24 +1052,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
- new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
- if (!new_skb) {
- bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
- skb->len);
- ret = -ENOMEM;
- goto exit_error;
- }
-
- hci_skb_pkt_type(new_skb) = pkt_type;
- skb_put_data(new_skb, skb->data, plen);
+ hci_skb_pkt_type(skb) = pkt_type;
hdev->stat.byte_rx += plen;
+ skb_trim(skb, plen);
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
- ret = btintel_pcie_recv_event(hdev, new_skb);
+ ret = btintel_pcie_recv_event(hdev, skb);
else
- ret = hci_recv_frame(hdev, new_skb);
+ ret = hci_recv_frame(hdev, skb);
+ skb = NULL; /* skb is freed in the callee */
exit_error:
+ if (skb)
+ kfree_skb(skb);
+
if (ret)
hdev->stat.err_rx++;
@@ -1202,8 +1199,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data *data = container_of(work,
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
- int err;
- struct hci_dev *hdev = data->hdev;
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
@@ -1224,11 +1219,7 @@ static void btintel_pcie_rx_work(struct work_struct *work)
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
- err = btintel_pcie_recv_frame(data, skb);
- if (err)
- bt_dev_err(hdev, "Failed to send received frame: %d",
- err);
- kfree_skb(skb);
+ btintel_pcie_recv_frame(data, skb);
}
}
@@ -1281,10 +1272,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
/* Check CR_TIA and CR_HIA for change */
- if (cr_tia == cr_hia) {
- bt_dev_warn(hdev, "RXQ: no new CD found");
+ if (cr_tia == cr_hia)
return;
- }
rxq = &data->rxq;
@@ -1320,6 +1309,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
+static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+}
+
+static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+}
+
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
@@ -1351,12 +1350,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_gp0_handler(data);
/* For TX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data);
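+		/* Drain any RX completions that were delivered along with this interrupt */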
+ if (!btintel_pcie_is_rxq_empty(data))
+ btintel_pcie_msix_rx_handle(data);
+ }
/* For RX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data);
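+		/* Acknowledge any TX completions that were delivered along with this interrupt */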
+ if (!btintel_pcie_is_txackq_empty(data))
+ btintel_pcie_msix_tx_handle(data);
+ }
/*
* Before sending the interrupt the HW disables it to prevent a nested
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index edd5eead1e93..1d26207b2ba7 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -723,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
sdio_claim_host(bdev->func);
/* Disable interrupt */
@@ -1443,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func)
if (!bdev)
return;
+ hdev = bdev->hdev;
+
+ /* Make sure to call btmtksdio_close before removing sdio card */
+ if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ btmtksdio_close(hdev);
+
	/* Be consistent with the state set in btmtksdio_probe */
pm_runtime_get_noresume(bdev->dev);
- hdev = bdev->hdev;
-
sdio_set_drvdata(func, NULL);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5012b5ff92c8..a42dedb78e0a 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -3010,22 +3010,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- int ret = 1;
+ int ret = 0;
u8 pkt_type;
u8 *sk_ptr;
unsigned int sk_len;
u16 seqno;
u32 dump_size;
- struct hci_event_hdr *event_hdr;
- struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
@@ -3035,30 +3029,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
sk_len = skb->len;
if (pkt_type == HCI_ACLDATA_PKT) {
- acl_hdr = hci_acl_hdr(skb);
- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
- return 0;
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
- event_hdr = (struct hci_event_hdr *)sk_ptr;
- } else {
- event_hdr = hci_event_hdr(skb);
}
- if ((event_hdr->evt != HCI_VENDOR_PKT)
- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
- return 0;
-
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
- if ((sk_len < offsetof(struct qca_dump_hdr, data))
- || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
- || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
- return 0;
-
- /*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3132,17 +3110,84 @@ out:
return ret;
}
+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u8 *sk_ptr;
+ unsigned int sk_len;
+
+ struct hci_event_hdr *event_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct qca_dump_hdr *dump_hdr;
+
+ sk_ptr = skb->data;
+ sk_len = skb->len;
+
+ acl_hdr = hci_acl_hdr(skb);
+ if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
+ return false;
+
+ sk_ptr += HCI_ACL_HDR_SIZE;
+ sk_len -= HCI_ACL_HDR_SIZE;
+ event_hdr = (struct hci_event_hdr *)sk_ptr;
+
+ if ((event_hdr->evt != HCI_VENDOR_PKT) ||
+ (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+ return false;
+
+ sk_ptr += HCI_EVENT_HDR_SIZE;
+ sk_len -= HCI_EVENT_HDR_SIZE;
+
+ dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+ if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+ (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ return false;
+
+ return true;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u8 *sk_ptr;
+ unsigned int sk_len;
+
+ struct hci_event_hdr *event_hdr;
+ struct qca_dump_hdr *dump_hdr;
+
+ sk_ptr = skb->data;
+ sk_len = skb->len;
+
+ event_hdr = hci_event_hdr(skb);
+
+ if ((event_hdr->evt != HCI_VENDOR_PKT)
+ || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+ return false;
+
+ sk_ptr += HCI_EVENT_HDR_SIZE;
+ sk_len -= HCI_EVENT_HDR_SIZE;
+
+ dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+ if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+ (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ return false;
+
+ return true;
+}
+
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (acl_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (evt_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 39f7c2d736d1..b603c25f3dfa 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
#ifdef CONFIG_CLKEVT_I8253
void clockevent_i8253_disable(void)
{
- raw_spin_lock(&i8253_lock);
+ guard(raw_spinlock_irqsave)(&i8253_lock);
/*
* Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
outb_p(0, PIT_CH0);
outb_p(0x30, PIT_MODE);
-
- raw_spin_unlock(&i8253_lock);
}
static int pit_shutdown(struct clock_event_device *evt)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 924314cdeebc..d26b610e4f24 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -909,8 +909,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost)
- policy->boost_supported = true;
+ if (acpi_cpufreq_driver.set_boost) {
+ if (policy->boost_supported) {
+ /*
+ * The firmware may have altered boost state while the
+ * CPU was offline (for example during a suspend-resume
+ * cycle).
+ */
+ if (policy->boost_enabled != boost_state(cpu))
+ set_boost(policy, policy->boost_enabled);
+ } else {
+ policy->boost_supported = true;
+ }
+ }
return result;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index acf19b0042bb..f45ded62b0e0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -536,14 +536,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ unsigned int relation)
{
unsigned int idx;
+ target_freq = clamp_val(target_freq, min, max);
+
if (!policy->freq_table)
return target_freq;
- idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -577,8 +581,7 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
if (unlikely(min > max))
min = max;
- return __resolve_freq(policy, clamp_val(target_freq, min, max),
- CPUFREQ_RELATION_LE);
+ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2397,8 +2400,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
- target_freq = clamp_val(target_freq, policy->min, policy->max);
- target_freq = __resolve_freq(policy, target_freq, relation);
+ target_freq = __resolve_freq(policy, target_freq, policy->min,
+ policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2727,8 +2730,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* compiler optimizations around them because they may be accessed
* concurrently by cpufreq_driver_resolve_freq() during the update.
*/
- WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
- new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
+ new_data.min, new_data.max,
+ CPUFREQ_RELATION_H));
+ new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
+ new_data.max, CPUFREQ_RELATION_L);
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
trace_cpu_frequency_limits(policy);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a7c38b8b3e78..0e65d37c9231 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
- index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+ policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c03a91502f84..35de513af6c9 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -115,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -147,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
- if ((freq < policy->min) || (freq > policy->max))
+ if (freq < min || freq > max)
continue;
if (freq == target_freq) {
optimal.driver_data = i;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f41ed0b9e610..ba9bf06f1c77 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -598,6 +598,9 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
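+	/* Without Intel Dynamic Acceleration (IDA) there is no turbo to enable */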
+ if (!cpu_feature_enabled(X86_FEATURE_IDA))
+ return true;
+
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 3e971f902363..dcd7008fe06b 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
if (status & priv->ecc_stat_ce_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
&err_addr);
- if (priv->ecc_uecnt_offset)
+ if (priv->ecc_cecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
&err_count);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
}
}
- /* Interrupt mode set to every SBERR */
- regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
- ALTR_A10_ECC_INTMODE);
/* Enable ECC */
ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
ALTR_A10_ECC_CTRL_OFST));
@@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
return PTR_ERR(edac->ecc_mgr_map);
}
+ /* Set irq mask for DDR SBE to avoid any pending irq before registration */
+ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
+ (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
+
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 3727e72c8c2e..7248d24c4908 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
+#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16)
+#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17)
#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 19295282de24..fe55613a8ea9 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -299,7 +299,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
}
- ffa_rx_release();
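+	/* The RX buffer is only held when full partition info was returned */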
+ if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
+ ffa_rx_release();
mutex_unlock(&drv_info->rx_lock);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 7af01664ce7e..3a5474015f7d 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -255,6 +255,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
if (!dev)
return NULL;
+ /* Drop the refcnt bumped implicitly by device_find_child */
+ put_device(dev);
+
return to_scmi_dev(dev);
}
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 1c75a4c9c371..0390d5ff195e 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1248,7 +1248,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
}
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
- struct scmi_xfer *xfer, ktime_t stop)
+ struct scmi_xfer *xfer, ktime_t stop,
+ bool *ooo)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
@@ -1257,7 +1258,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
* in case of out-of-order receptions of delayed responses
*/
return info->desc->ops->poll_done(cinfo, xfer) ||
- try_wait_for_completion(&xfer->done) ||
+ (*ooo = try_wait_for_completion(&xfer->done)) ||
ktime_after(ktime_get(), stop);
}
@@ -1274,15 +1275,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
* itself to support synchronous commands replies.
*/
if (!desc->sync_cmds_completed_on_ret) {
+ bool ooo = false;
+
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
*/
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
- spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
- xfer, stop));
- if (ktime_after(ktime_get(), stop)) {
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
+ stop, &ooo));
+ if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
dev_err(dev,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 0a883091259a..e3c2e38b746d 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -6,14 +6,11 @@ config FW_CS_DSP
config FW_CS_DSP_KUNIT_TEST_UTILS
tristate
- depends on KUNIT && REGMAP
- select FW_CS_DSP
config FW_CS_DSP_KUNIT_TEST
tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
- depends on KUNIT && REGMAP
+ depends on KUNIT && REGMAP && FW_CS_DSP
default KUNIT_ALL_TESTS
- select FW_CS_DSP
select FW_CS_DSP_KUNIT_TEST_UTILS
help
This builds KUnit tests for cs_dsp.
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d23a1b9fed75..2f173391b63d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -85,7 +85,6 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o smbios.o
-lib-$(CONFIG_EFI_MIXED) += x86-mixed.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2cba2b6ebe1c..f01925ed8176 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -188,7 +188,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
select STACKDEPOT
- depends on DRM_KMS_HELPER
+ select DRM_KMS_HELPER
depends on DEBUG_KERNEL
depends on EXPERT
help
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
index c98c647f981d..54cde090c3f4 100644
--- a/drivers/gpu/drm/adp/adp_drv.c
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -121,7 +121,6 @@ struct adp_drv_private {
dma_addr_t mask_iova;
int be_irq;
int fe_irq;
- spinlock_t irq_lock;
struct drm_pending_vblank_event *event;
};
@@ -288,6 +287,7 @@ static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
writel(BIT(0), adp->be + ADBE_BLEND_EN3);
writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
writel(BIT(0), adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_on(crtc);
}
static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -310,6 +310,7 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
u32 frame_num = 1;
+ unsigned long flags;
struct adp_drv_private *adp = crtc_to_adp(crtc);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
u64 new_size = ALIGN(new_state->mode.hdisplay *
@@ -330,13 +331,19 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
}
writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
//FIXME: use adbe flush interrupt
- spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
- drm_crtc_vblank_get(crtc);
- adp->event = crtc->state->event;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ crtc->state->event = NULL;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
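+		/* If vblank cannot be enabled, deliver the event immediately */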
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, event);
+ else
+ adp->event = event;
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
}
static const struct drm_crtc_funcs adp_crtc_funcs = {
@@ -482,8 +489,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg)
u32 int_status;
u32 int_ctl;
- spin_lock(&adp->irq_lock);
-
int_status = readl(adp->fe + ADP_INT_STATUS);
if (int_status & ADP_INT_STATUS_VBLANK) {
drm_crtc_handle_vblank(&adp->crtc);
@@ -501,7 +506,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg)
writel(int_status, adp->fe + ADP_INT_STATUS);
- spin_unlock(&adp->irq_lock);
return IRQ_HANDLED;
}
@@ -512,8 +516,7 @@ static int adp_drm_bind(struct device *dev)
struct adp_drv_private *adp = to_adp(drm);
int err;
- adp_disable_vblank(adp);
- writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+ writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL);
adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
if (IS_ERR(adp->next_bridge)) {
@@ -567,8 +570,6 @@ static int adp_probe(struct platform_device *pdev)
if (IS_ERR(adp))
return PTR_ERR(adp);
- spin_lock_init(&adp->irq_lock);
-
dev_set_drvdata(&pdev->dev, &adp->drm);
err = adp_parse_of(pdev, adp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef6e78224fdf..c3641331d4de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1614,11 +1614,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b7f8f2ff143d..707e131f89d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
-/**
- * amdgpu_choose_low_power_state
- *
- * @adev: amdgpu_device_pointer
- *
- * Choose the target low power state for the GPU
- */
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
- if (adev->in_runpm)
- return;
-
- if (amdgpu_acpi_is_s0ix_active(adev))
- adev->in_s0ix = true;
- else if (amdgpu_acpi_is_s3_active(adev))
- adev->in_s3 = true;
-}
-
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7f354cd532dc..f8b3e04d71ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4907,28 +4907,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
- * It is used to evict resources from the device before the system goes to
- * sleep while there is still access to swap.
+ * It is used to set the appropriate flags so that eviction can be optimized
+ * in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
- int r;
switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
- fallthrough;
- case PM_SUSPEND_PREPARE:
- r = amdgpu_device_evict_resources(adev);
- /*
- * This is considered non-fatal at this time because
- * amdgpu_device_prepare() will also fatally evict resources.
- * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
- */
- if (r)
- drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+ break;
+ case PM_POST_HIBERNATION:
+ adev->in_s4 = false;
break;
}
@@ -4949,15 +4941,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
- amdgpu_choose_low_power_state(adev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- goto unprepare;
+ return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4968,15 +4958,10 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
- goto unprepare;
+ return r;
}
return 0;
-
-unprepare:
- adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
-
- return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e6913fcf2c7b..44e120f9f764 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -199,6 +199,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
+ /* XGMI-accessible memory should never be DMA-mapped */
+ if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
+ dma_buf_attach_adev(attach), bo)))
+ return ERR_PTR(-EINVAL);
+
r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
bo->tbo.base.size, attach->dev,
dir, &sgt);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 24ee4710f807..72c807f5822e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2615,13 +2615,8 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
-
- r = amdgpu_device_resume(drm_dev, true);
- adev->in_s4 = false;
- return r;
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2634,9 +2629,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
-
- adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index cdcdae7f71ce..83adf81defc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
-#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index f1dc13b3ab38..cbbeadeb53f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -41,7 +41,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+		 * some platforms, so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 43195c079748..086a647308df 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -32,7 +32,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+		 * some platforms, so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index fcb8dd2876bc..40940b4ab400 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
- RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ if (amdgpu_sriov_vf(adev)) {
+			/* this is fine because SR-IOV doesn't remap the register */
+ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+			 * some platforms, so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ }
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index a88d25a06c29..6ccd31c8bc69 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -35,7 +35,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+		 * some platforms, so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 49f7eb4fbd11..2c9239a22f39 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -32,7 +32,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+		 * some platforms, so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 2ece3ae75ec1..bed5ef4d8788 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -360,7 +360,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
+#define MMIO_REG_HOLE_OFFSET 0x44000
static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 8e7a36f26e9c..b8d835c9e17e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index d716510b8dd6..3eec1b8feaee 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 22ae1939476f..0b19f0ab4480 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -40,6 +40,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index c6f6392c1c20..1f777c125b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_HARVEST_MMSCH 0
@@ -614,7 +615,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 3e176b4b7c69..012f6ea928ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -45,6 +45,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index ba603b2246e2..a1171e6152ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
+#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
#define VCN_HARVEST_MMSCH 0
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index d99d05f42f1d..b90da3d3e140 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -533,7 +533,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 581d8629b9d9..e0e84ef7f568 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -503,6 +503,52 @@ static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
}
/**
+ * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode
+ *
+ * @vinst: VCN instance
+ * @new_state: pause state
+ *
+ * Pause dpg mode for VCN block
+ */
+static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
+{
+ struct amdgpu_device *adev = vinst->adev;
+ uint32_t reg_data = 0;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, vinst->inst);
+
+ /* pause/unpause if state is changed */
+ if (vinst->pause_state.fw_based != new_state->fw_based) {
+ DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
+ vinst->pause_state.fw_based, new_state->fw_based,
+ new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
+ reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ } else {
+ /* unpause DPG, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
+ }
+ vinst->pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
+
+/**
* vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
*
* @vinst: VCN instance
@@ -518,6 +564,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
volatile struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
int vcn_inst;
uint32_t tmp;
@@ -582,6 +629,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ /* Pause dpg */
+ vcn_v5_0_1_pause_dpg_mode(vinst, &state);
+
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
@@ -775,9 +825,13 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
vcn_inst = GET_INST(VCN, inst_idx);
+ /* Unpause dpg */
+ vcn_v5_0_1_pause_dpg_mode(vinst, &state);
+
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 536f73131c2d..64df8ca448b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -673,15 +673,21 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+		/* Update vmin/vmax only if FreeSync is active, or if both PSR and Replay are disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ dc_stream_adjust_vmin_vmax(adev->dm.dc,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -12743,7 +12749,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
- if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+ if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
@@ -12752,22 +12758,14 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
goto out;
}
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
+ if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
+ /* The reply is stored in the top nibble of the command. */
+ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
- payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
- if (!payload->write && p_notify->aux_reply.length &&
- (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
-
- if (payload->length != p_notify->aux_reply.length) {
- DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
- p_notify->aux_reply.length,
- payload->address, payload->length);
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
- goto out;
- }
-
+ if (!payload->write && p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
- }
/* success */
ret = p_notify->aux_reply.length;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 5198a079b463..8f22ad966543 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -173,6 +173,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int conn_index = aconnector->base.index;
guard(mutex)(&hdcp_w->mutex);
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -220,7 +223,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
unsigned int conn_index = aconnector->base.index;
guard(mutex)(&hdcp_w->mutex);
- hdcp_w->aconnector[conn_index] = aconnector;
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -236,7 +238,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
process_output(hdcp_w);
}
@@ -254,6 +259,10 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
hdcp_w->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
}
process_output(hdcp_w);
@@ -488,6 +497,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_work = handle;
struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
int link_index = aconnector->dc_link->link_index;
+ unsigned int conn_index = aconnector->base.index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -544,7 +554,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
-
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = aconnector;
process_output(hdcp_w);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7ceedf626d23..074b79fd5822 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -51,6 +51,9 @@
#define PEAK_FACTOR_X1000 1006
+/*
+ * This function handles both native AUX and I2C-over-AUX transactions.
+ */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -87,15 +90,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
- result = 0;
+ result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
- if (payload.write && result >= 0)
- result = msg->size;
+ /*
+	 * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases
+ */
+ if (payload.write && result >= 0) {
+ if (result) {
+			/* a one-byte result indicates a partial write; force 0 so the core retries */
+ drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+ result = 0;
+ } else if (!payload.reply[0])
+			/* I2C_ACK | AUX_ACK */
+ result = msg->size;
+ }
- if (result < 0)
+ if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +127,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
+ drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ }
+
+ if (payload.reply[0])
+ drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ payload.reply[0]);
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 5d16f36ec95c..ed6584535e89 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -234,7 +234,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
if (!result)
return false;
+ DC_FP_START();
result = dml2_build_mode_programming(mode_programming);
+ DC_FP_END();
if (!result)
return false;
@@ -277,7 +279,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
+ DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
+ DC_FP_END();
if (!is_supported)
return false;
@@ -288,16 +292,12 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
bool out = false;
- DC_FP_START();
-
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
- DC_FP_END();
-
return out;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 2061d43b92e1..ab6baf269801 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -973,7 +973,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
+static struct scaler_data *get_scaler_data_for_plane(
+ const struct dc_plane_state *in,
+ struct dc_state *context)
{
int i;
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
@@ -994,7 +996,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
}
ASSERT(i < MAX_PIPES);
- memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
+ return &temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
@@ -1057,11 +1059,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
const struct dc_plane_state *in, struct dc_state *context,
const struct soc_bounding_box_st *soc)
{
- struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
- if (!scaler_data)
- return;
-
- get_scaler_data_for_plane(in, context, scaler_data);
+ struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
@@ -1126,8 +1124,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->DynamicMetadataTransmittedBytes[location] = 0;
out->NumberOfCursors[location] = 1;
-
- kfree(scaler_data);
}
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,
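The refactor above replaces an allocate-copy-free round trip with a pointer into storage the dc_state already owns (res_ctx.temp_pipe), so the path no longer needs a GFP_KERNEL allocation that could fail mid-validation. The general shape, with hypothetical helper names:

/* Before: every call paid for a temporary copy and could fail silently. */
struct scaler_data *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
	return;
fill_scaler_data(tmp);		/* hypothetical */
use_scaler_data(tmp);		/* hypothetical */
kfree(tmp);

/* After: return a pointer into caller-owned state; it stays valid for
 * as long as the dc_state that contains it. */
struct scaler_data *sd = get_scaler_data_for_plane(in, context);
use_scaler_data(sd);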
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 2a59cc61ed8c..944650cb13de 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -2114,8 +2114,6 @@ static bool dcn32_resource_construct(
#define REG_STRUCT dccg_regs
dccg_regs_init();
- DC_FP_START();
-
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn32;
@@ -2501,14 +2499,10 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
- DC_FP_END();
-
return true;
create_fail:
- DC_FP_END();
-
dcn32_resource_destruct(pool);
return false;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 17fc5dc708f4..60e5ac179c15 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -549,7 +549,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
break;
- len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery);
+ len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
}
if (recovery)
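The one-line fix above is the standard scnprintf() accumulation idiom: the destination pointer and the remaining size must advance together, otherwise every call after the first is told the buffer is still full-sized and can run past its end. In isolation:

char event_string[64];
int len = 0;

/* scnprintf() returns the number of bytes actually written (excluding
 * the terminating NUL) and never writes more than the size it is given,
 * so sizeof(buf) - len keeps each append inside the buffer. */
len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", "rebind");
len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", "bus-reset");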
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index c299cd94d3f7..cf2463090d3a 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -964,6 +964,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
@@ -983,6 +987,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(drm_show_fdinfo);
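drm_dev_enter()/drm_dev_exit() open an SRCU read section that fails once the device has been unplugged, which is what stops fdinfo here from racing with hotplug teardown. The guard pattern on its own:

int idx;

if (!drm_dev_enter(dev, &idx))
	return;		/* device already gone; nothing safe to print */

/* ... dereference device state only inside the section ... */

drm_dev_exit(idx);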
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 38431e8360e7..de424e670995 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1469,9 +1469,9 @@ map_pages:
}
i += 1 << order;
num_dma_mapped = i;
+ range->flags.has_dma_mapping = true;
}
- range->flags.has_dma_mapping = true;
if (zdd) {
range->flags.has_devmem_pages = true;
range->dpagemap = dpagemap;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 89e05a5bed1d..a4cd476f9b30 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
struct mipi_dbi *dbi = &dbidev->dbi;
- size_t len = width * height * 2;
+ const struct drm_format_info *dst_format;
+ size_t len;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
+ dst_format = drm_format_info(dbidev->pixel_format);
+ len = drm_format_info_min_pitch(dst_format, 0, width) * height;
+
memset(dbidev->tx_buf, 0, len);
mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
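Hardcoding width * height * 2 assumed a 16-bit pixel format; deriving the length from the actual format keeps the memset() in step with whatever the device uses. A sketch of the helper pair, assuming RGB565 purely for illustration:

const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB565);
u64 pitch = drm_format_info_min_pitch(info, 0, width);	/* bytes per scanline, plane 0 */
size_t len = pitch * height;				/* total buffer size */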
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 02f95108c637..6dc2d31ccb5a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -242,7 +242,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool is_mst = intel_dp->is_mst;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int bpp_x16, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp_x16;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 64e9317f58fb..71ee01d9ef64 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1001,6 +1001,10 @@ void intel_rps_dec_waiters(struct intel_rps *rps)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
+ /* Don't decrement num_waiters for requests where the increment was skipped */
+ if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
+ return;
+
intel_guc_slpc_dec_waiters(slpc);
} else {
atomic_dec(&rps->num_waiters);
@@ -1029,11 +1033,15 @@ void intel_rps_boost(struct i915_request *rq)
if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
return;
- if (slpc->min_freq_softlimit >= slpc->boost_freq)
- return;
-
/* Return if old value is non zero */
if (!atomic_fetch_inc(&slpc->num_waiters)) {
+ /*
+ * Skip queuing boost work if frequency is already boosted,
+ * but still increment num_waiters.
+ */
+ if (slpc->min_freq_softlimit >= slpc->boost_freq)
+ return;
+
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
queue_work(rps_to_gt(rps)->i915->unordered_wq,
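Moving the already-boosted check inside the atomic_fetch_inc() branch keeps the waiter count balanced with intel_rps_dec_waiters(): every boost request now increments, and only the thread that takes the counter from 0 to 1 queues the work. The first-waiter idiom in isolation, with illustrative names:

static atomic_t num_waiters = ATOMIC_INIT(0);

static void example_add_waiter(struct workqueue_struct *wq, struct work_struct *boost)
{
	/* atomic_fetch_inc() returns the value *before* the increment,
	 * so it reads zero exactly once per busy period. */
	if (!atomic_fetch_inc(&num_waiters))
		queue_work(wq, boost);
}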
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
index 9aae779c4da3..4969d3de2bac 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
@@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);
int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
#else
static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
@@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
return 0;
}
-#endif
+static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
+{
+ return false;
+}
-bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
+#endif
#endif /*__INTEL_PXP_GSCCS_H__ */
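The hunk above moves a declaration that had escaped the #else branch: with the feature compiled out, callers would have linked against a function that does not exist. The usual shape of such a header, with illustrative names:

#ifdef CONFIG_EXAMPLE_FEATURE
bool example_is_ready(struct example *e);
#else
/* The stub lives inside the #else so that feature-disabled builds
 * still compile and callers never need ifdefs of their own. */
static inline bool example_is_ready(struct example *e)
{
	return false;
}
#endif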
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7cc84472cece..edddfc036c6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
- if (error)
+ if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
if (nouveau_fence_signal(fence))
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 232b03c1a259..33a37539de57 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
-static const struct drm_display_mode auo_g101evn010_mode = {
- .clock = 68930,
- .hdisplay = 1280,
- .hsync_start = 1280 + 82,
- .hsync_end = 1280 + 82 + 2,
- .htotal = 1280 + 82 + 2 + 84,
- .vdisplay = 800,
- .vsync_start = 800 + 8,
- .vsync_end = 800 + 8 + 2,
- .vtotal = 800 + 8 + 2 + 6,
+static const struct display_timing auo_g101evn010_timing = {
+ .pixelclock = { 64000000, 68930000, 85000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 8, 64, 256 },
+ .hback_porch = { 8, 64, 256 },
+ .hsync_len = { 40, 168, 767 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 8, 100 },
+ .vback_porch = { 4, 8, 100 },
+ .vsync_len = { 8, 16, 223 },
};
static const struct panel_desc auo_g101evn010 = {
- .modes = &auo_g101evn010_mode,
- .num_modes = 1,
+ .timings = &auo_g101evn010_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 216,
.height = 135,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index fd4215e2f982..925fbc2cda70 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_EXPECT_NULL(test, shmem->sgt);
+ ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
KUNIT_ASSERT_EQ(test, ret, 0);
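Registering the kfree() action before the existing sg_free_table() action is deliberate: KUnit runs deferred actions in reverse registration order (LIFO, like devm), so teardown frees the table's internals first and the containing allocation last. Annotated, under that ordering assumption:

ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);		/* runs second */
KUNIT_ASSERT_EQ(test, ret, 0);

ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);	/* runs first */
KUNIT_ASSERT_EQ(test, ret, 0);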
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index 93c007f18855..9e2d72c447ee 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -8,20 +8,6 @@
#include <linux/swap.h>
/*
- * Casting from randomized struct file * to struct ttm_backup * is fine since
- * struct ttm_backup is never defined nor dereferenced.
- */
-static struct file *ttm_backup_to_file(struct ttm_backup *backup)
-{
- return (void *)backup;
-}
-
-static struct ttm_backup *ttm_file_to_backup(struct file *file)
-{
- return (void *)file;
-}
-
-/*
* Need to map shmem indices to handle since a handle value
* of 0 means error, following the swp_entry_t convention.
*/
@@ -40,12 +26,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
* @backup: The struct backup pointer used to obtain the handle
* @handle: The handle obtained from the @backup_page function.
*/
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
loff_t start = ttm_backup_handle_to_shmem_idx(handle);
start <<= PAGE_SHIFT;
- shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+ shmem_truncate_range(file_inode(backup), start,
start + PAGE_SIZE - 1);
}
@@ -55,16 +41,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
* @backup: The struct backup pointer used to back up the page.
* @dst: The struct page to copy into.
* @handle: The handle returned when the page was backed up.
- * @intr: Try to perform waits interruptable or at least killable.
+ * @intr: Try to perform waits interruptible or at least killable.
*
* Return: 0 on success, Negative error code on failure, notably
* -EINTR if @intr was set to true and a signal is pending.
*/
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
pgoff_t handle, bool intr)
{
- struct file *filp = ttm_backup_to_file(backup);
- struct address_space *mapping = filp->f_mapping;
+ struct address_space *mapping = backup->f_mapping;
struct folio *from_folio;
pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
@@ -106,12 +91,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
* the folio size- and usage.
*/
s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
bool writeback, pgoff_t idx, gfp_t page_gfp,
gfp_t alloc_gfp)
{
- struct file *filp = ttm_backup_to_file(backup);
- struct address_space *mapping = filp->f_mapping;
+ struct address_space *mapping = backup->f_mapping;
unsigned long handle = 0;
struct folio *to_folio;
int ret;
@@ -161,9 +145,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
*
* After a call to this function, it's illegal to use the @backup pointer.
*/
-void ttm_backup_fini(struct ttm_backup *backup)
+void ttm_backup_fini(struct file *backup)
{
- fput(ttm_backup_to_file(backup));
+ fput(backup);
}
/**
@@ -194,14 +178,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
*
* Create a backup utilizing shmem objects.
*
- * Return: A pointer to a struct ttm_backup on success,
+ * Return: A pointer to a struct file on success,
* an error pointer on error.
*/
-struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+struct file *ttm_backup_shmem_create(loff_t size)
{
- struct file *filp;
-
- filp = shmem_file_setup("ttm shmem backup", size, 0);
-
- return ttm_file_to_backup(filp);
+ return shmem_file_setup("ttm shmem backup", size, 0);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 95b86003c50d..5bf3c969907c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1093,7 +1093,8 @@ struct ttm_bo_swapout_walk {
struct ttm_lru_walk walk;
/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
gfp_t gfp_flags;
-
+ /** @hit_low: Whether to attempt swapping BOs that only hit the low watermark threshold */
+ /** @evict_low: Whether to evict BOs that could not be swapped while @hit_low was false (first pass) */
bool hit_low, evict_low;
};
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 83b10706ba89..c2ea865be657 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -506,7 +506,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated,
* if successful, populate the page-table and dma-address arrays.
*/
static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
- struct ttm_backup *backup,
+ struct file *backup,
const struct ttm_operation_ctx *ctx,
struct ttm_pool_alloc_state *alloc)
@@ -655,7 +655,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
- struct ttm_backup *backup = tt->backup;
+ struct file *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
@@ -963,7 +963,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt)
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_backup_flags *flags)
{
- struct ttm_backup *backup = tt->backup;
+ struct file *backup = tt->backup;
struct page *page;
unsigned long handle;
gfp_t alloc_gfp;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index df0aa6c4b8b8..698cd4bf5e46 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -544,7 +544,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit);
*/
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
- struct ttm_backup *backup =
+ struct file *backup =
ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 4a7701a33cf8..eb35482f6fb5 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -744,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-/* If the current address or return address have changed, then the GPU
- * has probably made progress and we should delay the reset. This
- * could fail if the GPU got in an infinite loop in the CL, but that
- * is pretty unlikely outside of an i-g-t testcase.
- */
+static void
+v3d_sched_skip_reset(struct drm_sched_job *sched_job)
+{
+ struct drm_gpu_scheduler *sched = sched_job->sched;
+
+ spin_lock(&sched->job_list_lock);
+ list_add(&sched_job->list, &sched->pending_list);
+ spin_unlock(&sched->job_list_lock);
+}
+
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
@@ -758,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+ /* If the current address or return address have changed, then the GPU
+ * has probably made progress and we should delay the reset. This
+ * could fail if the GPU got in an infinite loop in the CL, but that
+ * is pretty unlikely outside of an i-g-t testcase.
+ */
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
+
+ v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -800,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
- /* If we've made progress, skip reset and let the timer get
- * rearmed.
+ /* If we've made progress, skip reset, add the job to the pending
+ * list, and let the timer get rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
+
+ v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index ef1e5256c56a..0e502feaca81 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt,
unsigned int fw_ref, i;
u32 reg_val;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ KUNIT_ASSERT_TRUE_MSG(test, false, "Forcewake Failed.\n");
+ }
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index f2bb9168967c..e2bb156c71fb 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -52,6 +52,8 @@ struct xe_eu_stall_data_stream {
struct xe_gt *gt;
struct xe_bo *bo;
+ /* Lock to protect data buffer pointers */
+ struct mutex xecore_buf_lock;
struct per_xecore_buf *xecore_buf;
struct {
bool reported_to_user;
@@ -208,6 +210,9 @@ int xe_eu_stall_init(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
int ret;
+ if (!xe_eu_stall_supported_on_platform(xe))
+ return 0;
+
gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL);
if (!gt->eu_stall) {
ret = -ENOMEM;
@@ -378,7 +383,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
u16 group, instance;
unsigned int xecore;
- mutex_lock(&gt->eu_stall->stream_lock);
+ mutex_lock(&stream->xecore_buf_lock);
for_each_dss_steering(xecore, gt, group, instance) {
xecore_buf = &stream->xecore_buf[xecore];
read_ptr = xecore_buf->read;
@@ -396,7 +401,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
set_bit(xecore, stream->data_drop.mask);
xecore_buf->write = write_ptr;
}
- mutex_unlock(&gt->eu_stall->stream_lock);
+ mutex_unlock(&stream->xecore_buf_lock);
return min_data_present;
}
@@ -511,11 +516,13 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
unsigned int xecore;
int ret = 0;
+ mutex_lock(&stream->xecore_buf_lock);
if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) {
if (!stream->data_drop.reported_to_user) {
stream->data_drop.reported_to_user = true;
xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n",
XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask);
+ mutex_unlock(&stream->xecore_buf_lock);
return -EIO;
}
stream->data_drop.reported_to_user = false;
@@ -527,6 +534,7 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
if (ret || count == total_size)
break;
}
+ mutex_unlock(&stream->xecore_buf_lock);
return total_size ?: (ret ?: -EAGAIN);
}
@@ -583,6 +591,7 @@ static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream)
{
struct xe_gt *gt = stream->gt;
+ mutex_destroy(&stream->xecore_buf_lock);
gt->eu_stall->stream = NULL;
kfree(stream);
}
@@ -718,6 +727,7 @@ static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream,
}
init_waitqueue_head(&stream->poll_wq);
+ mutex_init(&stream->xecore_buf_lock);
INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn);
stream->per_xecore_buf_size = per_xecore_buf_size;
stream->sampling_rate_mult = props->sampling_rate_mult;
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.h b/drivers/gpu/drm/xe/xe_eu_stall.h
index ed9d0f233566..d1c76e503799 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.h
+++ b/drivers/gpu/drm/xe/xe_eu_stall.h
@@ -7,6 +7,7 @@
#define __XE_EU_STALL_H__
#include "xe_gt_types.h"
+#include "xe_sriov.h"
size_t xe_eu_stall_get_per_xecore_buf_size(void);
size_t xe_eu_stall_data_record_size(struct xe_device *xe);
@@ -19,6 +20,6 @@ int xe_eu_stall_stream_open(struct drm_device *dev,
static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe)
{
- return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20;
+ return !IS_SRIOV_VF(xe) && (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20);
}
#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index fd41113f8572..0bcf97063ff6 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -555,6 +555,28 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
+void xe_gsc_stop_prepare(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int ret;
+
+ if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
+ return;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);
+
+ /*
+ * If the GSC FW load or the proxy init is interrupted, the only way
+ * to recover is to do an FLR and reload the GSC from scratch.
+ * Therefore, wait for the init to complete before stopping
+ * operations. The proxy init is the last step, so we can just wait
+ * on that.
+ */
+ ret = xe_gsc_wait_for_proxy_init_done(gsc);
+ if (ret)
+ xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
+}
+
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index d99f66c38075..b8b8e0810ad9 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -16,6 +16,7 @@ struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
+void xe_gsc_stop_prepare(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 8cf70b228ff3..d0519cd6704a 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* Proxy init can take up to 500ms, so wait double that for safety */
+ return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ HECI1_FWSTS1_CURRENT_STATE,
+ HECI1_FWSTS1_PROXY_STATE_NORMAL,
+ USEC_PER_SEC, NULL, false);
+}
+
static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
struct xe_gt *gt = gsc_to_gt(gsc);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index fdef56995cd4..765602221dbc 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -12,6 +12,7 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 10a9e3c72b36..66198cf2662c 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -857,7 +857,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_uc_stop_prepare(&gt->uc);
+ xe_uc_suspend_prepare(&gt->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 2d63a69cbfa3..f7005a3643e6 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -92,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
unsigned int fw_ref;
+ int ret = 0;
xe_pm_runtime_get(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- xe_pm_runtime_put(xe);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto fw_put;
}
for_each_hw_engine(hwe, gt, id)
xe_hw_engine_print(hwe, p);
+fw_put:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index c5ad9a0a89c2..0c22b3a36655 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -435,9 +435,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
XE_MAX_EU_FUSE_BITS) * num_dss;
- /* user can issue separate page faults per EU and per CS */
+ /*
+ * user can issue separate page faults per EU and per CS
+ *
+ * XXX: The multiplier is required as compute UMDs hit PF queue errors
+ * without it. Follow up on why this multiplier is required.
+ */
+#define PF_MULTIPLIER 8
pf_queue->num_dw =
- (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
+#undef PF_MULTIPLIER
pf_queue->gt = gt;
pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index f6d523e4c5fe..9095618648bc 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -359,7 +359,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
ext->reg = XE_REG(extlist->reg.__reg.addr);
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
- ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
ext->regname = extlist->name;
}
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index f8c128524d9f..24c578e1170e 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -79,7 +79,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (!range)
- return ERR_PTR(-ENOMEM);
+ return NULL;
INIT_LIST_HEAD(&range->garbage_collector_link);
xe_vm_get(gpusvm_to_vm(gpusvm));
@@ -947,3 +947,15 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return 0;
}
#endif
+
+/**
+ * xe_svm_flush() - SVM flush
+ * @vm: The VM.
+ *
+ * Flush all SVM actions.
+ */
+void xe_svm_flush(struct xe_vm *vm)
+{
+ if (xe_vm_in_fault_mode(vm))
+ flush_work(&vm->svm.garbage_collector.work);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index e059590e5076..be306fe7aaa4 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -72,6 +72,9 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+
+void xe_svm_flush(struct xe_vm *vm);
+
#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
@@ -124,6 +127,11 @@ static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
+
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
+
#endif
/**
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index c14bd2282044..3a8751a8b92d 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -244,7 +244,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)
void xe_uc_stop_prepare(struct xe_uc *uc)
{
- xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_gsc_stop_prepare(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}
@@ -278,6 +278,12 @@ again:
goto again;
}
+void xe_uc_suspend_prepare(struct xe_uc *uc)
+{
+ xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_guc_stop_prepare(&uc->guc);
+}
+
int xe_uc_suspend(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 3813c1ede450..c23e6f5e2514 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
+void xe_uc_suspend_prepare(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
void xe_uc_declare_wedged(struct xe_uc *uc);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 60303998bd61..367c84b90e9e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3312,8 +3312,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
/* Ensure all UNMAPs visible */
- if (xe_vm_in_fault_mode(vm))
- flush_work(&vm->svm.garbage_collector.work);
+ xe_svm_flush(vm);
err = down_write_killable(&vm->lock);
if (err)
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index 17c9660da450..ab0e5a72a059 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -93,7 +93,7 @@ impl Chipset {
// For now, redirect to fmt::Debug for convenience.
impl fmt::Display for Chipset {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{:?}", self)
+ write!(f, "{self:?}")
}
}
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index b3b11be11650..59792e00cecf 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -307,7 +307,7 @@ void __init hv_get_partition_id(void)
local_irq_save(flags);
output = *this_cpu_ptr(hyperv_pcpu_input_arg);
- status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, &output);
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
pt_id = output->partition_id;
local_irq_restore(flags);
@@ -566,9 +566,11 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
- synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
- kfree(*synic_eventring_tail);
- *synic_eventring_tail = NULL;
+ if (hv_root_partition()) {
+ synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
+ kfree(*synic_eventring_tail);
+ *synic_eventring_tail = NULL;
+ }
return 0;
}
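The first hunk fixes a pointer-level bug: output already holds the address of the per-cpu hypercall page, so passing &output handed the hypervisor the address of the local variable instead. The corrected flow in isolation:

struct hv_output_get_partition_id *output;
unsigned long flags;
u64 status, pt_id;

local_irq_save(flags);
output = *this_cpu_ptr(hyperv_pcpu_input_arg);	/* per-cpu page: already a pointer */
status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
pt_id = output->partition_id;
local_irq_restore(flags);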
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 29780f3a7478..0b450e53161e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -477,4 +477,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev)
#endif /* CONFIG_HYPERV_TESTING */
+/* Create and remove the sysfs entry for a channel's memory-mapped ring buffer */
+int hv_create_ring_sysfs(struct vmbus_channel *channel,
+ int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ struct vm_area_struct *vma));
+int hv_remove_ring_sysfs(struct vmbus_channel *channel);
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8d3cff42bdbb..e3d51a316316 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1802,6 +1802,26 @@ static ssize_t subchannel_id_show(struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
+static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj);
+
+ /*
+ * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer
+ * is not NULL.
+ */
+ return channel->mmap_ring_buffer(channel, vma);
+}
+
+static struct bin_attribute chan_attr_ring_buffer = {
+ .attr = {
+ .name = "ring",
+ .mode = 0600,
+ },
+ .mmap = hv_mmap_ring_buffer_wrapper,
+};
static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_out_mask.attr,
&chan_attr_in_mask.attr,
@@ -1821,6 +1841,11 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};
+static struct bin_attribute *vmbus_chan_bin_attrs[] = {
+ &chan_attr_ring_buffer,
+ NULL
+};
+
/*
* Channel-level attribute_group callback function. Returns the permission for
* each attribute, and returns 0 if an attribute is not visible.
@@ -1841,9 +1866,34 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
+static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj,
+ const struct bin_attribute *attr, int idx)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ /* Hide ring attribute if channel's ring_sysfs_visible is set to false */
+ if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
+ return 0;
+
+ return attr->attr.mode;
+}
+
+static size_t vmbus_chan_bin_size(struct kobject *kobj,
+ const struct bin_attribute *bin_attr, int a)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ return channel->ringbuffer_pagecount << PAGE_SHIFT;
+}
+
static const struct attribute_group vmbus_chan_group = {
.attrs = vmbus_chan_attrs,
- .is_visible = vmbus_chan_attr_is_visible
+ .bin_attrs = vmbus_chan_bin_attrs,
+ .is_visible = vmbus_chan_attr_is_visible,
+ .is_bin_visible = vmbus_chan_bin_attr_is_visible,
+ .bin_size = vmbus_chan_bin_size,
};
static const struct kobj_type vmbus_chan_ktype = {
@@ -1851,6 +1901,63 @@ static const struct kobj_type vmbus_chan_ktype = {
.release = vmbus_chan_release,
};
+/**
+ * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel.
+ * @channel: Pointer to vmbus_channel structure
+ * @hv_mmap_ring_buffer: callback invoked when the channel's "ring" sysfs node is mmapped;
+ * it maps the ring buffer of that channel. The callback has the type:
+ * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ * struct vm_area_struct *vma)
+ * and receives the channel and the vm_area_struct used for the mmap.
+ *
+ * The sysfs node for a channel's ring buffer is created along with the other attributes, but
+ * its visibility is disabled by default; it must be enabled explicitly while the use-case is
+ * active. For example, an HV_NIC device is driven by either uio_hv_generic or hv_netvsc at
+ * any given time, and the "ring" sysfs node is needed only while uio_hv_generic is bound to
+ * that device. To avoid exposing the ring buffer by default, this function is responsible
+ * for enabling its visibility for userspace to use.
+ * Note: Races with userspace are possible, and creating new use-cases for this is
+ * discouraged. It was added to maintain backward compatibility while fixing one of the
+ * race conditions in uio_hv_generic around sysfs creation.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int hv_create_ring_sysfs(struct vmbus_channel *channel,
+ int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ struct vm_area_struct *vma))
+{
+ struct kobject *kobj = &channel->kobj;
+
+ channel->mmap_ring_buffer = hv_mmap_ring_buffer;
+ channel->ring_sysfs_visible = true;
+
+ return sysfs_update_group(kobj, &vmbus_chan_group);
+}
+EXPORT_SYMBOL_GPL(hv_create_ring_sysfs);
+
+/**
+ * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel.
+ * @channel: Pointer to vmbus_channel structure
+ *
+ * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int hv_remove_ring_sysfs(struct vmbus_channel *channel)
+{
+ struct kobject *kobj = &channel->kobj;
+ int ret;
+
+ channel->ring_sysfs_visible = false;
+ ret = sysfs_update_group(kobj, &vmbus_chan_group);
+ channel->mmap_ring_buffer = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs);
+
/*
* vmbus_add_channel_kobj - setup a sub-directory under device/channels
*/
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 0d4b3935e687..342d47e67586 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -1380,9 +1380,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
return 0;
rpm_disable:
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
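The reordered error path follows the usual runtime-PM teardown sequence, mirroring setup in reverse: stop autosuspend accounting first, drop the reference synchronously so the device actually suspends now, then disable runtime PM:

pm_runtime_dont_use_autosuspend(dev);	/* undo pm_runtime_use_autosuspend() */
pm_runtime_put_sync(dev);		/* drop our ref and suspend immediately */
pm_runtime_disable(dev);		/* finally disable runtime PM */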
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 16afb9ca19bb..876791d20ed5 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1454,7 +1454,7 @@ omap_i2c_probe(struct platform_device *pdev)
(1000 * omap->speed / 8);
}
- if (of_property_read_bool(node, "mux-states")) {
+ if (of_property_present(node, "mux-states")) {
struct mux_state *mux_state;
mux_state = devm_mux_state_get(&pdev->dev, NULL);
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index 8601b9a8b8e7..5127e58eebc7 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = {
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
IIO_CHAN_SOFT_TIMESTAMP(7)
};
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index e8cd21fa77a6..cbac622ef821 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -231,7 +231,7 @@ struct adxl355_data {
u8 transf_buf[3];
struct {
u8 buf[14];
- s64 ts;
+ aligned_s64 ts;
} buffer;
} __aligned(IIO_DMA_MINALIGN);
};
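This is the first of several identical fixes in this section: these scan structs are pushed with iio_push_to_buffers_with_timestamp(), which writes the trailing timestamp as a naturally aligned 64-bit store, yet on some 32-bit ABIs a plain s64 member is only 4-byte aligned. aligned_s64 (from linux/types.h) spells the 8-byte requirement out:

struct {
	u8 buf[14];	/* sample data may end on any boundary... */
	aligned_s64 ts;	/* ...so force the timestamp to 8-byte alignment */
} buffer;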
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index add4053e7a02..0c04b2bb7efb 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -601,18 +601,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
if (ret)
return ret;
+ st->odr = odr;
+
/* Activity timers depend on ODR */
ret = _adxl367_set_act_time_ms(st, st->act_time_ms);
if (ret)
return ret;
- ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms);
- if (ret)
- return ret;
-
- st->odr = odr;
-
- return 0;
+ return _adxl367_set_inact_time_ms(st, st->inact_time_ms);
}
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 48e4282964a0..bf1d3923a181 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -1226,8 +1226,11 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
if (ret)
return ret;
- if (device_property_read_bool(dev, "wakeup-source"))
- device_init_wakeup(dev, true);
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
+ }
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 18559757f908..7fef2727f89e 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -45,7 +45,7 @@ struct ad7266_state {
*/
struct {
__be16 sample[2];
- s64 timestamp;
+ aligned_s64 timestamp;
} data __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 4fcb49fdf566..aef85093eb16 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -1211,6 +1211,9 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev)
struct ad7380_state *st = iio_priv(indio_dev);
int ret;
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+ spi_unoptimize_message(&st->offload_msg);
+
if (st->seq) {
ret = regmap_update_bits(st->regmap,
AD7380_REG_ADDR_CONFIG1,
@@ -1222,10 +1225,6 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev)
st->seq = false;
}
- spi_offload_trigger_disable(st->offload, st->offload_trigger);
-
- spi_unoptimize_message(&st->offload_msg);
-
return 0;
}
@@ -1611,11 +1610,25 @@ static int ad7380_write_event_config(struct iio_dev *indio_dev,
return ret;
}
-static int ad7380_get_alert_th(struct ad7380_state *st,
+static int ad7380_get_alert_th(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
enum iio_event_direction dir,
int *val)
{
- int ret, tmp;
+ struct ad7380_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+ int ret, tmp, shift;
+
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ /*
+ * The register value is 12-bits and is compared to the most significant
+ * bits of raw value, therefore a shift is required to convert this to
+ * the same scale as the raw value.
+ */
+ shift = scan_type->realbits - 12;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1625,7 +1638,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st,
if (ret)
return ret;
- *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp);
+ *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp) << shift;
return IIO_VAL_INT;
case IIO_EV_DIR_FALLING:
ret = regmap_read(st->regmap,
@@ -1634,7 +1647,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st,
if (ret)
return ret;
- *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp);
+ *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp) << shift;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -1648,7 +1661,6 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev,
enum iio_event_info info,
int *val, int *val2)
{
- struct ad7380_state *st = iio_priv(indio_dev);
int ret;
switch (info) {
@@ -1656,7 +1668,7 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev,
if (!iio_device_claim_direct(indio_dev))
return -EBUSY;
- ret = ad7380_get_alert_th(st, dir, val);
+ ret = ad7380_get_alert_th(indio_dev, chan, dir, val);
iio_device_release_direct(indio_dev);
return ret;
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 1a314fddd7eb..703556eb7257 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -1236,9 +1236,11 @@ static int ad7616_sw_mode_setup(struct iio_dev *indio_dev)
st->write_scale = ad7616_write_scale_sw;
st->write_os = &ad7616_write_os_sw;
- ret = st->bops->sw_mode_config(indio_dev);
- if (ret)
- return ret;
+ if (st->bops->sw_mode_config) {
+ ret = st->bops->sw_mode_config(indio_dev);
+ if (ret)
+ return ret;
+ }
/* Activate Burst mode and SEQEN MODE */
return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
@@ -1268,6 +1270,9 @@ static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev)
st->write_scale = ad7606_write_scale_sw;
st->write_os = &ad7606_write_os_sw;
+ if (!st->bops->sw_mode_config)
+ return 0;
+
return st->bops->sw_mode_config(indio_dev);
}
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index 885bf0b68e77..179115e90988 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -131,7 +131,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr)
{
.tx_buf = &st->d16[0],
.len = 2,
- .cs_change = 0,
+ .cs_change = 1,
}, {
.rx_buf = &st->d16[1],
.len = 2,
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 5a863005aca6..5e0be36af0c5 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -168,7 +168,7 @@ struct ad7768_state {
union {
struct {
__be32 chan;
- s64 timestamp;
+ aligned_s64 timestamp;
} scan;
__be32 d32;
u8 d8[2];
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index a1e48a756a7b..359e26e3f5bc 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -466,7 +466,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct {
__le16 values[DLN2_ADC_MAX_CHANNELS];
- int64_t timestamp_space;
+ aligned_s64 timestamp_space;
} data;
struct dln2_adc_get_all_vals dev_data;
struct dln2_adc *dln2 = iio_priv(indio_dev);
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index 7fb8b2499a1d..b64a8a407168 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -543,7 +543,9 @@ static int iadc_probe(struct platform_device *pdev)
else
return ret;
} else {
- device_init_wakeup(iadc->dev, 1);
+ ret = devm_device_init_wakeup(iadc->dev);
+ if (ret)
+ return dev_err_probe(iadc->dev, ret, "Failed to init wakeup\n");
}
ret = iadc_update_offset(iadc);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 9a099df79518..5e28bd28b81a 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -520,15 +520,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (info->reset)
rockchip_saradc_reset_controller(info->reset);
- /*
- * Use a default value for the converter clock.
- * This may become user-configurable in the future.
- */
- ret = clk_set_rate(info->clk, info->data->clk_rate);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
- "failed to set adc clk rate\n");
-
ret = regulator_enable(info->vref);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
@@ -555,6 +546,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
"failed to get adc clock\n");
+ /*
+ * Use a default value for the converter clock.
+ * This may become user-configurable in the future.
+ */
+ ret = clk_set_rate(info->clk, info->data->clk_rate);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to set adc clk rate\n");
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index d0bd94912e0a..e05ce1f12065 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -5,7 +5,6 @@
* Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
*/
-#include <linux/unaligned.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -19,6 +18,8 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/serdev.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
#define PMS7003_DRIVER_NAME "pms7003"
@@ -76,7 +77,7 @@ struct pms7003_state {
/* Used to construct scan to push to the IIO buffer */
struct {
u16 data[3]; /* PM1, PM2P5, PM10 */
- s64 ts;
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index 6f4f2ba2c09d..a7888146188d 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
int ret;
struct {
s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
- s64 ts;
+ aligned_s64 ts;
} scan;
mutex_lock(&state->lock);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index ad1882f608c0..2055a03cbeb1 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -66,6 +66,10 @@ static struct {
{HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0},
{HID_USAGE_SENSOR_HINGE, 0, 0, 17453293},
{HID_USAGE_SENSOR_HINGE, HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
+
+ {HID_USAGE_SENSOR_HUMAN_PRESENCE, 0, 1, 0},
+ {HID_USAGE_SENSOR_HUMAN_PROXIMITY, 0, 1, 0},
+ {HID_USAGE_SENSOR_HUMAN_ATTENTION, 0, 1, 0},
};
static void simple_div(int dividend, int divisor, int *whole,
diff --git a/drivers/iio/imu/adis16550.c b/drivers/iio/imu/adis16550.c
index b14ea8937c7f..28f0dbd0226c 100644
--- a/drivers/iio/imu/adis16550.c
+++ b/drivers/iio/imu/adis16550.c
@@ -836,7 +836,7 @@ static irqreturn_t adis16550_trigger_handler(int irq, void *p)
u16 dummy;
bool valid;
struct iio_poll_func *pf = p;
- __be32 data[ADIS16550_MAX_SCAN_DATA];
+ __be32 data[ADIS16550_MAX_SCAN_DATA] __aligned(8);
struct iio_dev *indio_dev = pf->indio_dev;
struct adis16550 *st = iio_priv(indio_dev);
struct adis *adis = iio_device_get_drvdata(indio_dev);
diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c
index a86be5af5ccb..2e4469f30d53 100644
--- a/drivers/iio/imu/bmi270/bmi270_core.c
+++ b/drivers/iio/imu/bmi270/bmi270_core.c
@@ -918,8 +918,7 @@ static int bmi270_configure_imu(struct bmi270_data *data)
FIELD_PREP(BMI270_ACC_CONF_ODR_MSK,
BMI270_ACC_CONF_ODR_100HZ) |
FIELD_PREP(BMI270_ACC_CONF_BWP_MSK,
- BMI270_ACC_CONF_BWP_NORMAL_MODE) |
- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
+ BMI270_ACC_CONF_BWP_NORMAL_MODE));
if (ret)
return dev_err_probe(dev, ret, "Failed to configure accelerometer");
@@ -927,8 +926,7 @@ static int bmi270_configure_imu(struct bmi270_data *data)
FIELD_PREP(BMI270_GYR_CONF_ODR_MSK,
BMI270_GYR_CONF_ODR_200HZ) |
FIELD_PREP(BMI270_GYR_CONF_BWP_MSK,
- BMI270_GYR_CONF_BWP_NORMAL_MODE) |
- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
+ BMI270_GYR_CONF_BWP_NORMAL_MODE));
if (ret)
return dev_err_probe(dev, ret, "Failed to configure gyroscope");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 3d3b27f28c9d..273196e647a2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -50,7 +50,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
u16 fifo_count;
u32 fifo_period;
s64 timestamp;
- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(8);
size_t i, nb;
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 0a7cd8c1aa33..8a9d2593576a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -392,6 +392,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
+ if (!pattern_len)
+ pattern_len = ST_LSM6DSX_SAMPLE_SIZE;
+
fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
ST_LSM6DSX_CHAN_SIZE;
fifo_len = (fifo_len / pattern_len) * pattern_len;
@@ -623,6 +626,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
if (!fifo_len)
return 0;
+ if (!pattern_len)
+ pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
+
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw,
ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 4fdcc2acc94e..96c6106b95ee 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2719,8 +2719,11 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
}
if (device_property_read_bool(dev, "wakeup-source") ||
- (pdata && pdata->wakeup_source))
- device_init_wakeup(dev, true);
+ (pdata && pdata->wakeup_source)) {
+ err = devm_device_init_wakeup(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to init wakeup\n");
+ }
return 0;
}
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 76b76d12b388..4c65b32d34ce 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -34,9 +34,9 @@ struct prox_state {
struct iio_chan_spec channels[MAX_CHANNELS];
u32 channel2usage[MAX_CHANNELS];
u32 human_presence[MAX_CHANNELS];
- int scale_pre_decml;
- int scale_post_decml;
- int scale_precision;
+ int scale_pre_decml[MAX_CHANNELS];
+ int scale_post_decml[MAX_CHANNELS];
+ int scale_precision[MAX_CHANNELS];
unsigned long scan_mask[2]; /* One entry plus one terminator. */
int num_channels;
};
@@ -116,13 +116,15 @@ static int prox_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- *val = prox_state->scale_pre_decml;
- *val2 = prox_state->scale_post_decml;
- ret_type = prox_state->scale_precision;
+ if (chan->scan_index >= prox_state->num_channels)
+ return -EINVAL;
+
+ *val = prox_state->scale_pre_decml[chan->scan_index];
+ *val2 = prox_state->scale_post_decml[chan->scan_index];
+ ret_type = prox_state->scale_precision[chan->scan_index];
break;
case IIO_CHAN_INFO_OFFSET:
- *val = hid_sensor_convert_exponent(
- prox_state->prox_attr[chan->scan_index].unit_expo);
+ *val = 0;
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -249,6 +251,10 @@ static int prox_parse_report(struct platform_device *pdev,
st->prox_attr[index].size);
dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr[index].index,
st->prox_attr[index].report_id);
+ st->scale_precision[index] =
+ hid_sensor_format_scale(usage_id, &st->prox_attr[index],
+ &st->scale_pre_decml[index],
+ &st->scale_post_decml[index]);
index++;
}
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 65b295877b41..393a3d2fbe1d 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -788,8 +788,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
int ret;
bool wake_result_ready_queue = false;
enum iio_chan_type chan_type = opt->chip_info->chan_type;
+ bool ok_to_ignore_lock = opt->ok_to_ignore_lock;
- if (!opt->ok_to_ignore_lock)
+ if (!ok_to_ignore_lock)
mutex_lock(&opt->lock);
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
@@ -826,7 +827,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
}
out:
- if (!opt->ok_to_ignore_lock)
+ if (!ok_to_ignore_lock)
mutex_unlock(&opt->lock);
if (wake_result_ready_queue)
diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h
index 9d5c30afa9d6..d62a018eaff3 100644
--- a/drivers/iio/pressure/mprls0025pa.h
+++ b/drivers/iio/pressure/mprls0025pa.h
@@ -34,16 +34,6 @@ struct iio_dev;
struct mpr_data;
struct mpr_ops;
-/**
- * struct mpr_chan
- * @pres: pressure value
- * @ts: timestamp
- */
-struct mpr_chan {
- s32 pres;
- s64 ts;
-};
-
enum mpr_func_id {
MPR_FUNCTION_A,
MPR_FUNCTION_B,
@@ -69,6 +59,8 @@ enum mpr_func_id {
* reading in a loop until data is ready
* @completion: handshake from irq to read
* @chan: channel values for buffered mode
+ * @chan.pres: pressure value
+ * @chan.ts: timestamp
* @buffer: raw conversion data
*/
struct mpr_data {
@@ -87,7 +79,10 @@ struct mpr_data {
struct gpio_desc *gpiod_reset;
int irq;
struct completion completion;
- struct mpr_chan chan;
+ struct {
+ s32 pres;
+ aligned_s64 ts;
+ } chan;
u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index c28a7a6dea5f..555a61e2f3fd 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = {
struct maxim_thermocouple_data {
struct spi_device *spi;
const struct maxim_thermocouple_chip *chip;
+ char tc_type;
u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
- char tc_type;
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
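Moving tc_type ahead of the buffer matters because of how IIO_DMA_MINALIGN is used: everything from the aligned buffer to the end of the struct is treated as DMA territory, so CPU-touched driver fields belong before it rather than sharing the buffer's cacheline. Layout sketch with an illustrative struct:

struct example_data {
	struct spi_device *spi;	/* ordinary fields first */
	char tc_type;
	/* From here on the struct belongs to DMA: nothing the CPU writes
	 * concurrently may follow this member. */
	u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
};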
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index d73389af4dd5..7622638e5bb8 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -48,7 +48,7 @@ struct magellan {
static int magellan_crunch_nibbles(unsigned char *data, int count)
{
- static unsigned char nibbles[16] __nonstring = "0AB3D56GH9:K<MN?";
+ static const unsigned char nibbles[16] __nonstring = "0AB3D56GH9:K<MN?";
do {
if (data[count] == nibbles[data[count] & 0xf])
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index c33e6f33265b..57a5ff3d1992 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -77,12 +77,13 @@
* xbox d-pads should map to buttons, as is required for DDR pads
* but we map them to axes when possible to simplify things
*/
-#define MAP_DPAD_TO_BUTTONS (1 << 0)
-#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
-#define MAP_STICKS_TO_NULL (1 << 2)
-#define MAP_SELECT_BUTTON (1 << 3)
-#define MAP_PADDLES (1 << 4)
-#define MAP_PROFILE_BUTTON (1 << 5)
+#define MAP_DPAD_TO_BUTTONS BIT(0)
+#define MAP_TRIGGERS_TO_BUTTONS BIT(1)
+#define MAP_STICKS_TO_NULL BIT(2)
+#define MAP_SHARE_BUTTON BIT(3)
+#define MAP_PADDLES BIT(4)
+#define MAP_PROFILE_BUTTON BIT(5)
+#define MAP_SHARE_OFFSET BIT(6)
#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
@@ -135,14 +136,14 @@ static const struct xpad_device {
{ 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
{ 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
- { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
+ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */
{ 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
- { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
+ { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
@@ -159,7 +160,7 @@ static const struct xpad_device {
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
- { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
@@ -205,13 +206,13 @@ static const struct xpad_device {
{ 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
+ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
- { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
+ { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
@@ -240,7 +241,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
- { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE },
+ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -281,6 +282,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x01b2, "HORI Taiko No Tatsujin Drum Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -353,6 +355,8 @@ static const struct xpad_device {
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ { 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
@@ -384,13 +388,16 @@ static const struct xpad_device {
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
{ 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+ { 0x2dc8, 0x200f, "8BitDo Ultimate 3-mode Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0423, "Hyperkin DuchesS Xbox One pad", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
- { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -714,8 +721,10 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
+ XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_led_on),
XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
@@ -1027,7 +1036,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
* The report format was gleaned from
* https://github.com/kylelemons/xbox/blob/master/xbox.go
*/
-static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
+static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len)
{
struct input_dev *dev = xpad->dev;
bool do_sync = false;
@@ -1068,8 +1077,12 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & BIT(2));
input_report_key(dev, BTN_SELECT, data[4] & BIT(3));
- if (xpad->mapping & MAP_SELECT_BUTTON)
- input_report_key(dev, KEY_RECORD, data[22] & BIT(0));
+ if (xpad->mapping & MAP_SHARE_BUTTON) {
+ if (xpad->mapping & MAP_SHARE_OFFSET)
+ input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0));
+ else
+ input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0));
+ }
/* buttons A,B,X,Y */
input_report_key(dev, BTN_A, data[4] & BIT(4));
@@ -1217,7 +1230,7 @@ static void xpad_irq_in(struct urb *urb)
xpad360w_process_packet(xpad, 0, xpad->idata);
break;
case XTYPE_XBOXONE:
- xpadone_process_packet(xpad, 0, xpad->idata);
+ xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length);
break;
default:
xpad_process_packet(xpad, 0, xpad->idata);
@@ -1944,7 +1957,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad->xtype == XTYPE_XBOXONE) {
for (i = 0; xpad360_btn[i] >= 0; i++)
input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
- if (xpad->mapping & MAP_SELECT_BUTTON)
+ if (xpad->mapping & MAP_SHARE_BUTTON)
input_set_capability(input_dev, EV_KEY, KEY_RECORD);
} else {
for (i = 0; xpad_btn[i] >= 0; i++)
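
Worth spelling out what MAP_SHARE_OFFSET buys in the hunks above: the Share/record bit is addressed relative to the end of the input report, because report length varies between devices and a few controllers carry extra trailing bytes. A sketch of the arithmetic (the helper name is illustrative, not part of the driver):

        static unsigned int xpad_share_byte(u32 len, bool share_offset)
        {
                /* mirrors data[len - 26] vs data[len - 18] above */
                return share_offset ? len - 26 : len - 18;
        }
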
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 5ad6be914160..061d48350df6 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
u32 value, mask;
int error;
- kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
- kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
+ kregs_home = &regs->keys_regs[MTK_PMIC_HOMEKEY_INDEX];
+ kregs_pwr = &regs->keys_regs[MTK_PMIC_PWRKEY_INDEX];
error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
&long_press_debounce);
diff --git a/drivers/input/misc/hisi_powerkey.c b/drivers/input/misc/hisi_powerkey.c
index d3c293a95d32..d315017324d9 100644
--- a/drivers/input/misc/hisi_powerkey.c
+++ b/drivers/input/misc/hisi_powerkey.c
@@ -30,7 +30,7 @@ static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
{
struct input_dev *input = q;
- pm_wakeup_event(input->dev.parent, MAX_HELD_TIME);
+ pm_wakeup_dev_event(input->dev.parent, MAX_HELD_TIME, true);
input_report_key(input, KEY_POWER, 1);
input_sync(input);
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 8d7303fc13bc..1cfadd73829f 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -74,9 +74,14 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int
return -1;
switch (code) {
- case SND_BELL: if (value) value = 1000;
- case SND_TONE: break;
- default: return -1;
+ case SND_BELL:
+ if (value)
+ value = 1000;
+ break;
+ case SND_TONE:
+ break;
+ default:
+ return -1;
}
if (value > 20 && value < 32767)
@@ -109,9 +114,14 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned
return -1;
switch (code) {
- case SND_BELL: if (value) value = 1000;
- case SND_TONE: break;
- default: return -1;
+ case SND_BELL:
+ if (value)
+ value = 1000;
+ break;
+ case SND_TONE:
+ break;
+ default:
+ return -1;
}
if (value > 20 && value < 32767)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 309c360aab55..c5c88a75a019 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -164,6 +164,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS
static const char * const smbus_pnp_ids[] = {
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
+ "DLL060d", /* Dell Precision M3800 */
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN0049", /* Yoga 11e */
@@ -190,11 +191,15 @@ static const char * const smbus_pnp_ids[] = {
"LEN2054", /* E480 */
"LEN2055", /* E580 */
"LEN2068", /* T14 Gen 1 */
+ "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */
+ "SYN3003", /* HP EliteBook 850 G1 */
"SYN3015", /* HP EliteBook 840 G2 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
+ "TOS01f6", /* Dynabook Portege X30L-G */
+ "TOS0213", /* Dynabook Portege X30-D */
NULL
};
#endif
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index eafe5a9b8964..071b7c9bf566 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on)
int rc;
SET_CMD_REPORT_TYPE(cmd[0], 0);
- SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP);
+ SET_CMD_REPORT_ID(cmd[0], state);
SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER);
rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd));
@@ -870,13 +870,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
ts->input->phys = ts->phys;
input_set_drvdata(ts->input, ts);
- /* Reset the gpio to be in a reset state */
+ /* Assert gpio to be in a reset state */
ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ts->reset_gpio)) {
error = PTR_ERR(ts->reset_gpio);
dev_err(dev, "Failed to request reset gpio, error %d\n", error);
return error;
}
+
+ fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */
+
gpiod_set_value_cansleep(ts->reset_gpio, 0);
/* Need a delay to have device up */
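
The sequencing above is the usual GPIO reset recipe: requesting the line with GPIOD_OUT_HIGH leaves reset asserted (gpiolib applies the active-low polarity from the firmware description), fsleep() guarantees the minimum pulse width, and only then is the line deasserted. A condensed sketch under those assumptions:

        static int example_reset_pulse(struct device *dev)
        {
                struct gpio_desc *reset;

                reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); /* asserted */
                if (IS_ERR(reset))
                        return PTR_ERR(reset);

                fsleep(10);                             /* >= 10 us reset pulse */
                gpiod_set_value_cansleep(reset, 0);     /* deassert, let the chip boot */
                return 0;
        }
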
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index a94a1997f96b..af0fb38bcfdc 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -366,12 +366,7 @@ static struct platform_driver stmpe_ts_driver = {
};
module_platform_driver(stmpe_ts_driver);
-static const struct of_device_id stmpe_ts_ids[] = {
- { .compatible = "st,stmpe-ts", },
- { },
-};
-MODULE_DEVICE_TABLE(of, stmpe_ts_ids);
-
+MODULE_ALIAS("platform:stmpe-ts");
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index dd9e26b7b718..14aa0d77df26 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3664,6 +3664,14 @@ found:
while (*uid == '0' && *(uid + 1))
uid++;
+ if (strlen(hid) >= ACPIHID_HID_LEN) {
+ pr_err("Invalid command line: hid is too long\n");
+ return 1;
+ } else if (strlen(uid) >= ACPIHID_UID_LEN) {
+ pr_err("Invalid command line: uid is too long\n");
+ return 1;
+ }
+
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 9ba596430e7c..980cc6b33c43 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -411,6 +411,12 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
return ERR_CAST(smmu_domain);
smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
+
+ /*
+ * Choose page_size as the leaf page size for invalidation when
+ * ARM_SMMU_FEAT_RANGE_INV is present
+ */
+ smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
smmu_domain->smmu = smmu;
ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index b4c21aaed126..48d910399a1b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3388,6 +3388,7 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
mutex_lock(&smmu->streams_mutex);
for (i = 0; i < fwspec->num_ids; i++) {
struct arm_smmu_stream *new_stream = &master->streams[i];
+ struct rb_node *existing;
u32 sid = fwspec->ids[i];
new_stream->id = sid;
@@ -3398,11 +3399,21 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
break;
/* Insert into SID tree */
- if (rb_find_add(&new_stream->node, &smmu->streams,
- arm_smmu_streams_cmp_node)) {
- dev_warn(master->dev, "stream %u already in tree\n",
- sid);
- ret = -EINVAL;
+ existing = rb_find_add(&new_stream->node, &smmu->streams,
+ arm_smmu_streams_cmp_node);
+ if (existing) {
+ struct arm_smmu_master *existing_master =
+ rb_entry(existing, struct arm_smmu_stream, node)
+ ->master;
+
+ /* Bridged PCI devices may end up with duplicated IDs */
+ if (existing_master == master)
+ continue;
+
+ dev_warn(master->dev,
+ "Aliasing StreamID 0x%x (from %s) unsupported, expect DMA to be broken\n",
+ sid, dev_name(existing_master->dev));
+ ret = -ENODEV;
break;
}
}
@@ -4429,6 +4440,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
if (FIELD_GET(IDR3_RIL, reg))
smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+ if (FIELD_GET(IDR3_FWB, reg))
+ smmu->features |= ARM_SMMU_FEAT_S2FWB;
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b29da2d96d0b..cb0b993bebb4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3785,6 +3785,22 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
intel_iommu_debugfs_create_dev(info);
+ return &iommu->iommu;
+free_table:
+ intel_pasid_free_table(dev);
+clear_rbtree:
+ device_rbtree_remove(info);
+free:
+ kfree(info);
+
+ return ERR_PTR(ret);
+}
+
+static void intel_iommu_probe_finalize(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
/*
* The PCIe spec, in its wisdom, declares that the behaviour of the
* device is undefined if you enable PASID support after ATS support.
@@ -3792,22 +3808,12 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
* we can't yet know if we're ever going to use it.
*/
if (info->pasid_supported &&
- !pci_enable_pasid(pdev, info->pasid_supported & ~1))
+ !pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1))
info->pasid_enabled = 1;
- if (sm_supported(iommu))
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev))
iommu_enable_pci_ats(info);
iommu_enable_pci_pri(info);
-
- return &iommu->iommu;
-free_table:
- intel_pasid_free_table(dev);
-clear_rbtree:
- device_rbtree_remove(info);
-free:
- kfree(info);
-
- return ERR_PTR(ret);
}
static void intel_iommu_release_device(struct device *dev)
@@ -4391,6 +4397,7 @@ const struct iommu_ops intel_iommu_ops = {
.domain_alloc_sva = intel_svm_domain_alloc,
.domain_alloc_nested = intel_iommu_domain_alloc_nested,
.probe_device = intel_iommu_probe_device,
+ .probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
@@ -4432,6 +4439,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+/* QM57/QS57 integrated gfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
+
/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
@@ -4509,7 +4519,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index 7942d8eb3d00..f772deb9cba5 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
+ if (pin == GPIO_NO_WAKE_IRQ)
+ return irq_domain_disconnect_hierarchy(domain, virq);
+
ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
&qcom_mpm_chip, priv);
if (ret)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9c8ed65cd87e..f0b5a6931161 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -68,6 +68,8 @@
#define LIST_DIRTY 1
#define LIST_SIZE 2
+#define SCAN_RESCHED_CYCLE 16
+
/*--------------------------------------------------------------*/
/*
@@ -2424,7 +2426,12 @@ static void __scan(struct dm_bufio_client *c)
atomic_long_dec(&c->need_shrink);
freed++;
- cond_resched();
+
+ if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
+ dm_bufio_unlock(c);
+ cond_resched();
+ dm_bufio_lock(c);
+ }
}
}
}
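
The subtlety in the hunk above: cond_resched() alone yields the CPU but keeps the bufio lock held, so writers queued on that lock still stall for the whole scan. Dropping and retaking the lock every SCAN_RESCHED_CYCLE frees bounds the hold time. The generic shape of the pattern (names illustrative):

        #define RESCHED_CYCLE 16

        static void scan_and_free(struct client *c)
        {
                unsigned long freed = 0;

                client_lock(c);
                while (free_one_item(c)) {
                        if (unlikely(++freed % RESCHED_CYCLE == 0)) {
                                client_unlock(c);
                                cond_resched();         /* lock is free while we yield */
                                client_lock(c);
                        }
                }
                client_unlock(c);
        }
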
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 2a283feb3319..cc3d3897ef42 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -5164,7 +5164,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
BUG_ON(!list_empty(&ic->wait_list));
- if (ic->mode == 'B')
+ if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
cancel_delayed_work_sync(&ic->bitmap_flush_work);
if (ic->metadata_wq)
destroy_workqueue(ic->metadata_wq);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 35100a435c88..6b23e777e10e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -523,8 +523,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv)
gfp = GFP_NOIO;
}
argv = kmalloc_array(new_size, sizeof(*argv), gfp);
- if (argv && old_argv) {
- memcpy(argv, old_argv, *size * sizeof(*argv));
+ if (argv) {
+ if (old_argv)
+ memcpy(argv, old_argv, *size * sizeof(*argv));
*size = new_size;
}
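
The realloc_argv() fix above is subtle: previously *size was only updated when an old array was being copied, so the very first allocation (old_argv == NULL) handed back a buffer whose recorded capacity was still zero. The corrected shape, as a userspace analog (growth policy illustrative; freeing the old array unconditionally matches the kernel helper, whose callers treat failure as fatal):

        #include <stdlib.h>
        #include <string.h>

        static char **grow_argv(unsigned int *size, char **old)
        {
                unsigned int new_size = *size ? *size * 2 : 8;
                char **argv = malloc(new_size * sizeof(*argv));

                if (argv) {
                        if (old)
                                memcpy(argv, old, *size * sizeof(*argv));
                        *size = new_size;       /* capacity tracks the allocation */
                }
                free(old);
                return argv;
        }
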
@@ -1049,7 +1050,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
unsigned int min_pool_size = 0, pool_size;
struct dm_md_mempools *pools;
unsigned int bioset_flags = 0;
- bool mempool_needs_integrity = t->integrity_supported;
if (unlikely(type == DM_TYPE_NONE)) {
DMERR("no table type is set, can't allocate mempools");
@@ -1074,8 +1074,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
min_pool_size = max(min_pool_size, ti->num_flush_bios);
-
- mempool_needs_integrity |= ti->mempool_needs_integrity;
}
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size,
@@ -1175,7 +1173,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
t = dm_get_live_table(md, &srcu_idx);
if (!t)
- return 0;
+ goto put_live_table;
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@@ -1186,6 +1184,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
(void *)key);
}
+put_live_table:
dm_put_live_table(md, srcu_idx);
return 0;
}
diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig
index b9d21643eef1..c31abc26f602 100644
--- a/drivers/media/cec/i2c/Kconfig
+++ b/drivers/media/cec/i2c/Kconfig
@@ -16,6 +16,7 @@ config CEC_CH7322
config CEC_NXP_TDA9950
tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC"
+ depends on I2C
select CEC_NOTIFIER
select CEC_CORE
default DRM_I2C_NXP_TDA998X
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index e576b213084d..e45ba127069f 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -1149,8 +1149,11 @@ config VIDEO_ISL7998X
config VIDEO_LT6911UXE
tristate "Lontium LT6911UXE decoder"
- depends on ACPI && VIDEO_DEV
+ depends on ACPI && VIDEO_DEV && I2C
select V4L2_FWNODE
+ select V4L2_CCI_I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor-level driver for the Lontium
LT6911UXE HDMI to MIPI CSI-2 bridge.
diff --git a/drivers/media/platform/synopsys/hdmirx/Kconfig b/drivers/media/platform/synopsys/hdmirx/Kconfig
index 27e6706f84a3..4321f985f632 100644
--- a/drivers/media/platform/synopsys/hdmirx/Kconfig
+++ b/drivers/media/platform/synopsys/hdmirx/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_SYNOPSYS_HDMIRX
tristate "Synopsys DesignWare HDMI Receiver driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig
index e95edc0f22bf..cc470070a7a5 100644
--- a/drivers/media/test-drivers/vivid/Kconfig
+++ b/drivers/media/test-drivers/vivid/Kconfig
@@ -32,7 +32,8 @@ config VIDEO_VIVID_CEC
config VIDEO_VIVID_OSD
bool "Enable Framebuffer for testing Output Overlay"
- depends on VIDEO_VIVID && FB
+ depends on VIDEO_VIVID && FB_CORE
+ depends on VIDEO_VIVID=m || FB_CORE=y
default y
select FB_IOMEM_HELPERS
help
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 6824131b69b1..264e11fa58ea 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -691,8 +691,8 @@ config MMC_TMIO_CORE
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on (RESET_CONTROLLER && REGULATOR) || !OF
select MMC_TMIO_CORE
- select RESET_CONTROLLER if ARCH_RENESAS
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fa6526be3638..8c83e203c516 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -1179,7 +1179,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(rdev)) {
dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev));
ret = PTR_ERR(rdev);
- goto efree;
+ goto edisclk;
}
priv->rdev = rdev;
}
@@ -1243,26 +1243,26 @@ int renesas_sdhi_probe(struct platform_device *pdev,
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0) {
ret = num_irqs;
- goto eirq;
+ goto edisclk;
}
/* There must be at least one IRQ source */
if (!num_irqs) {
ret = -ENXIO;
- goto eirq;
+ goto edisclk;
}
for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
- goto eirq;
+ goto edisclk;
}
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq;
+ goto edisclk;
}
ret = tmio_mmc_host_probe(host);
@@ -1274,8 +1274,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
return ret;
-eirq:
- tmio_mmc_host_remove(host);
edisclk:
renesas_sdhi_clk_disable(host);
efree:
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 884a6352c42b..c2c116ce1087 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -2379,6 +2379,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
SET_NETDEV_DEV(net_dev, dev);
m_can_of_parse_mram(class_dev, mram_config_vals);
+ spin_lock_init(&class_dev->tx_handling_spinlock);
out:
return class_dev;
}
@@ -2462,9 +2463,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ unregister_candev(cdev->net);
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
- unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index 7107a37da36c..c3fb3176ce42 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -937,8 +937,8 @@ static void rkcanfd_remove(struct platform_device *pdev)
struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
rkcanfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
free_candev(ndev);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 3bc56517fe7a..c30b04f8fc0d 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
.brp_inc = 1,
};
+/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
+ * [-64,63] for TDCO, indicating a relative TDCO.
+ *
+ * Manual tests have shown that using a relative TDCO configuration
+ * results in bus off, while an absolute configuration works.
+ *
+ * For TDCO use the max value (63) from the data sheet, but 0 as the
+ * minimum.
+ */
+static const struct can_tdc_const mcp251xfd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 63,
+ .tdco_min = 0,
+ .tdco_max = 63,
+ .tdcf_min = 0,
+ .tdcf_max = 0,
+};
+
static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
@@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
- u32 val = 0;
- s8 tdco;
+ u32 tdcmod, val = 0;
int err;
/* CAN Control Register
@@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
return err;
/* Transmitter Delay Compensation */
- tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
- -64, 63);
- val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
- MCP251XFD_REG_TDC_TDCMOD_AUTO) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
+ else
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
+
+ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
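
With mcp251xfd_tdc_const advertised, the CAN core can validate user-supplied transmitter delay compensation before the driver programs MCP251XFD_REG_TDC: AUTO keeps the controller measuring TDCV itself, MANUAL programs both fields. A sketch of the bounds check implied by the constants above (the core performs the equivalent via tdc_const; this helper is illustrative):

        static int check_tdc(const struct can_tdc *tdc, const struct can_tdc_const *c)
        {
                if (tdc->tdcv < c->tdcv_min || tdc->tdcv > c->tdcv_max)
                        return -EINVAL;
                if (tdc->tdco < c->tdco_min || tdc->tdco > c->tdco_max)
                        return -EINVAL;
                if (tdc->tdcf > c->tdcf_max)    /* tdcf_max == 0: no fail check offered */
                        return -EINVAL;
                return 0;
        }
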
@@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
@@ -2174,8 +2198,8 @@ static void mcp251xfd_remove(struct spi_device *spi)
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
mcp251xfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e5ba71897906..9eb39cfa5fb2 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -373,15 +373,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
+ vc1 &= ~VC1_RX_MCST_FWD_EN;
+
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc1 |= VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
if (enable_filtering) {
vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
vc5 |= VC5_DROP_VTABLE_MISS;
} else {
- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
vc5 &= ~VC5_DROP_VTABLE_MISS;
}
@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
} else {
vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc1 &= ~VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
vc5 &= ~VC5_DROP_VTABLE_MISS;
@@ -576,6 +578,18 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
+int b53_setup_port(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_setup_port);
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -588,10 +602,6 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
-
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
if (ret)
@@ -722,10 +732,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
-
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -761,6 +767,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}
+static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (!dev->vlan_filtering)
+ return true;
+
+ dp = dsa_to_port(ds, port);
+
+ if (dsa_port_is_cpu(dp))
+ return true;
+
+ return dp->bridge == NULL;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -779,7 +801,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
+ b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
/* Create an untagged VLAN entry for the default PVID in case
* CONFIG_VLAN_8021Q is disabled and there are no calls to
@@ -787,26 +809,39 @@ int b53_configure_vlan(struct dsa_switch *ds)
* entry. Do this only when the tagging protocol is not
* DSA_TAG_PROTO_NONE
*/
+ v = &dev->vlans[def_vid];
b53_for_each_port(dev, i) {
- v = &dev->vlans[def_vid];
- v->members |= BIT(i);
+ if (!b53_vlan_port_may_join_untagged(ds, i))
+ continue;
+
+ vl.members |= BIT(i);
if (!b53_vlan_port_needs_forced_tagged(ds, i))
- v->untag = v->members;
- b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), def_vid);
+ vl.untag = vl.members;
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
+ def_vid);
}
+ b53_set_vlan_entry(dev, def_vid, &vl);
- /* Upon initial call we have not set-up any VLANs, but upon
- * system resume, we need to restore all VLAN entries.
- */
- for (vid = def_vid; vid < dev->num_vlans; vid++) {
- v = &dev->vlans[vid];
+ if (dev->vlan_filtering) {
+ /* Upon initial call we have not set-up any VLANs, but upon
+ * system resume, we need to restore all VLAN entries.
+ */
+ for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
- if (!v->members)
- continue;
+ if (!v->members)
+ continue;
+
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
- b53_set_vlan_entry(dev, vid, v);
- b53_fast_age_vlan(dev, vid);
+ b53_for_each_port(dev, i) {
+ if (!dsa_is_cpu_port(ds, i))
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i),
+ dev->ports[i].pvid);
+ }
}
return 0;
@@ -1125,7 +1160,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources);
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
unsigned int port;
+ u16 pvid;
int ret;
/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
@@ -1133,12 +1170,26 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+ /* The switch does not tell us the original VLAN for untagged
+ * packets, so keep the CPU port always tagged.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
}
+ /* setup default vlan for filtering mode */
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+ b53_for_each_port(dev, port) {
+ vl->members |= BIT(port);
+ if (!b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag |= BIT(port);
+ }
+
b53_reset_mib(dev);
ret = b53_apply_config(dev);
@@ -1492,7 +1543,10 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
{
struct b53_device *dev = ds->priv;
- b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
+ if (dev->vlan_filtering != vlan_filtering) {
+ dev->vlan_filtering = vlan_filtering;
+ b53_apply_config(dev);
+ }
return 0;
}
@@ -1517,7 +1571,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid >= dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, port, true, ds->vlan_filtering);
+ b53_enable_vlan(dev, port, true, dev->vlan_filtering);
return 0;
}
@@ -1530,18 +1584,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
+ u16 old_pvid, new_pvid;
int err;
err = b53_vlan_prepare(ds, port, vlan);
if (err)
return err;
- vl = &dev->vlans[vlan->vid];
+ if (vlan->vid == 0)
+ return 0;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ old_pvid = dev->ports[port].pvid;
+ if (pvid)
+ new_pvid = vlan->vid;
+ else if (!pvid && vlan->vid == old_pvid)
+ new_pvid = b53_default_pvid(dev);
+ else
+ new_pvid = old_pvid;
+ dev->ports[port].pvid = new_pvid;
+
+ vl = &dev->vlans[vlan->vid];
- if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
- untagged = true;
+ if (dsa_is_cpu_port(ds, port))
+ untagged = false;
vl->members |= BIT(port);
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
@@ -1549,13 +1614,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
else
vl->untag &= ~BIT(port);
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
- if (pvid && !dsa_is_cpu_port(ds, port)) {
+ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid);
- b53_fast_age_vlan(dev, vlan->vid);
+ new_pvid);
+ b53_fast_age_vlan(dev, old_pvid);
}
return 0;
@@ -1570,20 +1638,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_vlan *vl;
u16 pvid;
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ if (vlan->vid == 0)
+ return 0;
- vl = &dev->vlans[vlan->vid];
+ pvid = dev->ports[port].pvid;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ vl = &dev->vlans[vlan->vid];
vl->members &= ~BIT(port);
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
+ dev->ports[port].pvid = pvid;
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
@@ -1916,8 +1989,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- u16 pvlan, reg;
+ u16 pvlan, reg, pvid;
unsigned int i;
/* On 7278, port 7 which connects to the ASP should only receive
@@ -1926,15 +2000,29 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
return -EINVAL;
- /* Make this port leave the all VLANs join since we will have proper
- * VLAN entries from now on
- */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg &= ~BIT(port);
- if ((reg & BIT(cpu_port)) == BIT(cpu_port))
- reg &= ~BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port leave the all VLANs join since we will have
+ * proper VLAN entries from now on
+ */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ &reg);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ reg);
+ }
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members &= ~BIT(port);
+ if (vl->members == BIT(cpu_port))
+ vl->members &= ~BIT(cpu_port);
+ vl->untag = vl->members;
+ b53_set_vlan_entry(dev, pvid, vl);
}
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
@@ -1967,7 +2055,7 @@ EXPORT_SYMBOL(b53_br_join);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
- struct b53_vlan *vl = &dev->vlans[0];
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1993,15 +2081,18 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
dev->ports[port].vlan_ctl_mask = pvlan;
pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
- /* Make this port join all VLANs without VLAN entries */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg |= BIT(port);
- if (!(reg & BIT(cpu_port)))
- reg |= BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
- } else {
b53_get_vlan_entry(dev, pvid, vl);
vl->members |= BIT(port) | BIT(cpu_port);
vl->untag |= BIT(port) | BIT(cpu_port);
@@ -2300,6 +2391,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.phylink_get_caps = b53_phylink_get_caps,
+ .port_setup = b53_setup_port,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
.support_eee = b53_support_eee,
@@ -2757,6 +2849,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
ds->ops = &b53_switch_ops;
ds->phylink_mac_ops = &b53_phylink_mac_ops;
dev->vlan_enabled = true;
+ dev->vlan_filtering = false;
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would be breaking filtering semantics for any of the other bridge
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 0166c37a13a7..2cf3e6a81e37 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -96,6 +96,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
+ u16 pvid;
struct ethtool_keee eee;
};
@@ -147,6 +148,7 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
bool vlan_enabled;
+ bool vlan_filtering;
unsigned int num_ports;
struct b53_port *ports;
@@ -382,6 +384,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int b53_setup_port(struct dsa_switch *ds, int port);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index fa2bf3fa9019..454a8c7fd7ee 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1230,6 +1230,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.resume = bcm_sf2_sw_resume,
.get_wol = bcm_sf2_sw_get_wol,
.set_wol = bcm_sf2_sw_set_wol,
+ .port_setup = b53_setup_port,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
.support_eee = b53_support_eee,
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 940f1b71226d..7b35d24c38d7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
- int port;
+ int i, port;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
@@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
QSYS_PARAM_CFG_REG_3);
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 7a5710f9ccf6..ead0625e781f 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -104,12 +104,14 @@ struct ppe_mbox_data {
u8 xpon_hal_api;
u8 wan_xsi;
u8 ct_joyme4;
- int ppe_type;
- int wan_mode;
- int wan_sel;
+ u8 max_packet;
+ u8 rsv[3];
+ u32 ppe_type;
+ u32 wan_mode;
+ u32 wan_sel;
} init_info;
struct {
- int func_id;
+ u32 func_id;
u32 size;
u32 data;
} set_info;
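
The retyping above (int becoming u32, plus an explicit rsv[3]) pins down a structure that is an ABI shared with the NPU firmware: fixed-width members and visible padding keep every offset identical across compilers and host ABIs. The usual way to document such a contract is a compile-time check; a self-contained, purely illustrative example (not the driver's actual struct or sizes):

        #include <stddef.h>
        #include <stdint.h>

        struct fw_msg {                 /* hypothetical layout */
                uint8_t  version;
                uint8_t  rsv[3];        /* explicit padding, not compiler padding */
                uint32_t ppe_type;
                uint32_t wan_mode;
        };

        _Static_assert(offsetof(struct fw_msg, ppe_type) == 4, "no hidden padding");
        _Static_assert(sizeof(struct fw_msg) == 12, "ABI size is stable");
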
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index c9aac27883a3..92f359f2b449 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -186,7 +186,6 @@ void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
pds_client_unregister(pf, padev->client_id);
auxiliary_device_delete(&padev->aux_dev);
auxiliary_device_uninit(&padev->aux_dev);
- padev->client_id = 0;
*pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 230726d7b74f..d41b58fad37b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+ } else {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ pdata->rx_buf_size);
+ }
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f393228d41c7..f1b0fb02b3cd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+ }
+}
+
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
unsigned int index, unsigned int val)
{
@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
- xgbe_config_sph_mode(pdata);
- xgbe_config_rss(pdata);
+
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ }
+
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+ /* For Split Header */
+ hw_if->enable_sph = xgbe_config_sph_mode;
+ hw_if->disable_sph = xgbe_disable_sph_mode;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index d84a310dfcd4..8e09ad8fa022 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+ hw_if->enable_sph(pdata);
+ hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ schedule_work(&pdata->restart_work);
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+ hw_if->disable_sph(pdata);
+ hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
+ schedule_work(&pdata->restart_work);
+ }
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d85386cac8d1..ed5d43c16d0e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -865,6 +865,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+ /* For Split Header */
+ void (*enable_sph)(struct xgbe_prv_data *pdata);
+ void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c8e3468eee61..86a5de44b6f3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2015,6 +2015,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
}
return skb;
vlan_err:
+ skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
@@ -3414,6 +3415,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
bnxt_free_one_tx_ring_skbs(bp, txr, i);
}
+
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
}
static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
@@ -11599,6 +11603,9 @@ static void bnxt_init_napi(struct bnxt *bp)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
+
+ set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
+
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
@@ -12318,12 +12325,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
struct hwrm_func_drv_if_change_input *req;
- bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
bool caps_change = false;
int rc, retry = 0;
+ bool fw_reset;
u32 flags = 0;
+ fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
+ bp->fw_reset_state = 0;
+
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -12392,13 +12402,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
+ /* IRQ will be initialized later in bnxt_request_irq() */
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
- if (rc) {
- clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
- netdev_err(bp->dev, "init int mode failed\n");
- return rc;
- }
}
rc = bnxt_cancel_reservations(bp, fw_reset);
}
@@ -12797,8 +12802,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
- if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
- WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -14833,7 +14836,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
- bp->fw_reset_state = 0;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
netif_close(bp->dev);
}
@@ -16003,8 +16006,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_rdma_aux_device_del(bp);
- bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_ptp_clear(bp);
bnxt_rdma_aux_device_uninit(bp);
@@ -16931,10 +16934,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+ /* IRQ will be initialized later in bnxt_io_resume */
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- err = bnxt_init_int_mode(bp);
- bnxt_ulp_irq_restart(bp, err);
}
reset_exit:
@@ -16963,10 +16965,13 @@ static void bnxt_io_resume(struct pci_dev *pdev)
err = bnxt_hwrm_func_qcaps(bp);
if (!err) {
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
err = bnxt_open(netdev);
- else
+ } else {
err = bnxt_reserve_rings(bp, true);
+ if (!err)
+ err = bnxt_init_int_mode(bp);
+ }
}
if (!err)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 21726cf56586..bc8b3b7e915d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2614,6 +2614,7 @@ struct bnxt {
#define BNXT_FW_RESET_STATE_POLL_FW 4
#define BNXT_FW_RESET_STATE_OPENING 5
#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6
+#define BNXT_FW_RESET_STATE_ABORT 7
u16 fw_reset_min_dsecs;
#define BNXT_DFLT_FW_RST_MIN_DSECS 20
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 5576e7cf8463..a000d3f630bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -110,20 +110,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
}
}
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
+ u16 copylen = min_t(u16, len,
+ info->dest_buf_size - off);
+
+ memcpy(info->dest_buf + off, dma_buf, copylen);
+ if (copylen < len)
+ break;
} else {
rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ kfree(info->dest_buf);
+ info->dest_buf = NULL;
+ }
break;
}
}
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 48dd5922e4dd..f5d490bf997e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2062,6 +2062,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
return reg_len;
}
+#define BNXT_PCIE_32B_ENTRY(start, end) \
+ { offsetof(struct pcie_ctx_hw_stats, start), \
+ offsetof(struct pcie_ctx_hw_stats, end) }
+
+static const struct {
+ u16 start;
+ u16 end;
+} bnxt_pcie_32b_entries[] = {
+ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+};
+
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
@@ -2094,12 +2105,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
rc = hwrm_req_send(bp, req);
if (!rc) {
- __le64 *src = (__le64 *)hw_pcie_stats;
- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
- int i;
-
- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
- dst[i] = le64_to_cpu(src[i]);
+ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+ u8 *src = (u8 *)hw_pcie_stats;
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
+ if (i >= bnxt_pcie_32b_entries[j].start &&
+ i <= bnxt_pcie_32b_entries[j].end) {
+ u32 *dst32 = (u32 *)(dst + i);
+
+ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
+ i += 4;
+ if (i > bnxt_pcie_32b_entries[j].end &&
+ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+ j++;
+ } else {
+ u64 *dst64 = (u64 *)(dst + i);
+
+ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
+ i += 8;
+ }
+ }
}
hwrm_req_drop(bp, req);
}
@@ -4991,6 +5017,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+ memset(buf, 0, sizeof(u64) * bp->num_tests);
if (etest->flags & ETH_TEST_FL_OFFLINE &&
bnxt_ulp_registered(bp->edev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4998,7 +5025,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
- memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
return;
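The register dump can no longer be converted as a flat array of __le64: bnxt_pcie_32b_entries marks the byte ranges that hold 32-bit counters (the LTSSM histogram), and the loop above converts 4 or 8 bytes at a time accordingly. A standalone sketch of the same table-driven walk; memcpy stands in for le32_to_cpu()/le64_to_cpu(), which are identity operations on little-endian hosts anyway:

#include <stdint.h>
#include <string.h>

struct range { uint16_t start, end; };	/* end = offset of the last 32-bit entry */

static void convert(uint8_t *dst, const uint8_t *src, size_t size,
		    const struct range *r, size_t nr)
{
	size_t i = 0, j = 0;

	while (i < size) {
		if (j < nr && i >= r[j].start && i <= r[j].end) {
			memcpy(dst + i, src + i, 4);	/* 32-bit field */
			i += 4;
			if (i > r[j].end)
				j++;			/* range consumed */
		} else {
			memcpy(dst + i, src + i, 8);	/* 64-bit field */
			i += 8;
		}
	}
}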
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 2d4e19b96ee7..0669d43472f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -794,6 +794,27 @@ next_slot:
return HZ;
}
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
+{
+ struct bnxt_ptp_tx_req *txts_req;
+ u16 cons = ptp->txts_cons;
+
+ /* Make sure the ptp aux worker has finished; it may still be
+ * running with BNXT_STATE_OPEN set
+ */
+ ptp_cancel_worker_sync(ptp->ptp_clock);
+
+ ptp->tx_avail = BNXT_MAX_TX_TS;
+ while (cons != ptp->txts_prod) {
+ txts_req = &ptp->txts_req[cons];
+ if (!IS_ERR_OR_NULL(txts_req->tx_skb))
+ dev_kfree_skb_any(txts_req->tx_skb);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+}
+
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
{
spin_lock_bh(&ptp->ptp_tx_lock);
@@ -1105,7 +1126,6 @@ out:
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- int i;
if (!ptp)
return;
@@ -1117,12 +1137,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- for (i = 0; i < BNXT_MAX_TX_TS; i++) {
- if (ptp->txts_req[i].tx_skb) {
- dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
- ptp->txts_req[i].tx_skb = NULL;
- }
- }
-
bnxt_unmap_ptp_regs(bp);
}
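bnxt_ptp_free_txts_skbs() replaces the old walk over every slot in bnxt_ptp_clear(): it cancels the aux worker first so nothing races the drain, then frees only the entries between the consumer and producer indices. The standalone shape of that drain (hypothetical ring, free() standing in for dev_kfree_skb_any()):

#include <stdlib.h>

#define RING_SIZE 4
#define NEXT(i)   (((i) + 1) % RING_SIZE)

struct ring {
	void *slot[RING_SIZE];
	unsigned int prod, cons;
};

static void drain(struct ring *r)
{
	unsigned int cons = r->cons;

	while (cons != r->prod) {
		free(r->slot[cons]);
		r->slot[cons] = NULL;
		cons = NEXT(cons);
	}
	r->cons = cons;	/* publish the new consumer position */
}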
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index a95f05e9c579..0481161d26ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -162,6 +162,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index d88fbecdab4b..232e839a9d07 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
- np->led_mode = psrom->led_mode;
+ np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 195dc6cfd895..0e33e2eaae96 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -335,7 +335,7 @@ typedef struct t_SROM {
u16 sub_system_id; /* 0x06 */
u16 pci_base_1; /* 0x08 (IP1000A only) */
u16 pci_base_2; /* 0x0a (IP1000A only) */
- u16 led_mode; /* 0x0c (IP1000A only) */
+ __le16 led_mode; /* 0x0c (IP1000A only) */
u16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
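Marking led_mode as __le16 does two things: it documents that the EEPROM stores the field little-endian, and it lets sparse (make C=1) flag any use that skips le16_to_cpu(), which is exactly the bug fixed in dl2k.c above. For reference, a portable userspace equivalent of the conversion (illustration only):

#include <stdint.h>
#include <stdio.h>

/* Assemble the value from bytes so the result is host-endian on any
 * CPU; this is what le16_to_cpu() does on big-endian machines.
 */
static uint16_t read_le16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	uint8_t eeprom[2] = { 0x34, 0x12 };	/* little-endian 0x1234 */

	printf("led_mode = 0x%04x\n", read_le16(eeprom));
	return 0;
}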
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a86cfebedaa8..17e9bddb9ddd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
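The chained readl()s implement the ERR007885 erratum workaround and rely on short-circuit evaluation: with the quirk set, the doorbell is written only if one of up to four reads observes reg_desc_active clear, stopping at the first such read. An equivalent loop form (sketch using the driver's names, not the actual code):

bool rearm = !(fep->quirks & FEC_QUIRK_ERR007885);
int tries;

for (tries = 0; !rearm && tries < 4; tries++)
	if (!readl(txq->bd.reg_desc_active))
		rearm = true;	/* active bit seen clear: safe to trigger */
if (rearm)
	writel(0, txq->bd.reg_desc_active);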
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 09749e9f7398..4e5d8bc39a1b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -61,7 +61,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 9ff797fb36c4..b03b8758c777 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
-
- /* enable vector */
- hns3_mask_vector_irq(tqp_vector, 1);
}
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
- /* disable vector */
- hns3_mask_vector_irq(tqp_vector, 0);
-
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- /* enable the vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- /* enable rcb */
- for (j = 0; j < h->kinfo.num_tqps; j++)
- hns3_tqp_enable(h->kinfo.tqp[j]);
+ hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
-
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
- int i;
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- /* disable rcb */
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -5864,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5876,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5896,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5912,11 +5912,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_enable(h->kinfo.tqp[i]);
+ hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 59cc9221185f..ec581d4b696f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -440,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -451,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
- spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hdev->ptp = ptp;
-
return 0;
}
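The reorder above follows the init-before-publish rule: ptp_clock_register() may invoke the driver's clock ops as soon as it succeeds, and those ops take ptp->lock and touch ptp->io_base, so every field a callback can reach must be valid before registration. Condensed shape of the fixed flow (fragment built from the patch, illustrative only):

spin_lock_init(&ptp->lock);		/* 1. init everything callbacks use */
ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
hdev->ptp = ptp;			/* 2. publish the pointer           */
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
					/* 3. callbacks may fire from here  */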
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 9ba767740a04..dada42e7e0ec 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1292,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1303,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+ if (ret)
+ return ret;
+
+ hdev->rxvtag_strip_en = enable;
+ return 0;
+}
+
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
@@ -2204,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+ bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2879,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2994,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cccef3228461..0208425ab594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
+ bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 01a08cfd0090..66e070095d1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright Red Hat
-#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -14,32 +13,16 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
-#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
-#define INDEX_FIELD_DEV GENMASK(31, 16)
-#define INDEX_FIELD_BUS GENMASK(12, 5)
-#define INDEX_FIELD_SLOT GENMASK(4, 0)
-
-static unsigned long ice_adapter_index(const struct pci_dev *pdev)
+static unsigned long ice_adapter_index(u64 dsn)
{
- unsigned int domain = pci_domain_nr(pdev->bus);
-
- WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
-
- switch (pdev->device) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
- return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
- default:
- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
- }
+#if BITS_PER_LONG == 64
+ return dsn;
+#else
+ return (u32)dsn ^ (u32)(dsn >> 32);
+#endif
}
-static struct ice_adapter *ice_adapter_new(void)
+static struct ice_adapter *ice_adapter_new(u64 dsn)
{
struct ice_adapter *adapter;
@@ -47,6 +30,7 @@ static struct ice_adapter *ice_adapter_new(void)
if (!adapter)
return NULL;
+ adapter->device_serial_number = dsn;
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
refcount_set(&adapter->refcount, 1);
@@ -77,23 +61,26 @@ static void ice_adapter_free(struct ice_adapter *adapter)
* Return: Pointer to ice_adapter on success.
* ERR_PTR() on error. -ENOMEM is the only possible error.
*/
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
+ u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
int err;
+ index = ice_adapter_index(dsn);
scoped_guard(mutex, &ice_adapters_mutex) {
err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
if (err == -EBUSY) {
adapter = xa_load(&ice_adapters, index);
refcount_inc(&adapter->refcount);
+ WARN_ON_ONCE(adapter->device_serial_number != dsn);
return adapter;
}
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new();
+ adapter = ice_adapter_new(dsn);
if (!adapter)
return ERR_PTR(-ENOMEM);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@@ -110,11 +97,13 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
*
* Context: Process, may sleep.
*/
-void ice_adapter_put(const struct pci_dev *pdev)
+void ice_adapter_put(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
+ u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
+ index = ice_adapter_index(dsn);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
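The adapter map is now keyed by the PCI Device Serial Number instead of a domain/bus/slot encoding; on 32-bit builds the 64-bit DSN is folded into an unsigned long by XOR-ing its halves, and the full DSN is cached so collisions are at least detectable (the WARN_ON_ONCE above). Standalone version of the fold:

#include <stdint.h>

/* Distinct DSNs can collide after folding, which is why the driver
 * also stores device_serial_number and warns on mismatch.
 */
static unsigned long dsn_to_index(uint64_t dsn)
{
	if (sizeof(unsigned long) == 8)
		return (unsigned long)dsn;
	return (unsigned long)((uint32_t)dsn ^ (uint32_t)(dsn >> 32));
}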
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index e233225848b3..ac15c0d2bc1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -32,6 +32,7 @@ struct ice_port_list {
* @refcount: Reference count. struct ice_pf objects hold the references.
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
+ * @device_serial_number: DSN cached for collision detection on 32bit systems
*/
struct ice_adapter {
refcount_t refcount;
@@ -40,9 +41,10 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
+ u64 device_serial_number;
};
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
-void ice_adapter_put(const struct pci_dev *pdev);
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
+void ice_adapter_put(struct pci_dev *pdev);
#endif /* _ICE_ADAPTER_H */
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 69d5b1a28491..59323c019544 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2345,15 +2345,15 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
- }
- if (hw->mac_type != ICE_MAC_GENERIC_3K_E825)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ if (hw->mac_type == ICE_MAC_E810 ||
+ hw->mac_type == ICE_MAC_GENERIC)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ }
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 7752920d7a8e..1cca9b2262e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -2097,6 +2097,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err_exit;
+ }
#define ICE_VF_MAX_FDIR_FILTERS 128
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 66544faab710..aef0e9775a33 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -629,13 +629,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
-#define IDPF_CAP_RX_CSUM_L4V4 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+#define IDPF_CAP_TX_CSUM_L4V4 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
-#define IDPF_CAP_RX_CSUM_L4V6 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+#define IDPF_CAP_TX_CSUM_L4V6 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_RX_CSUM (\
VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
@@ -644,11 +644,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
-#define IDPF_CAP_SCTP_CSUM (\
+#define IDPF_CAP_TX_SCTP_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
#define IDPF_CAP_TUNNEL_TX_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index aa755dedb41d..82f09b4030bc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -703,8 +703,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
+ netdev_features_t other_offloads = 0;
+ netdev_features_t csum_offloads = 0;
+ netdev_features_t tso_offloads = 0;
netdev_features_t dflt_features;
- netdev_features_t offloads = 0;
struct idpf_netdev_priv *np;
struct net_device *netdev;
u16 idx = vport->idx;
@@ -766,53 +768,32 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
- dflt_features |= NETIF_F_IP_CSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
- dflt_features |= NETIF_F_IPV6_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
+ csum_offloads |= NETIF_F_IP_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
+ csum_offloads |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
- dflt_features |= NETIF_F_RXCSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
- dflt_features |= NETIF_F_SCTP_CRC;
+ csum_offloads |= NETIF_F_RXCSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
+ csum_offloads |= NETIF_F_SCTP_CRC;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
- dflt_features |= NETIF_F_TSO;
+ tso_offloads |= NETIF_F_TSO;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
- dflt_features |= NETIF_F_TSO6;
+ tso_offloads |= NETIF_F_TSO6;
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_IPV4_UDP |
VIRTCHNL2_CAP_SEG_IPV6_UDP))
- dflt_features |= NETIF_F_GSO_UDP_L4;
+ tso_offloads |= NETIF_F_GSO_UDP_L4;
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
- offloads |= NETIF_F_GRO_HW;
- /* advertise to stack only if offloads for encapsulated packets is
- * supported
- */
- if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
- VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
- offloads |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- 0;
-
- if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
- IDPF_CAP_TUNNEL_TX_CSUM))
- netdev->gso_partial_features |=
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
- offloads |= NETIF_F_TSO_MANGLEID;
- }
+ other_offloads |= NETIF_F_GRO_HW;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
- offloads |= NETIF_F_LOOPBACK;
+ other_offloads |= NETIF_F_LOOPBACK;
- netdev->features |= dflt_features;
- netdev->hw_features |= dflt_features | offloads;
- netdev->hw_enc_features |= dflt_features | offloads;
+ netdev->features |= dflt_features | csum_offloads | tso_offloads;
+ netdev->hw_features |= netdev->features | other_offloads;
+ netdev->vlan_features |= netdev->features | other_offloads;
+ netdev->hw_enc_features |= dflt_features | other_offloads;
idpf_set_ethtool_ops(netdev);
netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
@@ -1132,11 +1113,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs) {
- kfree(vport);
- return NULL;
- }
+ if (!vport->q_vector_idxs)
+ goto free_vport;
idpf_vport_init(vport, max_q);
/* This alloc is done separate from the LUT because it's not strictly
@@ -1146,11 +1125,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
- if (!rss_data->rss_key) {
- kfree(vport);
- return NULL;
- }
+ if (!rss_data->rss_key)
+ goto free_vector_idxs;
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
@@ -1163,6 +1140,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
adapter->next_vport = idpf_get_free_slot(adapter);
return vport;
+
+free_vector_idxs:
+ kfree(vport->q_vector_idxs);
+free_vport:
+ kfree(vport);
+
+ return NULL;
}
/**
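The allocation failure paths above are collapsed into a single unwind ladder: each later failure jumps to a label that releases everything acquired so far, in reverse order. The standalone shape of the pattern (hypothetical struct, userspace allocators):

#include <stdlib.h>

struct obj { void *a, *b; };

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	o->a = malloc(32);
	if (!o->a)
		goto free_obj;
	o->b = malloc(64);
	if (!o->b)
		goto free_a;
	return o;

free_a:			/* unwind in reverse order of acquisition */
	free(o->a);
free_obj:
	free(o);
	return NULL;
}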
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index bec4a02c5373..b35713036a54 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -89,6 +89,7 @@ static void idpf_shutdown(struct pci_dev *pdev)
{
struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+ cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->vc_event_task);
idpf_vc_core_deinit(adapter);
idpf_deinit_dflt_mbx(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 612ed26a29c5..efc7b30e4211 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -1290,6 +1290,8 @@ void igc_ptp_reset(struct igc_adapter *adapter)
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ mutex_lock(&adapter->ptm_lock);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@@ -1308,7 +1310,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
if (!igc_is_crosststamp_supported(adapter))
break;
- mutex_lock(&adapter->ptm_lock);
wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
@@ -1332,7 +1333,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
igc_ptm_reset(hw);
- mutex_unlock(&adapter->ptm_lock);
break;
default:
/* No work to do. */
@@ -1349,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptm_lock);
+
wrfl();
}
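The point of the reorder above: a mutex can sleep, so taking ptm_lock inside the spin_lock_irqsave() region was a sleep-in-atomic bug; the mutex now brackets the whole spinlock section instead. The resulting nesting (sketch):

mutex_lock(&adapter->ptm_lock);			/* may sleep: taken first   */
spin_lock_irqsave(&adapter->tmreg_lock, flags);	/* atomic context starts    */
/* ... program SYSTIM / PTM registers ... */
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
mutex_unlock(&adapter->ptm_lock);		/* released after the unlock */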
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 0a679e95196f..24499bb36c00 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1223,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
- octep_stop(oct->netdev);
+ dev_close(oct->netdev);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 18c922dd5fc6..ccb69bc5c952 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
- schedule_work(&oct->tx_timeout_task);
+ if (!schedule_work(&oct->tx_timeout_task))
+ netdev_put(netdev, NULL);
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)
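The fix relies on schedule_work() returning false when the work item is already queued: the reference taken just before is consumed by exactly one execution of the handler, so if no new execution was scheduled the reference must be dropped on the spot or the netdev refcount leaks. The contract in miniature (fragment from the patch, annotated):

netdev_hold(netdev, NULL, GFP_ATOMIC);		/* ref travels with the work  */
if (!schedule_work(&oct->tx_timeout_task))
	netdev_put(netdev, NULL);		/* already queued: one ref is
						 * enough, give ours back     */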
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 47807b202310..22a532695fb0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = {
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
- "top_usxgmii0_sel",
- "top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
- "top_xfi_phy0_xtal_sel",
- "top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
@@ -2252,14 +2248,18 @@ skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
+ rxd->rxd2);
+ else
+ addr64 = RX_DMA_PREP_ADDR64(dma_addr);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
- likely(dma_addr != DMA_MAPPING_ERROR))
- rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
ring->calc_idx = idx;
done++;
@@ -3186,11 +3186,19 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
- int i;
+ int i, j, txqs = 1;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ for (j = 0; j < txqs; j++)
+ netdev_tx_reset_subqueue(eth->netdev[i], j);
+ }
- for (i = 0; i < MTK_MAX_DEVS; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
MTK_QDMA_RING_SIZE * soc->tx.desc_size,
@@ -3465,9 +3473,6 @@ static int mtk_open(struct net_device *dev)
}
mtk_gdm_config(eth, target_mac->id, gdm_config);
}
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 76f202d7f055..b175119a6a7d 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
+ unsigned long flags;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
netif_wake_queue(ndev);
if (napi_complete(napi)) {
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, false, true);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
@@ -1341,16 +1342,16 @@ push_new_skb:
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
+ unsigned long flags;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- spin_lock(&priv->lock);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, true, false);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return work_done;
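Both poll handlers now use the irqsave variants because priv->lock is also taken from the device's hard interrupt handler; with plain spin_lock() a single CPU can deadlock against itself. The scenario, sketched (illustrative, not driver code):

/*
 *   CPU0, NAPI poll (IRQs on)          CPU0, device hardirq
 *   --------------------------          --------------------
 *   spin_lock(&priv->lock);
 *            <- interrupt fires ->
 *                                       spin_lock(&priv->lock);  spins forever
 *
 * spin_lock_irqsave() masks local interrupts for the critical section,
 * so the hardirq path cannot preempt the holder on the same CPU.
 */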
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 532c7fa94d17..dbd9482359e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -176,6 +176,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
priv = ptpsq->txqsq.priv;
+ rtnl_lock();
mutex_lock(&priv->state_lock);
chs = &priv->channels;
netdev = priv->netdev;
@@ -183,22 +184,19 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
- rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
- rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
- rtnl_lock();
mlx5e_activate_priv_channels(priv);
- rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
netif_carrier_on(netdev);
mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
return err;
}
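Moving rtnl_lock() to the top gives the recovery handler the same lock order the configuration paths use (rtnl first, then priv->state_lock); the old code acquired them in opposite orders from two contexts, the classic ABBA pattern sketched below (illustrative only):

/*
 *   recover (old code)                concurrent configuration path
 *   ------------------                -----------------------------
 *   mutex_lock(&priv->state_lock);    rtnl_lock();
 *   rtnl_lock();         <- blocks    mutex_lock(&priv->state_lock); <- blocks
 *
 * One fixed order, rtnl -> state_lock, makes the deadlock impossible.
 */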
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 5c762a71818d..7a18a469961d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
- misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
if (err)
return err;
+
+ /* We can't mix custom tunnel headers with symbolic ones and we
+ * don't have a symbolic field name for GBP, so we use custom
+ * tunnel headers in this case. We need hardware support to
+ * match on custom tunnel headers, but we already know it's
+ * supported because the previous call successfully checked for
+ * that.
+ */
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_5);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_5);
+
+ /* Shift by 8 to account for the reserved bits in the vxlan
+ * header after the VNI.
+ */
+ MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
+ be32_to_cpu(enc_keyid.mask->keyid) << 8);
+ MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
+ be32_to_cpu(enc_keyid.key->keyid) << 8);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ return 0;
}
/* match on VNI is required */
@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
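The << 8 in the GBP branch above aligns the 24-bit VNI with the misc5 tunnel_header_1 field, which holds the whole fourth 32-bit word of the VXLAN header: VNI in the top 24 bits, a reserved byte at the bottom. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vni = 0x123456;	/* 24-bit VNI             */
	uint32_t word = vni << 8;	/* VNI(24) | reserved(8)  */

	printf("tunnel_header_1 = 0x%08x\n", word);	/* 0x12345600 */
	return 0;
}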
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ba99609999f..f1d908f61134 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
!list_is_first(&attr->list, &flow->attrs))
return 0;
- if (flow_flag_test(flow, SLOW))
- return 0;
-
esw_attr = attr->esw_attr;
if (!esw_attr->split_count ||
esw_attr->split_count == esw_attr->out_count - 1)
@@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
/* external dest with encap is considered as internal by firmware */
if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
- !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
ext_dest = true;
else
int_dest = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a6a8eea5980c..0e3a977d5332 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3533,7 +3533,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
int err;
mutex_init(&esw->offloads.termtbl_mutex);
- mlx5_rdma_enable_roce(esw->dev);
+ err = mlx5_rdma_enable_roce(esw->dev);
+ if (err)
+ goto err_roce;
err = mlx5_esw_host_number_init(esw);
if (err)
@@ -3594,6 +3596,7 @@ err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
+err_roce:
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index a42f6cd99b74..5c552b71e371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
+ u8 mac[ETH_ALEN] = {};
union ib_gid gid;
- u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
mlx5_nic_vport_disable_roce(dev);
}
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
- return;
+ return 0;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
- return;
+ return err;
}
err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
goto del_roce_addr;
}
- return;
+ return err;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
+ return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
index 750cff2a71a4..3d9e76c3d42f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
@@ -8,12 +8,12 @@
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 4ca7b99ef131..de6b1a340f55 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -154,14 +154,14 @@ struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
void fbnic_devlink_register(struct fbnic_dev *fbd);
void fbnic_devlink_unregister(struct fbnic_dev *fbd);
-int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
-void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd);
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
void fbnic_hwmon_register(struct fbnic_dev *fbd);
void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
-void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+int fbnic_pcs_request_irq(struct fbnic_dev *fbd);
+void fbnic_pcs_free_irq(struct fbnic_dev *fbd);
void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
int fbnic_napi_request_irq(struct fbnic_dev *fbd,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 3b12a0ab5906..51bee8072420 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -796,8 +796,10 @@ enum {
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
0x3106e /* 0xc41b8 */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 88db3dacb940..3d9636a6c968 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ /* Write the upper 32b and then the lower 32b. This way the
+ * FW can read lower, upper, lower to verify that the state
+ * of the descriptor wasn't changed mid-transaction.
+ */
fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
fw_wrfl(fbd);
fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
+static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u32 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+ /* For initialization we write the lower 32b of the descriptor first.
+ * This way we can set the state to mark it invalid before we clear the
+ * upper 32b.
+ */
+ fw_wr32(fbd, desc_offset, desc);
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset + 1, 0);
+}
+
static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
@@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
return desc;
}
-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int desc_idx;
+ /* Disable DMA transactions from the device,
+ * and flush any transactions triggered during cleaning
+ */
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
+ break;
+ }
+
+ wrfl(fbd);
+
/* Initialize first descriptor to all 0s. Doing this gives us a
* solid stop for the firmware to hit when it is done looping
* through the ring.
*/
- __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
-
- fw_wrfl(fbd);
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
/* We then fill the rest of the ring starting at the end and moving
* back toward descriptor 0 with skip descriptors that have neither
* length nor address, and tell the firmware that it can skip
* them and just move past them to the one we initialized to 0.
*/
- for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
- __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
- FBNIC_IPC_MBX_DESC_FW_CMPL |
- FBNIC_IPC_MBX_DESC_HOST_CMPL);
- fw_wrfl(fbd);
- }
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
void fbnic_mbx_init(struct fbnic_dev *fbd)
@@ -76,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_init_desc_ring(fbd, i);
+ fbnic_mbx_reset_desc_ring(fbd, i);
}
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
@@ -141,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int i;
- fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+ fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
@@ -322,67 +352,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
return err;
}
-/**
- * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
- * @fbd: FBNIC device structure
- *
- * Return: NULL on failure to allocate, error pointer on error, or pointer
- * to new TLV test message.
- *
- * Sends a single TLV header indicating the host wants the firmware to
- * confirm the capabilities and version.
- **/
-static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
-{
- int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
-
- /* Return 0 if we are not calling this on ASIC */
- return (err == -EOPNOTSUPP) ? 0 : err;
-}
-
-static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
- /* This is a one time init, so just exit if it is completed */
- if (mbx->ready)
- return;
-
mbx->ready = true;
switch (mbx_idx) {
case FBNIC_IPC_MBX_RX_IDX:
+ /* Enable DMA writes from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+
/* Make sure we have a page for the FW to write to */
fbnic_mbx_alloc_rx_msgs(fbd);
break;
case FBNIC_IPC_MBX_TX_IDX:
- /* Force version to 1 if we successfully requested an update
- * from the firmware. This should be overwritten once we get
- * the actual version from the firmware in the capabilities
- * request message.
- */
- if (!fbnic_fw_xmit_cap_msg(fbd) &&
- !fbd->fw_cap.running.mgmt.version)
- fbd->fw_cap.running.mgmt.version = 1;
+ /* Enable DMA reads from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
break;
}
}
-static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
- int i;
-
- /* We only need to do this on the first interrupt following init.
+ /* We only need to do this on the first interrupt following reset.
* This primes the mailbox so that we will have cleared all the
* skip descriptors.
*/
if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
- return;
+ return false;
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
- for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_postinit_desc_ring(fbd, i);
+ return true;
}
/**
@@ -859,7 +863,7 @@ next_page:
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
- fbnic_mbx_postinit(fbd);
+ fbnic_mbx_event(fbd);
fbnic_mbx_process_tx_msgs(fbd);
fbnic_mbx_process_rx_msgs(fbd);
@@ -867,60 +871,97 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
- struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+ int err, i;
- /* Immediate fail if BAR4 isn't there */
- if (!fbnic_fw_present(fbd))
- return -ENODEV;
+ do {
+ if (!time_is_after_jiffies(timeout))
+ return -ETIMEDOUT;
- tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- while (!tx_mbx->ready && --attempts) {
/* Force the firmware to trigger an interrupt response to
* avoid the mailbox getting stuck closed if the interrupt
* is reset.
*/
- fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+ fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
- msleep(200);
+ /* Immediate fail if BAR4 went away */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
- fbnic_mbx_poll(fbd);
- }
+ msleep(20);
+ } while (!fbnic_mbx_event(fbd));
+
+ /* FW has shown signs of life. Enable DMA and start Tx/Rx */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+
+ /* Request an update from the firmware. This should overwrite
+ * mgmt.version once we get the actual version from the firmware
+ * in the capabilities request message.
+ */
+ err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+ if (err)
+ goto clean_mbx;
+
+ /* Use "1" to indicate we entered the state waiting for a response */
+ fbd->fw_cap.running.mgmt.version = 1;
+
+ return 0;
+clean_mbx:
+ /* Cleanup Rx buffers and disable mailbox */
+ fbnic_mbx_clean(fbd);
+ return err;
+}
+
+static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
+{
+ cmpl_data->result = -EPIPE;
+ complete(&cmpl_data->done);
+}
- return attempts ? 0 : -ETIMEDOUT;
+static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
+{
+ if (fbd->cmpl_data) {
+ __fbnic_fw_evict_cmpl(fbd->cmpl_data);
+ fbd->cmpl_data = NULL;
+ }
}
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
+ unsigned long timeout = jiffies + 10 * HZ + 1;
struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
- u8 count = 0;
-
- /* Nothing to do if there is no mailbox */
- if (!fbnic_fw_present(fbd))
- return;
+ u8 tail;
/* Record current Tx mailbox state */
tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- /* Nothing to do if mailbox never got to ready */
- if (!tx_mbx->ready)
- return;
+ spin_lock_irq(&fbd->fw_tx_lock);
+
+ /* Clear ready to prevent any further attempts to transmit */
+ tx_mbx->ready = false;
+
+ /* Read tail to determine the last tail state for the ring */
+ tail = tx_mbx->tail;
+
+ /* Flush any completions as we are no longer processing Rx */
+ fbnic_mbx_evict_all_cmpl(fbd);
+
+ spin_unlock_irq(&fbd->fw_tx_lock);
/* Give firmware time to process packets;
- * we will wait up to 10 seconds which is 50 waits of 200ms.
+ * we will wait up to 10 seconds which is 500 waits of 20ms.
*/
do {
u8 head = tx_mbx->head;
- if (head == tx_mbx->tail)
+ /* Tx ring is empty once head == tail */
+ if (head == tail)
break;
- msleep(200);
+ msleep(20);
fbnic_mbx_process_tx_msgs(fbd);
-
- count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
- } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+ } while (time_is_after_jiffies(timeout));
}
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
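The write ordering documented in __fbnic_mbx_wr_desc() (upper word first, lower word last) makes the lower word act as a guard against torn 64-bit updates across the 32-bit mailbox window: a reader that samples lower, upper, lower and sees the two lower reads agree knows the upper read belongs to the same update. Reader-side sketch (hypothetical volatile storage in place of the mailbox registers):

#include <stdint.h>

static volatile uint32_t desc_lo, desc_hi;	/* descriptor halves */

static uint64_t read_desc_stable(void)
{
	uint32_t lo, hi;

	do {
		lo = desc_lo;
		hi = desc_hi;
	} while (lo != desc_lo);	/* low word moved: writer raced us */

	return ((uint64_t)hi << 32) | lo;
}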
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 1bbc0e56f3a0..1c88a2bf3a7a 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -19,69 +19,105 @@ static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static int __fbnic_fw_enable_mbx(struct fbnic_dev *fbd, int vector)
+{
+ int err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ fbnic_mbx_init(fbd);
+ err = fbnic_mbx_poll_tx_ready(fbd);
+ if (err) {
+ dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ return err;
+ }
+
+ /* Enable interrupt and unmask the vector */
+ enable_irq(vector);
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
+
/**
- * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox
+ * fbnic_fw_request_mbx - Configure and initialize Firmware Mailbox
* @fbd: Pointer to device to initialize
*
- * This function will initialize the firmware mailbox rings, enable the IRQ
- * and initialize the communication between the Firmware and the host. The
- * firmware is expected to respond to the initialization by sending an
- * interrupt essentially notifying the host that it has seen the
- * initialization and is now synced up.
+ * This function will allocate the IRQ and then reinitialize the mailbox,
+ * starting communication between the host and firmware.
*
* Return: non-zero on failure.
**/
-int fbnic_fw_enable_mbx(struct fbnic_dev *fbd)
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd)
{
- u32 vector = fbd->fw_msix_vector;
- int err;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
+
+ WARN_ON(fbd->fw_msix_vector);
+
+ vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
/* Request the IRQ for FW Mailbox vector. */
err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr,
- IRQF_ONESHOT, dev_name(fbd->dev), fbd);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ dev_name(fbd->dev), fbd);
if (err)
return err;
/* Initialize mailbox and attempt to poll it into ready state */
- fbnic_mbx_init(fbd);
- err = fbnic_mbx_poll_tx_ready(fbd);
- if (err) {
- dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ err = __fbnic_fw_enable_mbx(fbd, vector);
+ if (err)
free_irq(vector, fbd);
- return err;
- }
- /* Enable interrupts */
- fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ fbd->fw_msix_vector = vector;
- return 0;
+ return err;
}
/**
- * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state
- * @fbd: Pointer to device to disable
+ * fbnic_fw_disable_mbx - Temporarily place mailbox in standby state
+ * @fbd: Pointer to device
*
- * This function will disable the mailbox interrupt, free any messages still
- * in the mailbox and place it into a standby state. The firmware is
- * expected to see the update and assume that the host is in the reset state.
+ * Shut down the mailbox by notifying the firmware to stop sending us logs,
+ * mask and synchronize the IRQ, and then clean up the rings.
**/
-void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
+static void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
{
- /* Disable interrupt and free vector */
- fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ /* Disable interrupt and synchronize the IRQ */
+ disable_irq(fbd->fw_msix_vector);
- /* Free the vector */
- free_irq(fbd->fw_msix_vector, fbd);
+ /* Mask the vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
/* Make sure disabling logs message is sent, must be done here to
* avoid risk of completing without a running interrupt.
*/
fbnic_mbx_flush_tx(fbd);
-
- /* Reset the mailboxes to the initialized state */
fbnic_mbx_clean(fbd);
}
+/**
+ * fbnic_fw_free_mbx - Disable mailbox and place it in standby state
+ * @fbd: Pointer to device to disable
+ *
+ * This function will disable the mailbox interrupt, free any messages still
+ * in the mailbox and place it into a disabled state. The firmware is
+ * expected to see the update and assume that the host is in the reset state.
+ **/
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd)
+{
+ /* Vector has already been freed */
+ if (!fbd->fw_msix_vector)
+ return;
+
+ fbnic_fw_disable_mbx(fbd);
+
+ /* Free the vector */
+ free_irq(fbd->fw_msix_vector, fbd);
+ fbd->fw_msix_vector = 0;
+}
+
static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
{
struct fbnic_dev *fbd = data;
@@ -101,7 +137,7 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
}
/**
- * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link
+ * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link
* @fbd: Pointer to device to initialize
*
* This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
@@ -109,41 +145,61 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
*
* Return: non-zero on failure.
**/
-int fbnic_pcs_irq_enable(struct fbnic_dev *fbd)
+int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
{
- u32 vector = fbd->pcs_msix_vector;
- int err;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
- /* Request the IRQ for MAC link vector.
- * Map MAC cause to it, and unmask it
+ WARN_ON(fbd->pcs_msix_vector);
+
+ vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
+
+ /* Request the IRQ for PCS link vector.
+ * Map PCS cause to it, and unmask it
*/
err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
fbd->netdev->name, fbd);
if (err)
return err;
+ /* Map and enable interrupt, unmask vector after link is configured */
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
+ fbd->pcs_msix_vector = vector;
+
return 0;
}
/**
- * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping
+ * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping
* @fbd: Pointer to device that is stopping
*
- * This function undoes the work done in fbnic_pcs_irq_enable and prepares
+ * This function undoes the work done in fbnic_pcs_request_irq and prepares
* the device to no longer receive traffic on the host interface.
**/
-void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
+void fbnic_pcs_free_irq(struct fbnic_dev *fbd)
{
+ /* Vector has already been freed */
+ if (!fbd->pcs_msix_vector)
+ return;
+
/* Disable interrupt */
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY);
+ fbnic_wrfl(fbd);
+
+ /* Synchronize IRQ to prevent race that would unmask vector */
+ synchronize_irq(fbd->pcs_msix_vector);
+
+ /* Mask the vector */
fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
/* Free the vector */
free_irq(fbd->pcs_msix_vector, fbd);
+ fbd->pcs_msix_vector = 0;
}
void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
@@ -226,9 +282,6 @@ void fbnic_free_irqs(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
- fbd->pcs_msix_vector = 0;
- fbd->fw_msix_vector = 0;
-
fbd->num_irqs = 0;
pci_free_irq_vectors(pdev);
@@ -254,8 +307,5 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd)
fbd->num_irqs = num_irqs;
- fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
- fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
-
return 0;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 14291401f463..dde4a37116e2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
-
- /* Enable XALI AR/AW outbound */
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
- FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
- FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
}
static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 79a01fdd1dd1..2524d9b88d59 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -44,9 +44,10 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto time_stop;
- err = fbnic_pcs_irq_enable(fbd);
+ err = fbnic_pcs_request_irq(fbd);
if (err)
goto time_stop;
+
/* Pull the BMC config and initialize the RPC */
fbnic_bmc_rpc_init(fbd);
fbnic_rss_reinit(fbd, fbn);
@@ -82,7 +83,7 @@ static int fbnic_stop(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
fbnic_down(fbn);
- fbnic_pcs_irq_disable(fbn->fbd);
+ fbnic_pcs_free_irq(fbn->fbd);
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 6cbbc2ee3e1f..4e8595239c0f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -283,7 +283,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto free_irqs;
}
- err = fbnic_fw_enable_mbx(fbd);
+ err = fbnic_fw_request_mbx(fbd);
if (err) {
dev_err(&pdev->dev,
"Firmware mailbox initialization failure\n");
@@ -363,7 +363,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
- fbnic_fw_disable_mbx(fbd);
+ fbnic_fw_free_mbx(fbd);
fbnic_free_irqs(fbd);
fbnic_devlink_free(fbd);
@@ -387,7 +387,7 @@ static int fbnic_pm_suspend(struct device *dev)
rtnl_unlock();
null_uc_addr:
- fbnic_fw_disable_mbx(fbd);
+ fbnic_fw_free_mbx(fbd);
/* Free the IRQs so they aren't trying to occupy sleeping CPUs */
fbnic_free_irqs(fbd);
@@ -420,7 +420,7 @@ static int __fbnic_pm_resume(struct device *dev)
fbd->mac->init_regs(fbd);
/* Re-enable mailbox */
- err = fbnic_fw_enable_mbx(fbd);
+ err = fbnic_fw_request_mbx(fbd);
if (err)
goto err_free_irqs;
@@ -438,15 +438,15 @@ static int __fbnic_pm_resume(struct device *dev)
if (netif_running(netdev)) {
err = __fbnic_open(fbn);
if (err)
- goto err_disable_mbx;
+ goto err_free_mbx;
}
rtnl_unlock();
return 0;
-err_disable_mbx:
+err_free_mbx:
rtnl_unlock();
- fbnic_fw_disable_mbx(fbd);
+ fbnic_fw_free_mbx(fbd);
err_free_irqs:
fbnic_free_irqs(fbd);
err_invalidate_uc_addr:
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 23760b613d3e..e2d6bfb5d693 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1815,6 +1815,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
if (nr_frags <= 0) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_first;
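+ /* Single-buffer frame: the first descriptor is also the last one */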
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -1884,6 +1885,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
tx->frame_first = 0;
tx->frame_data0 = 0;
tx->frame_tail = 0;
+ tx->frame_last = 0;
return -ENOMEM;
}
@@ -1924,16 +1926,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_tail;
}
- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- buffer_info = &tx->buffer_info[tx->frame_tail];
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
+ buffer_info = &tx->buffer_info[tx->frame_last];
buffer_info->skb = skb;
if (time_stamp)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
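+ /* The skb bookkeeping above moved to frame_last; data0 is still
+ * written through the tail descriptor below.
+ */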
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 7f73d66854be..db5fc73e41cc 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -980,6 +980,7 @@ struct lan743x_tx {
u32 frame_first;
u32 frame_data0;
u32 frame_tail;
+ u32 frame_last;
struct lan743x_tx_buffer_info *buffer_info;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ef93df520887..08bee56aea35 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
/* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
ocelot_bridge_vlan_find(ocelot, vid));
if (err)
return err;
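+ /* This VLAN is currently the port's PVID but is being re-added
+ * as non-PVID, so clear the PVID.
+ */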
+ } else if (ocelot_port->pvid_vlan &&
+ ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
}
/* Untagged egress VLAN classification */
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 2aacc1996796..55b8d3666153 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1925,8 +1925,8 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
- msb = fls(time_us);
- if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
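+ /* Value does not fit in the count field: derive a coarser
+ * time unit from the MSB.
+ */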
+ msb = fls(time_us);
time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
} else {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index b4be76e13a2f..d88a0180294e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -187,7 +187,6 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
xdp_return_frame(xdpf);
break;
default:
- netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
prueth_xmit_free(tx_chn, desc_tx);
ndev->stats.tx_dropped++;
continue;
@@ -567,6 +566,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
{
struct cppi5_host_desc_t *first_desc;
struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
struct prueth_swdata *swdata;
@@ -620,12 +620,17 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
swdata->data.xdpf = xdpf;
}
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, xdpf->len);
+
cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
if (ret) {
netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, xdpf->len);
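+ /* Roll back the BQL bytes reported above since nothing was sent */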
goto drop_free_descs;
}
@@ -650,6 +655,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
struct page *page, u32 *len)
{
struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
u32 pkt_len = *len;
@@ -669,8 +676,11 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
goto drop;
}
- q_idx = smp_processor_id() % emac->tx_ch_num;
+ q_idx = cpu % emac->tx_ch_num;
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
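+ /* Serialize XDP transmit against the regular xmit path on this queue */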
+ __netif_tx_lock(netif_txq, cpu);
result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ __netif_tx_unlock(netif_txq);
if (result == ICSSG_XDP_CONSUMED) {
ndev->stats.tx_dropped++;
goto drop;
@@ -979,6 +989,7 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
if (ret) {
netdev_err(ndev, "tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
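+ /* Undo the BQL accounting for this frame since the push failed */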
goto drop_free_descs;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 443f90fa6557..86fc1278127c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1075,17 +1075,21 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame
{
struct prueth_emac *emac = netdev_priv(dev);
struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
struct xdp_frame *xdpf;
unsigned int q_idx;
int nxmit = 0;
u32 err;
int i;
- q_idx = smp_processor_id() % emac->tx_ch_num;
+ q_idx = cpu % emac->tx_ch_num;
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
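+ /* All frames in this batch use the same queue; hold its lock
+ * across the loop.
+ */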
+ __netif_tx_lock(netif_txq, cpu);
for (i = 0; i < n; i++) {
xdpf = frames[i];
err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
@@ -1095,6 +1099,7 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame
}
nxmit++;
}
+ __netif_tx_unlock(netif_txq);
return nxmit;
}
@@ -1109,11 +1114,6 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame
static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
{
struct bpf_prog *prog = bpf->prog;
- xdp_features_t val;
-
- val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT;
- xdp_set_features_flag(emac->ndev, val);
if (!emac->xdpi.prog && !prog)
return 0;
@@ -1291,6 +1291,10 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features = NETIF_F_SG;
ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
+ xdp_set_features_flag(ndev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT);
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 89dc4c401a8d..e4d993f31374 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -33,7 +34,7 @@
#define CMD_CTR (0x2 << CMD_SHIFT)
#define CMD_MASK GENMASK(15, CMD_SHIFT)
-#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
+#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0)
#define DET_CMD_LEN 4
#define DET_SOF_LEN 2
@@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
}
static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
- unsigned int frame_len)
+ unsigned int frame_len, bool drop)
{
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
struct spi_transfer *xfer = &mses->spi_xfer;
@@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
__func__, ret);
mse->stats.xfer_err++;
+ } else if (drop) {
+ netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
+ ret = -EINVAL;
} else if (*sof != cpu_to_be16(DET_SOF)) {
netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
__func__, *sof);
@@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
struct sk_buff *skb;
unsigned int rxalign;
unsigned int rxlen;
+ bool drop = false;
__be16 rx = 0;
u16 cmd_resp;
u8 *rxpkt;
@@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
__func__, cmd_resp);
mse->stats.invalid_rts++;
- return;
+ drop = true;
+ goto drop;
}
net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
@@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
}
rxlen = cmd_resp & LEN_MASK;
- if (!rxlen) {
- net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+ if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
+ net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
+ rxlen);
mse->stats.invalid_len++;
- return;
+ drop = true;
}
+ /* In case of an invalid CMD_RTS, the frame must be consumed anyway.
+ * So assume the maximum possible frame length.
+ */
+drop:
+ if (drop)
+ rxlen = VLAN_ETH_FRAME_LEN;
+
rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
if (!skb)
@@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
* They are copied, but ignored.
*/
rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
- if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
mse->ndev->stats.rx_errors++;
dev_kfree_skb(skb);
return;
@@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
static int mse102x_net_open(struct net_device *ndev)
{
struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
int ret;
ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
@@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev)
netif_carrier_on(ndev);
+ /* The SPI interrupt can get stuck if packets are pending.
+ * So poll for possible packets to re-arm the interrupt.
+ */
+ mutex_lock(&mses->lock);
+ mse102x_rx_pkt_spi(mse);
+ mutex_unlock(&mses->lock);
+
netif_dbg(mse, ifup, ndev, "network device up\n");
return 0;
diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
index 00c66240136b..3dd12a8c8b03 100644
--- a/drivers/net/mdio/mdio-mux-meson-gxl.c
+++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
@@ -17,6 +17,7 @@
#define REG2_LEDACT GENMASK(23, 22)
#define REG2_LEDLINK GENMASK(25, 24)
#define REG2_DIV4SEL BIT(27)
+#define REG2_REVERSED BIT(28)
#define REG2_ADCBYPASS BIT(30)
#define REG2_CLKINSEL BIT(31)
#define ETH_REG3 0x4
@@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
* The only constraint is that it must match the one in
* drivers/net/phy/meson-gxl.c to properly match the PHY.
*/
- writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+ writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
priv->regs + ETH_REG2);
/* Enable the internal phy */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index bb0bf1415872..7b3739b29c8f 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -630,16 +630,6 @@ static const struct driver_info zte_rndis_info = {
.tx_fixup = rndis_tx_fixup,
};
-static const struct driver_info wwan_rndis_info = {
- .description = "Mobile Broadband RNDIS device",
- .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
- .bind = rndis_bind,
- .unbind = rndis_unbind,
- .status = rndis_status,
- .rx_fixup = rndis_rx_fixup,
- .tx_fixup = rndis_tx_fixup,
-};
-
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -676,11 +666,9 @@ static const struct usb_device_id products [] = {
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
.driver_info = (unsigned long) &rndis_info,
}, {
- /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and
- * Telit FN990A (RNDIS)
- */
+ /* Novatel Verizon USB730L */
USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
- .driver_info = (unsigned long)&wwan_rndis_info,
+ .driver_info = (unsigned long) &rndis_info,
},
{ }, // END
};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 848fab51dfa1..e53ba600605a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3383,12 +3383,15 @@ static void __virtnet_rx_resume(struct virtnet_info *vi,
bool refill)
{
bool running = netif_running(vi->dev);
+ bool schedule_refill = false;
if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
+ schedule_refill = true;
if (running)
virtnet_napi_enable(rq);
+
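+ /* The refill worker disables/enables NAPI itself; only schedule it
+ * once NAPI is running again so it never acts on a disabled queue.
+ */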
+ if (schedule_refill)
+ schedule_delayed_work(&vi->refill, 0);
}
static void virtnet_rx_resume_all(struct virtnet_info *vi)
@@ -3728,8 +3731,10 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
succ:
vi->curr_queue_pairs = queue_pairs;
/* virtnet_open() will refill when the device is going up. */
- if (dev->flags & IFF_UP)
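+ /* refill_enabled is toggled under refill_lock by the rx pause/resume
+ * paths; check it under the same lock.
+ */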
+ spin_lock_bh(&vi->refill_lock);
+ if (dev->flags & IFF_UP && vi->refill_enabled)
schedule_delayed_work(&vi->refill, 0);
+ spin_unlock_bh(&vi->refill_lock);
return 0;
}
@@ -5673,6 +5678,10 @@ static void virtnet_get_base_stats(struct net_device *dev,
if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
tx->hw_drop_ratelimits = 0;
+
+ netdev_stat_queue_sum(dev,
+ dev->real_num_rx_queues, vi->max_queue_pairs, rx,
+ dev->real_num_tx_queues, vi->max_queue_pairs, tx);
}
static const struct netdev_stat_ops virtnet_stat_ops = {
@@ -5885,8 +5894,10 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
DMA_TO_DEVICE, 0);
- if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
- return -ENOMEM;
+ if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) {
+ err = -ENOMEM;
+ goto err_free_buffs;
+ }
err = xsk_pool_dma_map(pool, dma_dev, 0);
if (err)
@@ -5914,6 +5925,8 @@ err_rq:
err_xsk_map:
virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
DMA_TO_DEVICE, 0);
+err_free_buffs:
+ kvfree(rq->xsk_buffs);
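+ /* Also reached by fall-through from the labels above, so every
+ * failure path releases the buffer array.
+ */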
return err;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 616ecc38d172..5f470499e600 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
- rbi->len, false);
+ rcd->len, false);
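+ /* rcd->len is the actual completed length; rbi->len is only the
+ * buffer's allocated size.
+ */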
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 6e6e9f05509a..06d19e90eadb 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
* default dst remote_ip previously added for this vni
*/
if (!vxlan_addr_any(&vninode->remote_ip) ||
- !vxlan_addr_any(&dst->remote_ip))
+ !vxlan_addr_any(&dst->remote_ip)) {
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
+ vninode->vni);
+
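+ /* __vxlan_fdb_delete() must be called with the FDB hash bucket
+ * lock held.
+ */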
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
__vxlan_fdb_delete(vxlan, all_zeros_mac,
(vxlan_addr_any(&vninode->remote_ip) ?
dst->remote_ip : vninode->remote_ip),
@@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
vninode->vni, vninode->vni,
dst->remote_ifindex,
true);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ }
if (vxlan->dev->flags & IFF_UP) {
if (vxlan_addr_multicast(&vninode->remote_ip) &&
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 2821c27f317e..d06c724f63d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -896,14 +896,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
}
/* 1) Prepare USB boot loader for runtime image */
- brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
+ err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
+ if (err)
+ goto fail;
rdlstate = le32_to_cpu(state.state);
rdlbytes = le32_to_cpu(state.bytes);
/* 2) Check we are in the Waiting state */
if (rdlstate != DL_WAITING) {
- brcmf_err("Failed to DL_START\n");
+ brcmf_err("Invalid DL state: %u\n", rdlstate);
err = -EINVAL;
goto fail;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 670031fd60dc..59af36960f9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -142,8 +142,6 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const char iwl_sp_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz";
-
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
IWL_DEVICE_SC,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index b9bd89bfdd74..acafee538b8a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2025 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -451,8 +451,11 @@ struct iwl_cfg {
#define IWL_CFG_RF_ID_HR 0x7
#define IWL_CFG_RF_ID_HR1 0x4
-#define IWL_CFG_BW_NO_LIM (U16_MAX - 1)
-#define IWL_CFG_BW_ANY U16_MAX
+#define IWL_CFG_NO_160 0x1
+#define IWL_CFG_160 0x0
+
+#define IWL_CFG_NO_320 0x1
+#define IWL_CFG_320 0x0
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -464,7 +467,7 @@ struct iwl_cfg {
#define IWL_CFG_IS_JACKET 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
@@ -472,10 +475,10 @@ struct iwl_dev_info {
u16 subdevice;
u16 mac_type;
u16 rf_type;
- u16 bw_limit;
u8 mac_step;
u8 rf_step;
u8 rf_id;
+ u8 no_160;
u8 cores;
u8 cdb;
u8 jacket;
@@ -489,7 +492,7 @@ extern const unsigned int iwl_dev_info_table_size;
const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step);
+ u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
extern const struct pci_device_id iwl_hw_card_ids[];
#endif
@@ -550,7 +553,6 @@ extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_fm_name[];
extern const char iwl_wh_name[];
-extern const char iwl_sp_name[];
extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
extern const char iwl_dr_name[];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index be9e464c9b7b..3ff493e920d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -148,6 +148,7 @@
* during a FW error.
*/
#define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101)
+#define CSR_FUNC_SCRATCH_POWER_OFF_MASK 0xFFFF
/* Bits for CSR_HW_IF_CONFIG_REG */
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index cd1b0048bb6d..c381511e9ec6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -944,8 +944,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
break;
case NL80211_BAND_6GHZ:
- if (!trans->reduced_cap_sku &&
- trans->bw_limit >= 320) {
+ if (!trans->reduced_cap_sku) {
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |=
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |=
@@ -1095,22 +1094,19 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
}
- if (trans->bw_limit < 160)
+ if (trans->no_160)
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- if (trans->bw_limit < 320 || trans->reduced_cap_sku) {
+ if (trans->reduced_cap_sku) {
memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
- iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
- }
-
- if (trans->reduced_cap_sku) {
iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index c1607b6d0759..e7b2e08645ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -21,6 +21,7 @@ struct iwl_trans_dev_restart_data {
struct list_head list;
unsigned int restart_count;
time64_t last_error;
+ bool backoff;
char name[];
};
@@ -125,13 +126,20 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
if (!data)
return at_least;
- if (ktime_get_boottime_seconds() - data->last_error >=
+ if (!data->backoff &&
+ ktime_get_boottime_seconds() - data->last_error >=
IWL_TRANS_RESET_OK_TIME)
data->restart_count = 0;
index = data->restart_count;
- if (index >= ARRAY_SIZE(escalation_list))
+ if (index >= ARRAY_SIZE(escalation_list)) {
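+ /* Past the end of the escalation list: clamp to the strongest
+ * mode, but first request a one-time delayed retry (backoff).
+ */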
index = ARRAY_SIZE(escalation_list) - 1;
+ if (!data->backoff) {
+ data->backoff = true;
+ return IWL_RESET_MODE_BACKOFF;
+ }
+ data->backoff = false;
+ }
return max(at_least, escalation_list[index]);
}
@@ -140,7 +148,8 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
static void iwl_trans_restart_wk(struct work_struct *wk)
{
- struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
+ struct iwl_trans *trans = container_of(wk, typeof(*trans),
+ restart.wk.work);
struct iwl_trans_reprobe *reprobe;
enum iwl_reset_mode mode;
@@ -168,6 +177,12 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
return;
mode = iwl_trans_determine_restart_mode(trans);
+ if (mode == IWL_RESET_MODE_BACKOFF) {
+ IWL_ERR(trans, "Too many device errors - delay next reset\n");
+ queue_delayed_work(system_unbound_wq, &trans->restart.wk,
+ IWL_TRANS_RESET_DELAY);
+ return;
+ }
iwl_trans_inc_restart_count(trans->dev);
@@ -227,7 +242,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->dev = dev;
trans->num_rx_queues = 1;
- INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);
+ INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
return trans;
}
@@ -271,7 +286,7 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
- cancel_work_sync(&trans->restart.wk);
+ cancel_delayed_work_sync(&trans->restart.wk);
kmem_cache_destroy(trans->dev_cmd_pool);
}
@@ -403,7 +418,7 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
iwl_trans_pcie_op_mode_leave(trans);
- cancel_work_sync(&trans->restart.wk);
+ cancel_delayed_work_sync(&trans->restart.wk);
trans->op_mode = NULL;
@@ -540,7 +555,6 @@ void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
iwl_trans_pcie_release_nic_access(trans);
- __release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 25fb4c50e38b..ce4954b0d524 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -876,7 +876,7 @@ struct iwl_txq {
* only valid for discrete (not integrated) NICs
* @invalid_tx_cmd: invalid TX command buffer
* @reduced_cap_sku: reduced capability supported SKU
- * @bw_limit: the max bandwidth
+ * @no_160: device not supporting 160 MHz
* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
* @restart: restart worker data
* @restart.wk: restart worker
@@ -911,8 +911,7 @@ struct iwl_trans {
char hw_id_str[52];
u32 sku_id[3];
bool reduced_cap_sku;
- u16 bw_limit;
- bool step_urm;
+ u8 no_160:1, step_urm:1;
u8 dsbr_urm_fw_dependent:1,
dsbr_urm_permanent:1;
@@ -962,7 +961,7 @@ struct iwl_trans {
struct iwl_dma_ptr invalid_tx_cmd;
struct {
- struct work_struct wk;
+ struct delayed_work wk;
struct iwl_fw_error_dump_mode mode;
bool during_reset;
} restart;
@@ -1163,7 +1162,7 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
*/
trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
&trans->status);
- queue_work(system_unbound_wq, &trans->restart.wk);
+ queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0);
}
static inline void iwl_trans_fw_error(struct iwl_trans *trans,
@@ -1262,6 +1261,9 @@ enum iwl_reset_mode {
IWL_RESET_MODE_RESCAN,
IWL_RESET_MODE_FUNC_RESET,
IWL_RESET_MODE_PROD_RESET,
+
+ /* keep last - special backoff value */
+ IWL_RESET_MODE_BACKOFF,
};
void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
index db9e0f04f4b7..687a9450ac98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/agg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
@@ -124,9 +124,9 @@ void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
rcu_read_lock();
baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
- if (!IWL_FW_CHECK(mld, !baid_data,
- "Got valid BAID %d but not allocated, invalid BAR release!\n",
- baid))
+ if (IWL_FW_CHECK(mld, !baid_data,
+ "Got valid BAID %d but not allocated, invalid BAR release!\n",
+ baid))
goto out_unlock;
if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index 89d95e9b4f30..93f9f78e4276 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -949,8 +949,9 @@ void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
snprintf(name, sizeof(name), "%pd", vif->debugfs_dir);
snprintf(target, sizeof(target), "../../../%pd3/iwlmld",
vif->debugfs_dir);
- mld_vif->dbgfs_slink =
- debugfs_create_symlink(name, mld->debugfs_dir, target);
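+ /* On FW restart this callback runs again for the same vif; keep
+ * any existing symlink rather than creating a duplicate.
+ */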
+ if (!mld_vif->dbgfs_slink)
+ mld_vif->dbgfs_slink =
+ debugfs_create_symlink(name, mld->debugfs_dir, target);
if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
vif->type == NL80211_IFTYPE_STATION) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
index 62da137e1024..4b083d447ee2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
@@ -333,19 +333,22 @@ int iwl_mld_load_fw(struct iwl_mld *mld)
ret = iwl_trans_start_hw(mld->trans);
if (ret)
- return ret;
+ goto err;
ret = iwl_mld_run_fw_init_sequence(mld);
if (ret)
- return ret;
+ goto err;
ret = iwl_mld_init_mcc(mld);
if (ret)
- return ret;
+ goto err;
mld->fw_status.running = true;
return 0;
+err:
+ iwl_mld_stop_fw(mld);
+ return ret;
}
void iwl_mld_stop_fw(struct iwl_mld *mld)
@@ -358,6 +361,10 @@ void iwl_mld_stop_fw(struct iwl_mld *mld)
iwl_trans_stop_device(mld->trans);
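+ /* The FW is now dead: cancel the async-handlers worker and purge any
+ * queued notifications so they are not processed against it.
+ */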
+ wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
+
+ iwl_mld_purge_async_handlers_list(mld);
+
mld->fw_status.running = false;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index 99e13cfd1e5f..68d97d3b8f02 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -651,6 +651,7 @@ void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw,
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink);
+ iwl_mld_vif_from_mac80211(vif)->dbgfs_slink = NULL;
#endif
iwl_mld_rm_vif(mld, vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index d4a99ae64074..73d2166a4c25 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -75,6 +75,7 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
/* Setup async RX handling */
spin_lock_init(&mld->async_handlers_lock);
+ INIT_LIST_HEAD(&mld->async_handlers_list);
wiphy_work_init(&mld->async_handlers_wk,
iwl_mld_async_handlers_wk);
@@ -414,9 +415,14 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
wiphy_unlock(mld->wiphy);
rtnl_unlock();
iwl_fw_flush_dumps(&mld->fwrt);
- goto free_hw;
+ goto err;
}
+ /* We are about to stop the FW. Notifications may require an
+ * operational FW, so handle them all here before we stop.
+ */
+ wiphy_work_flush(mld->wiphy, &mld->async_handlers_wk);
+
iwl_mld_stop_fw(mld);
wiphy_unlock(mld->wiphy);
@@ -455,7 +461,8 @@ leds_exit:
iwl_mld_leds_exit(mld);
free_nvm:
kfree(mld->nvm_data);
-free_hw:
+err:
+ iwl_trans_op_mode_leave(mld->trans);
ieee80211_free_hw(mld->hw);
return ERR_PTR(ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
index 5eceaaf7696d..a4a16da6ebf3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -298,11 +298,6 @@ iwl_cleanup_mld(struct iwl_mld *mld)
#endif
iwl_mld_low_latency_restart_cleanup(mld);
-
- /* Empty the list of async notification handlers so we won't process
- * notifications from the dead fw after the reconfig flow.
- */
- iwl_mld_purge_async_handlers_list(mld);
}
enum iwl_power_scheme {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 93446c374008..00056e76ea3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -552,17 +552,16 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
- _rf_id, _rf_step, _bw_limit, _cores, _cdb, _cfg, _name) \
+ _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
.name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \
- .bw_limit = _bw_limit, .cores = _cores, .rf_id = _rf_id, \
+ .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
.mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY }
#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, \
- _cfg, _name)
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
+ IWL_CFG_ANY, _cfg, _name)
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -589,6 +588,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name),
@@ -725,66 +726,66 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_160_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_name),
/* Qu with Jf */
@@ -792,132 +793,132 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu with Hr */
@@ -925,189 +926,189 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr_b0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr_b0, iwl_ax201_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_quz_a0_hr_b0, iwl_ax201_name),
/* Ma */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_ma, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax231_name),
/* So with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Gf */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* So with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* So with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
#endif /* CONFIG_IWLMVM */
@@ -1116,13 +1117,13 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1134,119 +1135,104 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_wh_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_wh_name),
/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_gl_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- 160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_mtp_name),
/* Sc */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_wh_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- 160, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_sp_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2, iwl_wh_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- 160, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_sp_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2f, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2f, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc2f, iwl_wh_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- 160, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_sp_name),
/* Dr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_dr, iwl_dr_name),
/* Br */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_br, iwl_br_name),
#endif /* CONFIG_IWLMLD */
};
@@ -1398,7 +1384,7 @@ out:
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step)
+ u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
{
int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
@@ -1441,15 +1427,8 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
dev_info->rf_id != rf_id)
continue;
- /*
- * Check that bw_limit have the same "boolean" value since
- * IWL_SUBDEVICE_BW_LIM can only return a boolean value and
- * dev_info->bw_limit encodes a non-boolean value.
- * dev_info->bw_limit == IWL_CFG_BW_NO_LIM must be equal to
- * !bw_limit to have a match.
- */
- if (dev_info->bw_limit != IWL_CFG_BW_ANY &&
- (dev_info->bw_limit == IWL_CFG_BW_NO_LIM) == !!bw_limit)
+ if (dev_info->no_160 != (u8)IWL_CFG_ANY &&
+ dev_info->no_160 != no_160)
continue;
if (dev_info->cores != (u8)IWL_CFG_ANY &&
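
With the special-cased bw_limit comparison gone, no_160 matches the same way as every other field: a table entry either holds the all-ones IWL_CFG_ANY wildcard or must equal the value derived from the hardware. A minimal userspace sketch of that comparison (the sentinel and names mirror the driver's convention but are restated here as assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CFG_ANY (~0)    /* stand-in for IWL_CFG_ANY */

/* A field matches when the entry is the wildcard or equals the HW value. */
static bool field_matches(uint8_t entry, uint8_t hw)
{
        return entry == (uint8_t)CFG_ANY || entry == hw;
}

int main(void)
{
        printf("%d\n", field_matches((uint8_t)CFG_ANY, 1)); /* 1: wildcard */
        printf("%d\n", field_matches(0, 1));                /* 0: mismatch */
        printf("%d\n", field_matches(1, 1));                /* 1: exact */
        return 0;
}
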
@@ -1587,13 +1566,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id),
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
- IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device),
+ IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
IWL_SUBDEVICE_CORES(pdev->subsystem_device),
CSR_HW_RFID_STEP(iwl_trans->hw_rf_id));
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
- iwl_trans->bw_limit = dev_info->bw_limit;
+ iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
}
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -1759,11 +1738,27 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* Scratch value was altered, this means the device was powered off, we
* need to reset it completely.
* Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
- * so assume that any bits there mean that the device is usable.
+	 * but not bits [15:8]. So if any bits are set in the lower word,
+	 * assume the device is alive.
+ * For older devices, just try silently to grab the NIC.
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
- !iwl_read32(trans, CSR_FUNC_SCRATCH))
- device_was_powered_off = true;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
+ CSR_FUNC_SCRATCH_POWER_OFF_MASK))
+ device_was_powered_off = true;
+ } else {
+ /*
+	 * BHs are re-enabled by iwl_trans_pcie_release_nic_access(),
+	 * so re-enable them here if _iwl_trans_pcie_grab_nic_access() fails.
+ */
+ local_bh_disable();
+ if (_iwl_trans_pcie_grab_nic_access(trans, true)) {
+ iwl_trans_pcie_release_nic_access(trans);
+ } else {
+ device_was_powered_off = true;
+ local_bh_enable();
+ }
+ }
if (restore || device_was_powered_off) {
trans->state = IWL_TRANS_NO_FW;
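
The scratch-register probe reduces to a single mask test. A hypothetical userspace reduction of the decision (POWER_OFF_MASK is an assumption standing in for CSR_FUNC_SCRATCH_POWER_OFF_MASK, chosen to match the comment that bits 0:7 are cleared on suspend while bits 15:8 survive):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POWER_OFF_MASK 0xFF00u  /* assumed: bits 15:8 survive suspend */

static bool device_was_powered_off(uint32_t scratch)
{
        /* No surviving bits left means the scratch contents were lost. */
        return !(scratch & POWER_OFF_MASK);
}

int main(void)
{
        printf("%d\n", device_was_powered_off(0x0000)); /* 1: powered off */
        printf("%d\n", device_was_powered_off(0x0100)); /* 0: still alive */
        printf("%d\n", device_was_powered_off(0x00FF)); /* 1: only bits 0:7 set */
        return 0;
}
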
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45460f93d24a..114a9195ad7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -558,10 +558,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
struct device *dev);
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
-#define _iwl_trans_pcie_grab_nic_access(trans) \
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
+#define _iwl_trans_pcie_grab_nic_access(trans, silent) \
__cond_lock(nic_access_nobh, \
- likely(__iwl_trans_pcie_grab_nic_access(trans)))
+ likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))
void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
@@ -1105,7 +1105,8 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val);
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
-void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+void __releases(nic_access_nobh)
+iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
/* transport gen 1 exported functions */
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
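
The new silent flag rides through the __cond_lock() wrapper so sparse's context tracking still treats the NIC access as conditionally acquired, and the release side now carries a __releases() annotation. This is a compressed sketch of how those annotations expand (modeled on include/linux/compiler_types.h, not the driver's own code):

#ifdef __CHECKER__      /* set when running sparse */
# define __acquire(x)           __context__(x, 1)
# define __release(x)           __context__(x, -1)
# define __cond_lock(x, c)      ((c) ? ({ __acquire(x); 1; }) : 0)
# define __releases(x)          __attribute__((context(x, 1, 0)))
#else                   /* plain compile: the annotations vanish */
# define __acquire(x)           (void)0
# define __release(x)           (void)0
# define __cond_lock(x, c)      (c)
# define __releases(x)
#endif

A trylock-style helper wrapped in __cond_lock() thus enters the lock context only when it returns true, which is why the macro above has to keep forwarding the extra argument to __iwl_trans_pcie_grab_nic_access().
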
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index c917ed4c19bc..102a6123bba0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2351,7 +2351,8 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
struct iwl_trans_pcie_removal *removal;
char _msg = 0, *msg = &_msg;
- if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY))
+ if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY ||
+ mode == IWL_RESET_MODE_BACKOFF))
return;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
@@ -2405,7 +2406,7 @@ EXPORT_SYMBOL(iwl_trans_pcie_reset);
* This version doesn't disable BHs but rather assumes they're
* already disabled.
*/
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2457,6 +2458,11 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
+ if (silent) {
+ spin_unlock(&trans_pcie->reg_lock);
+ return false;
+ }
+
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
cntrl);
@@ -2488,7 +2494,7 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
bool ret;
local_bh_disable();
- ret = __iwl_trans_pcie_grab_nic_access(trans);
+ ret = __iwl_trans_pcie_grab_nic_access(trans, false);
if (ret) {
/* keep BHs disabled until iwl_trans_pcie_release_nic_access */
return ret;
@@ -2497,7 +2503,8 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
return false;
}
-void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+void __releases(nic_access_nobh)
+iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2524,6 +2531,7 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
* scheduled on different CPUs (after we drop reg_lock).
*/
out:
+ __release(nic_access_nobh);
spin_unlock_bh(&trans_pcie->reg_lock);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index bb90bcfc6763..9fc4e98693eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1021,7 +1021,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set (see above.)
*/
- if (!_iwl_trans_pcie_grab_nic_access(trans))
+ if (!_iwl_trans_pcie_grab_nic_access(trans, false))
return -EIO;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index 7ef5e89c6af2..d0bda23c628a 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for the iwlwifi device info table
*
- * Copyright (C) 2023-2025 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <kunit/test.h>
#include <linux/pci.h>
@@ -13,9 +13,9 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
{
- printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,bw_limit=%d,cores=%.2x\n",
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
- di->rf_type, di->cdb, di->jacket, di->rf_id, di->bw_limit,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
di->cores);
}
@@ -31,13 +31,8 @@ static void devinfo_table_order(struct kunit *test)
di->mac_type, di->mac_step,
di->rf_type, di->cdb,
di->jacket, di->rf_id,
- di->bw_limit != IWL_CFG_BW_NO_LIM,
- di->cores, di->rf_step);
- if (!ret) {
- iwl_pci_print_dev_info("No entry found for: ", di);
- KUNIT_FAIL(test,
- "No entry found for entry at index %d\n", idx);
- } else if (ret != di) {
+ di->no_160, di->cores, di->rf_step);
+ if (ret != di) {
iwl_pci_print_dev_info("searched: ", di);
iwl_pci_print_dev_info("found: ", ret);
KUNIT_FAIL(test,
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index eae93efa6150..82d1bf7edba2 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -102,7 +102,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
void plfxlc_mac_release(struct plfxlc_mac *mac)
{
plfxlc_chip_release(&mac->chip);
- lockdep_assert_held(&mac->lock);
}
int plfxlc_op_start(struct ieee80211_hw *hw)
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index d47dfa80fb95..4d64b6935bb9 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -102,6 +102,7 @@ config NVME_TCP_TLS
depends on NVME_TCP
select NET_HANDSHAKE
select KEYS
+ select TLS
help
Enables TLS encryption for NVMe TCP using the netlink handshake API.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index eb6ea8acb3cc..ac53629fce68 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4493,7 +4493,8 @@ static void nvme_fw_act_work(struct work_struct *work)
msleep(100);
}
- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
+ !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
return;
nvme_unquiesce_io_queues(ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b178d52eac1b..2e30e9be7408 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3575,7 +3575,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
- if (!nvme_try_sched_reset(&dev->ctrl))
+ if (nvme_try_sched_reset(&dev->ctrl))
nvme_unquiesce_io_queues(&dev->ctrl);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -3623,6 +3623,9 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
+ { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_BOGUS_NID, },
@@ -3646,6 +3649,9 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
.driver_data = NVME_QUIRK_BROKEN_MSI },
+ { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */
+ .driver_data = NVME_QUIRK_BROKEN_MSI |
+ NVME_QUIRK_NO_DEEPEST_PS },
{ PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 72d260201d8c..aba365f97cf6 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1946,7 +1946,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
cancel_work_sync(&queue->io_work);
}
-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1965,6 +1965,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_unlock(&queue->queue_lock);
}
+static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ int timeout = 100;
+
+ while (timeout > 0) {
+ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
+ !sk_wmem_alloc_get(queue->sock->sk))
+ return;
+ msleep(2);
+ timeout -= 2;
+ }
+ dev_warn(nctrl->device,
+		 "qid %d: timed out waiting for sock wmem to drain\n",
+ qid);
+}
+
+static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ nvme_tcp_stop_queue_nowait(nctrl, qid);
+ nvme_tcp_wait_queue(nctrl, qid);
+}
+
static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
{
write_lock_bh(&queue->sock->sk->sk_callback_lock);
@@ -2032,7 +2057,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
int i;
for (i = 1; i < ctrl->queue_count; i++)
- nvme_tcp_stop_queue(ctrl, i);
+ nvme_tcp_stop_queue_nowait(ctrl, i);
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_tcp_wait_queue(ctrl, i);
}
static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
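
Splitting the stop into a non-blocking signal pass and a separate wait pass lets the per-queue drain timeouts (each bounded at roughly 100 ms by nvme_tcp_wait_queue()) overlap instead of adding up across all I/O queues. A stub-level sketch of the two-pass pattern (names invented for illustration; queue 0 is skipped as the admin queue, matching the driver's loops):

#include <stdio.h>

#define NQUEUES 8

static void queue_signal_stop(int q)  { printf("stop q%d\n", q); }    /* fast */
static void queue_wait_drained(int q) { printf("drained q%d\n", q); } /* bounded wait */

int main(void)
{
        int i;

        /* Pass 1: kick off every queue's teardown without blocking. */
        for (i = 1; i < NQUEUES; i++)
                queue_signal_stop(i);
        /* Pass 2: each wait overlaps the other queues' in-flight draining. */
        for (i = 1; i < NQUEUES; i++)
                queue_wait_drained(i);
        return 0;
}
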
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index fb7446d6d682..4c253b433bf7 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -98,6 +98,7 @@ config NVME_TARGET_TCP_TLS
bool "NVMe over Fabrics TCP target TLS encryption support"
depends on NVME_TARGET_TCP
select NET_HANDSHAKE
+ select TLS
help
Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index cef8d77f477b..9429b8218408 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -600,13 +600,12 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
tls_key = NULL;
- kfree_sensitive(tls_psk);
}
if (sq->ctrl->tls_key)
key_put(sq->ctrl->tls_key);
sq->ctrl->tls_key = tls_key;
#endif
-
+ kfree_sensitive(tls_psk);
out_free_digest:
kfree_sensitive(digest);
out_free_psk:
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index f2d0c920269b..12a5cb8641ca 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1560,6 +1560,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
struct socket *sock = queue->sock;
+ if (!queue->state_change)
+ return;
+
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_data_ready = queue->data_ready;
sock->sk->sk_state_change = queue->state_change;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 055518ee354d..e9e9aaa91770 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -59,7 +59,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
- pci_dev_put(pdev);
rc = -EBUSY;
goto out;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 842a1e6cbfc4..18de31328540 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -37,16 +37,16 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name(
struct pinctrl_dev *pctldev,
const char *name)
{
- const struct group_desc *grp = NULL;
+ const struct group_desc *grp;
int i;
for (i = 0; i < pctldev->num_groups; i++) {
grp = pinctrl_generic_get_group(pctldev, i);
if (grp && !strcmp(grp->grp.name, name))
- break;
+ return grp;
}
- return grp;
+ return NULL;
}
static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
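
The underlying bug is a cursor leak: breaking out of a search loop and returning the cursor hands back the last element visited rather than "not found". A runnable reduction of the before/after behavior (the group array is illustrative):

#include <stdio.h>
#include <string.h>

static const char *groups[] = { "gpio1grp", "gpio2grp" };

static const char *find_buggy(const char *name)
{
        const char *g = NULL;
        unsigned int i;

        for (i = 0; i < 2; i++) {
                g = groups[i];
                if (!strcmp(g, name))
                        break;
        }
        return g;       /* the last group leaks out when nothing matched */
}

static const char *find_fixed(const char *name)
{
        unsigned int i;

        for (i = 0; i < 2; i++)
                if (!strcmp(groups[i], name))
                        return groups[i];
        return NULL;    /* an explicit miss */
}

int main(void)
{
        printf("buggy: %s\n", find_buggy("nope"));                    /* gpio2grp */
        printf("fixed: %s\n", find_fixed("nope") ? "hit" : "(null)"); /* (null) */
        return 0;
}
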
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index ced4ee509b5b..b4eb2beab691 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -449,7 +449,7 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
return -EOPNOTSUPP;
virq = irq_find_mapping(eint->domain, eint_num);
- eint_offset = (eint_num % 4) * 8;
+ eint_offset = (idx % 4) * 8;
d = irq_get_irq_data(virq);
set_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 547a798b71c8..5d84a778683d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -6,6 +6,7 @@
*/
#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
@@ -112,39 +113,19 @@
#define REG_LAN_LED1_MAPPING 0x0280
#define LAN4_LED_MAPPING_MASK GENMASK(18, 16)
-#define LAN4_PHY4_LED_MAP BIT(18)
-#define LAN4_PHY2_LED_MAP BIT(17)
-#define LAN4_PHY1_LED_MAP BIT(16)
-#define LAN4_PHY0_LED_MAP 0
-#define LAN4_PHY3_LED_MAP GENMASK(17, 16)
+#define LAN4_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN4_LED_MAPPING_MASK, (_n))
#define LAN3_LED_MAPPING_MASK GENMASK(14, 12)
-#define LAN3_PHY4_LED_MAP BIT(14)
-#define LAN3_PHY2_LED_MAP BIT(13)
-#define LAN3_PHY1_LED_MAP BIT(12)
-#define LAN3_PHY0_LED_MAP 0
-#define LAN3_PHY3_LED_MAP GENMASK(13, 12)
+#define LAN3_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN3_LED_MAPPING_MASK, (_n))
#define LAN2_LED_MAPPING_MASK GENMASK(10, 8)
-#define LAN2_PHY4_LED_MAP BIT(12)
-#define LAN2_PHY2_LED_MAP BIT(11)
-#define LAN2_PHY1_LED_MAP BIT(10)
-#define LAN2_PHY0_LED_MAP 0
-#define LAN2_PHY3_LED_MAP GENMASK(11, 10)
+#define LAN2_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN2_LED_MAPPING_MASK, (_n))
#define LAN1_LED_MAPPING_MASK GENMASK(6, 4)
-#define LAN1_PHY4_LED_MAP BIT(6)
-#define LAN1_PHY2_LED_MAP BIT(5)
-#define LAN1_PHY1_LED_MAP BIT(4)
-#define LAN1_PHY0_LED_MAP 0
-#define LAN1_PHY3_LED_MAP GENMASK(5, 4)
+#define LAN1_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN1_LED_MAPPING_MASK, (_n))
#define LAN0_LED_MAPPING_MASK GENMASK(2, 0)
-#define LAN0_PHY4_LED_MAP BIT(3)
-#define LAN0_PHY2_LED_MAP BIT(2)
-#define LAN0_PHY1_LED_MAP BIT(1)
-#define LAN0_PHY0_LED_MAP 0
-#define LAN0_PHY3_LED_MAP GENMASK(2, 1)
+#define LAN0_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN0_LED_MAPPING_MASK, (_n))
/* CONF */
#define REG_I2C_SDA_E2 0x001c
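
All five per-PHY define blocks collapse into one field-packing macro: the PHY selector is a plain integer dropped into the LAN port's 3-bit field. A simplified userspace stand-in for GENMASK()/FIELD_PREP_CONST() showing the encoding (illustration only; 32-bit masks assumed):

#include <stdio.h>

#define GENMASK(h, l)           (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)   (((val) << __builtin_ctz(mask)) & (mask))

#define LAN4_LED_MAPPING_MASK   GENMASK(18, 16)
#define LAN4_PHY_LED_MAP(n)     FIELD_PREP(LAN4_LED_MAPPING_MASK, (n))

int main(void)
{
        /* Selector n lands in bits 18:16, e.g. n=3 -> 0x00030000. */
        printf("0x%08x\n", LAN4_PHY_LED_MAP(3));
        return 0;
}
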
@@ -1476,8 +1457,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY1_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1491,8 +1472,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY1_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1506,8 +1487,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY1_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1521,8 +1502,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY1_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(0)
},
.regmap_size = 2,
},
@@ -1540,8 +1521,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY2_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1555,8 +1536,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY2_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1570,8 +1551,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY2_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1585,8 +1566,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY2_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(1)
},
.regmap_size = 2,
},
@@ -1604,8 +1585,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY3_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1619,8 +1600,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY3_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1634,8 +1615,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY3_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1649,8 +1630,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY3_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(2)
},
.regmap_size = 2,
},
@@ -1668,8 +1649,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY4_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1683,8 +1664,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY4_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1698,8 +1679,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY4_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1713,8 +1694,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY4_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(3)
},
.regmap_size = 2,
},
@@ -1732,8 +1713,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY1_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1747,8 +1728,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY1_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1762,8 +1743,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY1_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1777,8 +1758,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY1_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(0)
},
.regmap_size = 2,
},
@@ -1796,8 +1777,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY2_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1811,8 +1792,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY2_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1826,8 +1807,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY2_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1841,8 +1822,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY2_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(1)
},
.regmap_size = 2,
},
@@ -1860,8 +1841,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY3_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1875,8 +1856,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY3_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1890,8 +1871,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY3_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1905,8 +1886,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY3_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(2)
},
.regmap_size = 2,
},
@@ -1924,8 +1905,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY4_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1939,8 +1920,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY4_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1954,8 +1935,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY4_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1969,8 +1950,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY4_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(3)
},
.regmap_size = 2,
},
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 91edb539925a..8596f3541265 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1015,9 +1015,15 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
if (!pctl->eint)
return -ENOMEM;
- pctl->eint->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pctl->eint->base))
- return PTR_ERR(pctl->eint->base);
+ pctl->eint->nbase = 1;
+ /* mtk-eint expects an array */
+ pctl->eint->base = devm_kzalloc(pctl->dev, sizeof(pctl->eint->base), GFP_KERNEL);
+ if (!pctl->eint->base)
+ return -ENOMEM;
+
+ pctl->eint->base[0] = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pctl->eint->base[0]))
+ return PTR_ERR(pctl->eint->base[0]);
pctl->eint->irq = irq_of_parse_and_map(np, 0);
if (!pctl->eint->irq)
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 253a0cc57e39..e5a32a0532ee 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -487,7 +487,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
if (meson_pinconf_get_pull(pc, pin) == param)
- arg = 1;
+ arg = 60000;
else
return -EINVAL;
break;
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c
index 1af11cd95fb0..b94fb4ee0ec3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8750.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c
@@ -46,7 +46,9 @@
.out_bit = 1, \
.intr_enable_bit = 0, \
.intr_status_bit = 0, \
- .intr_target_bit = 5, \
+ .intr_wakeup_present_bit = 6, \
+ .intr_wakeup_enable_bit = 7, \
+ .intr_target_bit = 8, \
.intr_target_kpss_val = 3, \
.intr_raw_status_bit = 4, \
.intr_polarity_bit = 1, \
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index e5c4e6be37f7..37c7a57afee5 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -644,10 +644,9 @@ static void amd_pmc_s2idle_check(void)
struct smu_metrics table;
int rc;
- /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
- if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
- table.s0i3_last_entry_status)
- usleep_range(10000, 20000);
+ /* Avoid triggering OVP */
+	/* Avoid triggering OVP (over-voltage protection) */
+ msleep(2500);
/* Dump the IdleMask before we add to the STB */
amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 38ef778e8c19..0c697b46f436 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -304,6 +304,7 @@ struct asus_wmi {
u32 kbd_rgb_dev;
bool kbd_rgb_state_available;
+ bool oobe_state_available;
u8 throttle_thermal_policy_mode;
u32 throttle_thermal_policy_dev;
@@ -1826,7 +1827,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) {
+ if (asus->oobe_state_available) {
/*
* Disable OOBE state, so that e.g. the keyboard backlight
* works.
@@ -4723,6 +4724,7 @@ static int asus_wmi_add(struct platform_device *pdev)
asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
+ asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE);
asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
&& dmi_check_system(asus_ally_mcu_quirk);
@@ -4970,6 +4972,13 @@ static int asus_hotk_restore(struct device *device)
}
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
+ if (asus->oobe_state_available) {
+ /*
+ * Disable OOBE state, so that e.g. the keyboard backlight
+ * works.
+ */
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL);
+ }
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 0c3be03385f8..08b82c151e07 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -70,6 +70,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &generic_quirks,
},
{
+ .ident = "Alienware m15 R7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R7"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
.ident = "Alienware m16 R1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -655,12 +663,10 @@ static int thermal_profile_probe(void *drvdata, unsigned long *choices)
for (u32 i = 0; i < sys_desc[3]; i++) {
ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_LIST_IDS,
i + first_mode, &out_data);
-
- if (ret == -EIO)
- return ret;
-
if (ret == -EBADRQC)
break;
+ if (ret)
+ return ret;
if (!is_wmax_thermal_code(out_data))
continue;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 17a09b7784ed..ede483573fe0 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1294,6 +1294,16 @@ static const struct key_entry ideapad_keymap[] = {
/* Specific to some newer models */
{ KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } },
{ KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } },
+ /* Star- (User Assignable Key) */
+ { KE_KEY, 0x44 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
+ /* Eye */
+ { KE_KEY, 0x45 | IDEAPAD_WMI_KEY, { KEY_PROG3 } },
+	/* Performance toggle (also Fn+Q), handled inside ideapad_wmi_notify() */
+ { KE_KEY, 0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } },
+ /* shift + prtsc */
+ { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } },
+ { KE_KEY, 0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_KEY, 0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } },
{ KE_END },
};
@@ -2080,6 +2090,12 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n",
data->integer.value);
+ /* performance button triggered by 0x3d */
+ if (data->integer.value == 0x3d && priv->dytc) {
+ platform_profile_cycle();
+ break;
+ }
+
/* 0x02 FnLock, 0x03 Esc */
if (data->integer.value == 0x02 || data->integer.value == 0x03)
ideapad_fn_lock_led_notify(priv, data->integer.value == 0x02);
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 88a1a9ff2f34..0b5e43444ed6 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -44,16 +44,17 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
- {"INT33D5", 0},
- {"INTC1051", 0},
- {"INTC1054", 0},
- {"INTC1070", 0},
- {"INTC1076", 0},
- {"INTC1077", 0},
- {"INTC1078", 0},
- {"INTC107B", 0},
- {"INTC10CB", 0},
- {"", 0},
+ { "INT33D5" },
+ { "INTC1051" },
+ { "INTC1054" },
+ { "INTC1070" },
+ { "INTC1076" },
+ { "INTC1077" },
+ { "INTC1078" },
+ { "INTC107B" },
+ { "INTC10CB" },
+ { "INTC10CC" },
+ { }
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index 40bbf8e45fa4..bdee5d00f30b 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -146,15 +146,13 @@ static int uncore_event_cpu_online(unsigned int cpu)
{
struct uncore_data *data;
int target;
+ int ret;
/* Check if there is an online cpu in the package for uncore MSR */
target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
- /* Use this CPU on this die as a control CPU */
- cpumask_set_cpu(cpu, &uncore_cpu_mask);
-
data = uncore_get_instance(cpu);
if (!data)
return 0;
@@ -163,7 +161,14 @@ static int uncore_event_cpu_online(unsigned int cpu)
data->die_id = topology_die_id(cpu);
data->domain_id = UNCORE_DOMAIN_ID_INVALID;
- return uncore_freq_add_entry(data, cpu);
+ ret = uncore_freq_add_entry(data, cpu);
+ if (ret)
+ return ret;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+ return 0;
}
static int uncore_event_cpu_offline(unsigned int cpu)
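
The reorder applies the usual commit-after-success rule: claim the control CPU in uncore_cpu_mask only once the fallible uncore_freq_add_entry() has succeeded, so an error return leaves no stale claim behind. A stub-level sketch of the pattern (helpers invented for illustration):

#include <stdio.h>

static int register_entry(void) { return 0; /* or a negative errno */ }
static void mark_control_cpu(int cpu) { printf("cpu%d is control\n", cpu); }

static int cpu_online_handler(int cpu)
{
        int ret = register_entry();

        if (ret)
                return ret;     /* fail early: nothing to roll back */

        mark_control_cpu(cpu);  /* publish state only after success */
        return 0;
}

int main(void)
{
        return cpu_online_handler(3);
}
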
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index faf6e027f89a..2ccdca4f6960 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2578,12 +2578,60 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
.set_output = ptp_ocp_sma_fb_set_output,
};
+static int
+ptp_ocp_sma_adva_set_output(struct ptp_ocp *bp, int sma_nr, u32 val)
+{
+ u32 reg, mask, shift;
+ unsigned long flags;
+ u32 __iomem *gpio;
+
+ gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
+ shift = sma_nr & 1 ? 0 : 16;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ reg = ioread32(gpio);
+ reg = (reg & mask) | (val << shift);
+
+ iowrite32(reg, gpio);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return 0;
+}
+
+static int
+ptp_ocp_sma_adva_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
+{
+ u32 reg, mask, shift;
+ unsigned long flags;
+ u32 __iomem *gpio;
+
+ gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
+ shift = sma_nr & 1 ? 0 : 16;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ reg = ioread32(gpio);
+ reg = (reg & mask) | (val << shift);
+
+ iowrite32(reg, gpio);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return 0;
+}
+
static const struct ocp_sma_op ocp_adva_sma_op = {
.tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out },
.init = ptp_ocp_sma_fb_init,
.get = ptp_ocp_sma_fb_get,
- .set_inputs = ptp_ocp_sma_fb_set_inputs,
- .set_output = ptp_ocp_sma_fb_set_output,
+ .set_inputs = ptp_ocp_sma_adva_set_inputs,
+ .set_output = ptp_ocp_sma_adva_set_output,
};
static int
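
Both ADVA helpers share one piece of arithmetic: a 32-bit GPIO word carries two 16-bit SMA selector fields, with odd connector numbers in the low half and even ones in the high half. A runnable reduction of the mask/shift logic:

#include <stdio.h>
#include <stdint.h>

static uint32_t set_sma_field(uint32_t reg, int sma_nr, uint32_t val)
{
        uint32_t shift = (sma_nr & 1) ? 0 : 16;
        uint32_t mask = 0xffffu << (16 - shift);        /* bits to preserve */

        return (reg & mask) | (val << shift);
}

int main(void)
{
        printf("0x%08x\n", set_sma_field(0xAAAABBBBu, 1, 0x1234)); /* 0xAAAA1234 */
        printf("0x%08x\n", set_sma_field(0xAAAABBBBu, 2, 0x1234)); /* 0x1234BBBB */
        return 0;
}
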
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 4bfe469c04aa..8c1c908d2c6e 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -5,7 +5,7 @@ comment "S/390 block device drivers"
config DCSSBLK
def_tristate m
prompt "DCSSBLK support"
- depends on S390 && BLOCK
+ depends on S390 && BLOCK && (DAX || DAX=n)
help
Support for dcss block device
@@ -14,7 +14,6 @@ config DCSSBLK_DAX
depends on DCSSBLK
# requires S390 ZONE_DEVICE support
depends on BROKEN
- select DAX
prompt "DCSSBLK DAX support"
help
Enable DAX operation for the dcss block device
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index dc4bd422b601..486db5b2f05d 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -891,7 +891,7 @@ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
status = mmio_init_fn(pdev, base, &mbox);
if (status != MYRB_STATUS_SUCCESS) {
dev_err(&pdev->dev,
- "Failed to enable mailbox, statux %02X\n",
+ "Failed to enable mailbox, status %02X\n",
status);
return false;
}
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 5ea6399e6c9b..10a602d4843a 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -353,9 +353,6 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
/* use generic bandwidth allocation algorithm */
sdw->cdns.bus.compute_params = sdw_compute_params;
- /* avoid resuming from pm_runtime suspend if it's not required */
- dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
-
ret = sdw_bus_master_add(bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
@@ -640,7 +637,10 @@ static int __maybe_unused intel_suspend(struct device *dev)
return 0;
}
- if (pm_runtime_suspended(dev)) {
+ /* Prevent runtime PM from racing with the code below. */
+ pm_runtime_disable(dev);
+
+ if (pm_runtime_status_suspended(dev)) {
dev_dbg(dev, "pm_runtime status: suspended\n");
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
@@ -648,7 +648,7 @@ static int __maybe_unused intel_suspend(struct device *dev)
if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
!clock_stop_quirks) {
- if (pm_runtime_suspended(dev->parent)) {
+ if (pm_runtime_status_suspended(dev->parent)) {
/*
* paranoia check: this should not happen with the .prepare
* resume to full power
@@ -715,7 +715,6 @@ static int __maybe_unused intel_resume(struct device *dev)
struct sdw_cdns *cdns = dev_get_drvdata(dev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
- int link_flags;
int ret;
if (bus->prop.hw_disabled || !sdw->startup_done) {
@@ -724,23 +723,6 @@ static int __maybe_unused intel_resume(struct device *dev)
return 0;
}
- if (pm_runtime_suspended(dev)) {
- dev_dbg(dev, "pm_runtime status was suspended, forcing active\n");
-
- /* follow required sequence from runtime_pm.rst */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_enable(dev);
-
- pm_runtime_resume(bus->dev);
-
- link_flags = md_flags >> (bus->link_id * 8);
-
- if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
- pm_runtime_idle(dev);
- }
-
ret = sdw_intel_link_power_up(sdw);
if (ret) {
dev_err(dev, "%s failed: %d\n", __func__, ret);
@@ -761,6 +743,14 @@ static int __maybe_unused intel_resume(struct device *dev)
}
/*
+	 * Runtime PM has been disabled in intel_suspend(), so set the status
+	 * to active (the device has just been resumed) and then re-enable
+	 * runtime PM.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /*
* after system resume, the pm_runtime suspend() may kick in
* during the enumeration, before any children device force the
* master device to remain active. Using pm_runtime_get()
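
The switch from pm_runtime_suspended() to pm_runtime_status_suspended() matters here because the former is defined to return false whenever runtime PM is disabled, which is exactly the state intel_suspend() just created. A paraphrased sketch of the two helpers (modeled on include/linux/pm_runtime.h, with a toy struct in place of struct device):

#include <stdbool.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDED };

struct dev_pm_sketch {
        enum rpm_status runtime_status;
        unsigned int disable_depth;     /* >0 after pm_runtime_disable() */
};

static bool pm_runtime_suspended_sketch(const struct dev_pm_sketch *p)
{
        /* Also demands that runtime PM is still enabled. */
        return p->runtime_status == RPM_SUSPENDED && !p->disable_depth;
}

static bool pm_runtime_status_suspended_sketch(const struct dev_pm_sketch *p)
{
        /* Looks at the status alone, even while runtime PM is disabled. */
        return p->runtime_status == RPM_SUSPENDED;
}

int main(void)
{
        struct dev_pm_sketch d = { RPM_SUSPENDED, 1 };

        printf("%d %d\n", pm_runtime_suspended_sketch(&d),
               pm_runtime_status_suspended_sketch(&d));  /* 0 1: they differ */
        return 0;
}
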
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index a31a1db07aa4..5db0639d3b01 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -596,7 +596,11 @@ u64 spi_mem_calc_op_duration(struct spi_mem_op *op)
ns_per_cycles = 1000000000 / op->max_freq;
ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);
- ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+
+ /* Dummy bytes are optional for some SPI flash memory operations */
+ if (op->dummy.nbytes)
+ ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+
ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);
return ncycles * ns_per_cycles;
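
The guard matters because an op without dummy bytes can also carry a zero dummy buswidth, and the old unconditional division could then divide by zero. A worked userspace example of the cycle accounting with the phase skipped (a hypothetical 1-1-4-4 read at 100 MHz; all numbers illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t ns_per_cycle = 1000000000ull / 100000000; /* 10 ns @ 100 MHz */
        uint64_t ncycles = 0;

        ncycles += (1 * 8) / 1;         /* 1 opcode byte on 1 line:    8 clocks */
        ncycles += (3 * 8) / 4;         /* 3 address bytes on 4 lines: 6 clocks */
        /* dummy phase: nbytes == 0, so it is skipped entirely */
        ncycles += (16 * 8) / 4;        /* 16 data bytes on 4 lines:  32 clocks */

        printf("%llu ns\n", (unsigned long long)(ncycles * ns_per_cycle)); /* 460 ns */
        return 0;
}
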
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index 17eb67e19132..94948c8781e8 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -142,7 +142,7 @@ static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
else if (reg == NAND_READ_LOCATION_1)
snandc->regs->read_location1 = locreg_val;
else if (reg == NAND_READ_LOCATION_2)
- snandc->regs->read_location1 = locreg_val;
+ snandc->regs->read_location2 = locreg_val;
else if (reg == NAND_READ_LOCATION_3)
snandc->regs->read_location3 = locreg_val;
}
@@ -1307,8 +1307,7 @@ static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
snandc->qspi->cmd = cpu_to_le32(cmd);
- qcom_spi_block_erase(snandc);
- return 0;
+ return qcom_spi_block_erase(snandc);
default:
break;
}
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
index 668022098b1e..9ec9823409cc 100644
--- a/drivers/spi/spi-stm32-ospi.c
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -960,6 +960,10 @@ err_pm_resume:
err_pm_enable:
pm_runtime_force_suspend(ospi->dev);
mutex_destroy(&ospi->lock);
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
return ret;
}
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 3822d7c8d8ed..2a8bb798e95b 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
u32 inactive_cycles;
u8 cs_state;
- if (setup->unit != SPI_DELAY_UNIT_SCK ||
- hold->unit != SPI_DELAY_UNIT_SCK ||
- inactive->unit != SPI_DELAY_UNIT_SCK) {
+ if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
SPI_DELAY_UNIT_SCK);
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 7540c20090c7..351f983ef914 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -393,16 +393,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
if (!bytes_available) {
- dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
- reset_ip_core(fifo);
+ dev_err(fifo->dt_device, "received a packet of length 0\n");
ret = -EIO;
goto end_unlock;
}
if (bytes_available > len) {
- dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
+ dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n",
bytes_available, len);
- reset_ip_core(fifo);
ret = -EINVAL;
goto end_unlock;
}
@@ -411,8 +409,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
/* this probably can't happen unless IP
* registers were previously mishandled
*/
- dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
- reset_ip_core(fifo);
+ dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n");
ret = -EIO;
goto end_unlock;
}
@@ -433,7 +430,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
copy * sizeof(u32))) {
- reset_ip_core(fifo);
ret = -EFAULT;
goto end_unlock;
}
@@ -542,7 +538,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
copy * sizeof(u32))) {
- reset_ip_core(fifo);
ret = -EFAULT;
goto end_unlock;
}
@@ -775,9 +770,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo)
goto end;
}
- /* IP sets TDFV to fifo depth - 4 so we will do the same */
- fifo->tx_fifo_depth -= 4;
-
ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
if (ret) {
dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 6c14d7bcdd67..081b17f49863 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
- if (strcmp(buf, "full")) {
+ if (strcmp(buf, "full") == 0) {
gpiod_set_value(chip->rdwr_pin, 1);
chip->mode = AD7816_FULL;
} else {
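
The inverted test is the classic strcmp() pitfall: it returns 0 on equality, so the old code entered the "full" branch for every input except "full". A runnable reduction:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *buf = "full";

        if (strcmp(buf, "full"))        /* old test: true only on MISmatch */
                printf("old test fires\n");
        if (strcmp(buf, "full") == 0)   /* fixed test: true on match */
                printf("new test fires\n");
        return 0;       /* prints only "new test fires" */
}
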
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index b839b50ac26a..fa7ea4ca4c36 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1900,6 +1900,7 @@ static int bcm2835_mmal_probe(struct vchiq_device *device)
__func__, ret);
goto free_dev;
}
+ dev->v4l2_dev.dev = &device->dev;
/* setup v4l controls */
ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 5cb6132b8147..7735421e3991 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -7265,8 +7265,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
return err;
}
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 1b19b5647495..69c1df0f4ca5 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -131,15 +131,12 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
vmbus_device_unregister(channel->device_obj);
}
-/* Sysfs API to allow mmap of the ring buffers
+/* Function used for mmap of ring buffer sysfs interface.
* The ring buffer is allocated as contiguous memory by vmbus_open
*/
-static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int
+hv_uio_ring_mmap(struct vmbus_channel *channel, struct vm_area_struct *vma)
{
- struct vmbus_channel *channel
- = container_of(kobj, struct vmbus_channel, kobj);
void *ring_buffer = page_address(channel->ringbuffer_page);
if (channel->state != CHANNEL_OPENED_STATE)
@@ -149,15 +146,6 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
channel->ringbuffer_pagecount << PAGE_SHIFT);
}
-static const struct bin_attribute ring_buffer_bin_attr = {
- .attr = {
- .name = "ring",
- .mode = 0600,
- },
- .size = 2 * SZ_2M,
- .mmap = hv_uio_ring_mmap,
-};
-
/* Callback from VMBUS subsystem when new channel created. */
static void
hv_uio_new_channel(struct vmbus_channel *new_sc)
@@ -178,8 +166,7 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
/* Disable interrupts on sub channel */
new_sc->inbound.ring_buffer->interrupt_mask = 1;
set_channel_read_mode(new_sc, HV_CALL_ISR);
-
- ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr);
+ ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
if (ret) {
dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
vmbus_close(new_sc);
@@ -350,10 +337,18 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
- ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
- if (ret)
- dev_notice(&dev->device,
- "sysfs create ring bin file failed; %d\n", ret);
+	/*
+	 * This internally calls sysfs_update_group(), which returns non-zero if
+	 * it runs before sysfs_create_group(). That is expected: 'ring' is
+	 * created later in vmbus_device_register() -> vmbus_add_channel_kobj(),
+	 * so there is no need to check the return value or print a warning.
+	 *
+	 * Creating/exposing sysfs in driver probe is not encouraged as it can
+	 * lead to race conditions with userspace. For backward compatibility the
+	 * "ring" file could not be removed or decoupled from uio_hv_generic's
+	 * probe; userspace can use the inotify APIs to detect its creation.
+	 */
+ hv_create_ring_sysfs(channel, hv_uio_ring_mmap);
hv_set_drvdata(dev, pdata);
@@ -375,7 +370,7 @@ hv_uio_remove(struct hv_device *dev)
if (!pdata)
return;
- sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
+ hv_remove_ring_sysfs(dev->channel);
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 87f310841735..4824a10df07e 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -139,6 +139,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
(portsc & PORT_CHANGE_BITS), port_regs);
}
+static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev)
+{
+ struct cdns *cdns = dev_get_drvdata(pdev->dev);
+ __le32 __iomem *reg;
+ void __iomem *base;
+ u32 offset = 0;
+ u32 val;
+
+ if (!cdns->override_apb_timeout)
+ return;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+ reg = base + offset + REG_CHICKEN_BITS_3_OFFSET;
+
+ val = le32_to_cpu(readl(reg));
+ val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout);
+ writel(cpu_to_le32(val), reg);
+}
+
static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
__le32 __iomem *reg;
@@ -1773,6 +1793,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
pdev->rev_cap = reg;
+ pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision);
+
dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
readl(&pdev->rev_cap->ctrl_revision),
readl(&pdev->rev_cap->rtl_revision),
@@ -1798,6 +1820,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev)
pdev->hci_version = HC_VERSION(pdev->hcc_params);
pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
+ /*
+ * Override the APB timeout value to give the controller more time for
+ * enabling UTMI clock and synchronizing APB and UTMI clock domains.
+	 * This fix is platform specific and is required to fix an issue with
+	 * reading an incorrect value from the PORTSC register after resuming
+	 * from the L1 state.
+ */
+ cdnsp_set_apb_timeout_value(pdev);
+
cdnsp_get_rev_cap(pdev);
/* Make sure the Device Controller is halted. */
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index 84887dfea763..12534be52f39 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -520,6 +520,9 @@ struct cdnsp_rev_cap {
#define REG_CHICKEN_BITS_2_OFFSET 0x48
#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
+#define REG_CHICKEN_BITS_3_OFFSET 0x4C
+#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val))
+
/* XBUF Extended Capability ID. */
#define XBUF_CAP_ID 0xCB
#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
@@ -1357,6 +1360,7 @@ struct cdnsp_port {
* @rev_cap: Controller Capabilities Registers.
* @hcs_params1: Cached register copies of read-only HCSPARAMS1
* @hcc_params: Cached register copies of read-only HCCPARAMS1
+ * @rtl_revision: Cached controller rtl revision.
* @setup: Temporary buffer for setup packet.
* @ep0_preq: Internal allocated request used during enumeration.
* @ep0_stage: ep0 stage during enumeration process.
@@ -1411,6 +1415,8 @@ struct cdnsp_device {
__u32 hcs_params1;
__u32 hcs_params3;
__u32 hcc_params;
+ #define RTL_REVISION_NEW_LPM 0x2700
+ __u32 rtl_revision;
/* Lock used in interrupt thread context. */
spinlock_t lock;
struct usb_ctrlrequest setup;
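
For reference, the CHICKEN_APB_TIMEOUT_SET() macro added above is a plain read-modify-write over the low 22 bits of the chicken register. A minimal standalone sketch (userspace C with a simplified 32-bit GENMASK; 0x1C20 is the CHICKEN_APB_TIMEOUT_VALUE used by the PCI glue further down):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))
    #define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val))

    int main(void)
    {
            uint32_t reg = 0xdeadbeef;      /* arbitrary prior register contents */
            uint32_t val = 0x1C20;          /* CHICKEN_APB_TIMEOUT_VALUE */

            reg = CHICKEN_APB_TIMEOUT_SET(reg, val);
            printf("0x%08x\n", (unsigned int)reg);  /* 0xde801c20: top 10 bits kept */
            return 0;
    }
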
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index a51144504ff3..8c361b8394e9 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -28,6 +28,8 @@
#define PCI_DRIVER_NAME "cdns-pci-usbssp"
#define PLAT_DRIVER_NAME "cdns-usbssp"
+#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20
+
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
/*
@@ -139,6 +141,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
cdnsp->otg_irq = pdev->irq;
}
+ /*
+	 * Cadence PCI-based platforms require a longer APB timeout to
+	 * fix a domain clock synchronization issue after resuming the
+	 * controller from L1 state.
+ */
+ cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE;
+ pci_set_drvdata(pdev, cdnsp);
+
if (pci_is_enabled(func)) {
cdnsp->dev = dev;
cdnsp->gadget_init = cdnsp_gadget_init;
@@ -148,8 +158,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
goto free_cdnsp;
}
- pci_set_drvdata(pdev, cdnsp);
-
device_wakeup_enable(&pdev->dev);
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 46852529499d..fd06cb85c4ea 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
writel(db_value, reg_addr);
- cdnsp_force_l0_go(pdev);
+ if (pdev->rtl_revision < RTL_REVISION_NEW_LPM)
+ cdnsp_force_l0_go(pdev);
/* Doorbell was set. */
return true;
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 921cccf1ca9d..801be9e61340 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -79,6 +79,8 @@ struct cdns3_platform_data {
* @pdata: platform data from glue layer
* @lock: spinlock structure
* @xhci_plat_data: xhci private data structure pointer
+ * @override_apb_timeout: holds the APB timeout value. If 0, the default
+ *			value in CHICKEN_BITS_3 is preserved.
* @gadget_init: pointer to gadget initialization function
*/
struct cdns {
@@ -117,6 +119,7 @@ struct cdns {
struct cdns3_platform_data *pdata;
spinlock_t lock;
struct xhci_plat_priv *xhci_plat_data;
+ u32 override_apb_timeout;
int (*gadget_init)(struct cdns *cdns);
};
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 34e46ef308ab..740d2d2b19fb 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -482,6 +482,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
u8 *buffer;
u8 tag;
int rv;
+ long wait_rv;
dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
data->iin_ep_present);
@@ -511,16 +512,17 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
}
if (data->iin_ep_present) {
- rv = wait_event_interruptible_timeout(
+ wait_rv = wait_event_interruptible_timeout(
data->waitq,
atomic_read(&data->iin_data_valid) != 0,
file_data->timeout);
- if (rv < 0) {
- dev_dbg(dev, "wait interrupted %d\n", rv);
+ if (wait_rv < 0) {
+ dev_dbg(dev, "wait interrupted %ld\n", wait_rv);
+ rv = wait_rv;
goto exit;
}
- if (rv == 0) {
+ if (wait_rv == 0) {
dev_dbg(dev, "wait timed out\n");
rv = -ETIMEDOUT;
goto exit;
@@ -539,6 +541,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv);
+ rv = 0;
+
exit:
/* bump interrupt bTag */
data->iin_bTag += 1;
@@ -602,9 +606,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
{
struct usbtmc_device_data *data = file_data->data;
struct device *dev = &data->intf->dev;
- int rv;
u32 timeout;
unsigned long expire;
+ long wait_rv;
if (!data->iin_ep_present) {
dev_dbg(dev, "no interrupt endpoint present\n");
@@ -618,25 +622,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
mutex_unlock(&data->io_mutex);
- rv = wait_event_interruptible_timeout(
- data->waitq,
- atomic_read(&file_data->srq_asserted) != 0 ||
- atomic_read(&file_data->closing),
- expire);
+ wait_rv = wait_event_interruptible_timeout(
+ data->waitq,
+ atomic_read(&file_data->srq_asserted) != 0 ||
+ atomic_read(&file_data->closing),
+ expire);
mutex_lock(&data->io_mutex);
/* Note! disconnect or close could be called in the meantime */
if (atomic_read(&file_data->closing) || data->zombie)
- rv = -ENODEV;
+ return -ENODEV;
- if (rv < 0) {
- /* dev can be invalid now! */
- pr_debug("%s - wait interrupted %d\n", __func__, rv);
- return rv;
+ if (wait_rv < 0) {
+ dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv);
+ return wait_rv;
}
- if (rv == 0) {
+ if (wait_rv == 0) {
dev_dbg(dev, "%s - wait timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -830,6 +833,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
unsigned long expire;
int bufcount = 1;
int again = 0;
+ long wait_rv;
/* mutex already locked */
@@ -942,19 +946,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
if (!(flags & USBTMC_FLAG_ASYNC)) {
dev_dbg(dev, "%s: before wait time %lu\n",
__func__, expire);
- retval = wait_event_interruptible_timeout(
+ wait_rv = wait_event_interruptible_timeout(
file_data->wait_bulk_in,
usbtmc_do_transfer(file_data),
expire);
- dev_dbg(dev, "%s: wait returned %d\n",
- __func__, retval);
+ dev_dbg(dev, "%s: wait returned %ld\n",
+ __func__, wait_rv);
+
+ if (wait_rv < 0) {
+ retval = wait_rv;
+ goto error;
+ }
- if (retval <= 0) {
- if (retval == 0)
- retval = -ETIMEDOUT;
+ if (wait_rv == 0) {
+ retval = -ETIMEDOUT;
goto error;
}
+
}
urb = usb_get_from_anchor(&file_data->in_anchor);
@@ -1380,7 +1389,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
if (!buffer)
return -ENOMEM;
- mutex_lock(&data->io_mutex);
+ retval = mutex_lock_interruptible(&data->io_mutex);
+ if (retval < 0)
+ goto exit_nolock;
+
if (data->zombie) {
retval = -ENODEV;
goto exit;
@@ -1503,6 +1515,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
exit:
mutex_unlock(&data->io_mutex);
+exit_nolock:
kfree(buffer);
return retval;
}
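
The recurring shape in the usbtmc hunks above: wait_event_interruptible_timeout() returns a long (remaining jiffies on success, 0 on timeout, negative on interruption), so its result must not be funneled through the int status variable the function ultimately returns, otherwise leftover jiffies leak out as a bogus positive return. A minimal sketch of the corrected pattern with a stubbed-out wait (hypothetical helper names; only -ETIMEDOUT is real):

    #include <errno.h>
    #include <stdio.h>

    /* stub: 0 = timed out, <0 = interrupted, >0 = remaining jiffies */
    static long wait_stub(int condition_met, long timeout)
    {
            return condition_met ? timeout : 0;
    }

    static int get_stb(int condition_met, long timeout)
    {
            long wait_rv = wait_stub(condition_met, timeout);

            if (wait_rv < 0)
                    return (int)wait_rv;    /* interrupted: propagate */
            if (wait_rv == 0)
                    return -ETIMEDOUT;

            return 0;       /* success: don't leak remaining jiffies */
    }

    int main(void)
    {
            printf("%d\n", get_stb(1, 5000));       /* 0, not 5000 */
            printf("%d\n", get_stb(0, 5000));       /* -ETIMEDOUT */
            return 0;
    }
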
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index aaa39e663f60..27eae4cf223d 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1164,6 +1164,9 @@ struct dwc3_scratchpad_array {
* @gsbuscfg0_reqinfo: store GSBUSCFG0.DATRDREQINFO, DESRDREQINFO,
* DATWRREQINFO, and DESWRREQINFO value passed from
* glue driver.
+ * @wakeup_pending_funcs: Bitmap of interfaces that have requested function
+ *			wakeup; each set bit position represents an
+ *			interface_id.
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1394,6 +1397,7 @@ struct dwc3 {
int num_ep_resized;
struct dentry *debug_root;
u32 gsbuscfg0_reqinfo;
+ u32 wakeup_pending_funcs;
};
#define INCRX_BURST_MODE 0
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8c30d86cc4e3..321361288935 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -276,8 +276,6 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
return ret;
}
-static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async);
-
/**
* dwc3_send_gadget_ep_cmd - issue an endpoint command
* @dep: the endpoint to which the command is going to be issued
@@ -2359,10 +2357,8 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
return __dwc3_gadget_get_frame(dwc);
}
-static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
+static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
- int retries;
-
int ret;
u32 reg;
@@ -2390,8 +2386,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
return -EINVAL;
}
- if (async)
- dwc3_gadget_enable_linksts_evts(dwc, true);
+ dwc3_gadget_enable_linksts_evts(dwc, true);
ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
if (ret < 0) {
@@ -2410,27 +2405,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
/*
* Since link status change events are enabled we will receive
- * an U0 event when wakeup is successful. So bail out.
+	 * a U0 event when wakeup is successful.
*/
- if (async)
- return 0;
-
- /* poll until Link State changes to ON */
- retries = 20000;
-
- while (retries--) {
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
-
- /* in HS, means ON */
- if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
- break;
- }
-
- if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
- dev_err(dwc->dev, "failed to send remote wakeup\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -2451,7 +2427,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
spin_unlock_irqrestore(&dwc->lock, flags);
return -EINVAL;
}
- ret = __dwc3_gadget_wakeup(dwc, true);
+ ret = __dwc3_gadget_wakeup(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2479,14 +2455,10 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id)
*/
link_state = dwc3_gadget_get_link_state(dwc);
if (link_state == DWC3_LINK_STATE_U3) {
- ret = __dwc3_gadget_wakeup(dwc, false);
- if (ret) {
- spin_unlock_irqrestore(&dwc->lock, flags);
- return -EINVAL;
- }
- dwc3_resume_gadget(dwc);
- dwc->suspended = false;
- dwc->link_state = DWC3_LINK_STATE_U0;
+ dwc->wakeup_pending_funcs |= BIT(intf_id);
+ ret = __dwc3_gadget_wakeup(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return ret;
}
ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION,
@@ -4353,6 +4325,8 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
{
enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
unsigned int pwropt;
+ int ret;
+ int intf_id;
/*
* WORKAROUND: DWC3 < 2.50a have an issue when configured without
@@ -4428,7 +4402,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
switch (next) {
case DWC3_LINK_STATE_U0:
- if (dwc->gadget->wakeup_armed) {
+ if (dwc->gadget->wakeup_armed || dwc->wakeup_pending_funcs) {
dwc3_gadget_enable_linksts_evts(dwc, false);
dwc3_resume_gadget(dwc);
dwc->suspended = false;
@@ -4451,6 +4425,18 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
}
dwc->link_state = next;
+
+	/* Proceed with func wakeup for any interfaces that have requested it */
+ while (dwc->wakeup_pending_funcs && (next == DWC3_LINK_STATE_U0)) {
+ intf_id = ffs(dwc->wakeup_pending_funcs) - 1;
+ ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION,
+ DWC3_DGCMDPAR_DN_FUNC_WAKE |
+ DWC3_DGCMDPAR_INTF_SEL(intf_id));
+ if (ret)
+ dev_err(dwc->dev, "Failed to send DN wake for intf %d\n", intf_id);
+
+ dwc->wakeup_pending_funcs &= ~BIT(intf_id);
+ }
}
static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
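
A sketch of the bookkeeping these dwc3 hunks introduce: function-wakeup requests that arrive while the link is in U3 are parked in a bitmap, and the U0 link-state event drains it lowest interface first via ffs(). Standalone userspace analogue with hypothetical names:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    static unsigned int wakeup_pending_funcs;

    static void request_func_wakeup(int intf_id)
    {
            wakeup_pending_funcs |= 1u << intf_id;  /* park the request */
    }

    static void on_link_state_u0(void)
    {
            while (wakeup_pending_funcs) {
                    int intf_id = ffs(wakeup_pending_funcs) - 1;

                    printf("send DN_FUNC_WAKE for intf %d\n", intf_id);
                    wakeup_pending_funcs &= ~(1u << intf_id);
            }
    }

    int main(void)
    {
            request_func_wakeup(3);
            request_func_wakeup(0);
            on_link_state_u0();     /* intf 0, then intf 3 */
            return 0;
    }
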
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 869ad99afb48..8dbc132a505e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2011,15 +2011,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (f->get_status) {
status = f->get_status(f);
+
if (status < 0)
break;
- } else {
- /* Set D0 and D1 bits based on func wakeup capability */
- if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) {
- status |= USB_INTRF_STAT_FUNC_RW_CAP;
- if (f->func_wakeup_armed)
- status |= USB_INTRF_STAT_FUNC_RW;
- }
+
+		/* If D5 is not set, the device is not wakeup capable */
+ if (!(f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP))
+ status &= ~(USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW);
}
put_unaligned_le16(status & 0x0000ffff, req->buf);
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 80841de845b0..027226325039 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -892,6 +892,12 @@ static void ecm_resume(struct usb_function *f)
gether_resume(&ecm->port);
}
+static int ecm_get_status(struct usb_function *f)
+{
+ return (f->func_wakeup_armed ? USB_INTRF_STAT_FUNC_RW : 0) |
+ USB_INTRF_STAT_FUNC_RW_CAP;
+}
+
static void ecm_free(struct usb_function *f)
{
struct f_ecm *ecm;
@@ -960,6 +966,7 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
ecm->port.func.disable = ecm_disable;
ecm->port.func.free_func = ecm_free;
ecm->port.func.suspend = ecm_suspend;
+ ecm->port.func.get_status = ecm_get_status;
ecm->port.func.resume = ecm_resume;
return &ecm->port.func;
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index c7fdbc55fb0b..2957316fd3d0 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -1749,6 +1749,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
val = xudc_readl(xudc, CTRL);
val &= ~CTRL_RUN;
xudc_writel(xudc, val, CTRL);
+
+ val = xudc_readl(xudc, ST);
+ if (val & ST_RC)
+ xudc_writel(xudc, ST_RC, ST);
}
dev_info(xudc->dev, "ep %u disabled\n", ep->index);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index a7c934404ebc..62318291f566 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
}
/* Get and enable clock if any specified */
- uhci->clk = devm_clk_get(&pdev->dev, NULL);
+ uhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(uhci->clk)) {
ret = PTR_ERR(uhci->clk);
goto err_rmr;
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index fd7895b24367..0d4ce5734165 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -823,6 +823,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
dma_addr_t deq;
union xhci_trb *evt;
+ enum evtreturn ret = EVT_DONE;
u32 ctrl, portsc;
bool update_erdp = false;
@@ -909,6 +910,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
break;
case TRB_TYPE(TRB_TRANSFER):
dbc_handle_xfer_event(dbc, evt);
+ ret = EVT_XFER_DONE;
break;
default:
break;
@@ -927,7 +929,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
lo_hi_writeq(deq, &dbc->regs->erdp);
}
- return EVT_DONE;
+ return ret;
}
static void xhci_dbc_handle_events(struct work_struct *work)
@@ -936,6 +938,7 @@ static void xhci_dbc_handle_events(struct work_struct *work)
struct xhci_dbc *dbc;
unsigned long flags;
unsigned int poll_interval;
+ unsigned long busypoll_timelimit;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
poll_interval = dbc->poll_interval;
@@ -954,11 +957,21 @@ static void xhci_dbc_handle_events(struct work_struct *work)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
- /* set fast poll rate if there are pending data transfers */
+ /*
+		 * Set the fast poll rate if there are pending OUT transfers, or
+		 * a transfer was recently processed.
+ */
+ busypoll_timelimit = dbc->xfer_timestamp +
+ msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);
+
if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
- !list_empty(&dbc->eps[BULK_IN].list_pending))
+ time_is_after_jiffies(busypoll_timelimit))
poll_interval = 0;
break;
+ case EVT_XFER_DONE:
+ dbc->xfer_timestamp = jiffies;
+ poll_interval = 0;
+ break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index 9dc8f4d8077c..47ac72c2286d 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -96,6 +96,7 @@ struct dbc_ep {
#define DBC_WRITE_BUF_SIZE 8192
#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
#define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */
+#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */
/*
* Private structure for DbC hardware state:
*/
@@ -142,6 +143,7 @@ struct xhci_dbc {
enum dbc_state state;
struct delayed_work event_work;
unsigned int poll_interval; /* ms */
+ unsigned long xfer_timestamp;
unsigned resume_required:1;
struct dbc_ep eps[2];
@@ -187,6 +189,7 @@ struct dbc_request {
enum evtreturn {
EVT_ERR = -1,
EVT_DONE,
+ EVT_XFER_DONE,
EVT_GSER,
EVT_DISC,
};
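
Taken together, the two xhci-dbgcap hunks implement a simple inactivity window: each transfer event stamps xfer_timestamp, and polling stays at the fast (zero-delay) rate until DBC_XFER_INACTIVITY_TIMEOUT of quiet time has passed. A minimal sketch with jiffies replaced by a plain counter (one tick = 1 ms, purely illustrative):

    #include <stdio.h>

    #define DBC_XFER_INACTIVITY_TIMEOUT 10  /* "ms" */
    #define DBC_POLL_INTERVAL_DEFAULT   64

    static unsigned long jiffies;           /* stub clock */
    static unsigned long xfer_timestamp;

    static unsigned int next_poll_interval(int xfer_done)
    {
            if (xfer_done)
                    xfer_timestamp = jiffies;

            /* still inside the busy-poll window? */
            if (jiffies - xfer_timestamp < DBC_XFER_INACTIVITY_TIMEOUT)
                    return 0;

            return DBC_POLL_INTERVAL_DEFAULT;
    }

    int main(void)
    {
            printf("%u\n", next_poll_interval(1));  /* 0: transfer just done */
            jiffies += 5;
            printf("%u\n", next_poll_interval(0));  /* 0: within window */
            jiffies += 20;
            printf("%u\n", next_poll_interval(0));  /* 64: window expired */
            return 0;
    }
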
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b906bc2eea5f..423bf3649570 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -699,7 +699,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
int new_cycle;
dma_addr_t addr;
u64 hw_dequeue;
- bool cycle_found = false;
+ bool hw_dequeue_found = false;
bool td_last_trb_found = false;
u32 trb_sct = 0;
int ret;
@@ -715,25 +715,24 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
- new_cycle = hw_dequeue & 0x1;
+ new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE;
/*
- * We want to find the pointer, segment and cycle state of the new trb
- * (the one after current TD's end_trb). We know the cycle state at
- * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are
- * found.
+	 * Walk the ring until both the next TRB and hw_dequeue are found (don't
+	 * move hw_dequeue back if it went forward due to a HW bug). The cycle
+	 * state is loaded from a known-good TRB; track later toggles to maintain it.
*/
do {
- if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
+ if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq)
== (dma_addr_t)(hw_dequeue & ~0xf)) {
- cycle_found = true;
+ hw_dequeue_found = true;
if (td_last_trb_found)
break;
}
if (new_deq == td->end_trb)
td_last_trb_found = true;
- if (cycle_found && trb_is_link(new_deq) &&
+ if (td_last_trb_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
new_cycle ^= 0x1;
@@ -745,7 +744,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
return -EINVAL;
}
- } while (!cycle_found || !td_last_trb_found);
+ } while (!hw_dequeue_found || !td_last_trb_found);
/* Don't update the ring cycle state for the producer (us). */
addr = xhci_trb_virt_to_dma(new_seg, new_deq);
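
The dequeue-move rework seeds the cycle state from the TD's known-good end TRB and then tracks toggles while walking forward, instead of trusting hw_dequeue's cycle bit. The toggle-tracking in isolation (simplified TRB struct, illustrative only):

    #include <stdio.h>

    struct trb { int is_link; int toggles_cycle; int cycle; };

    int main(void)
    {
            struct trb ring[] = {
                    { 0, 0, 1 },    /* known-good TRB: seed cycle here */
                    { 0, 0, 1 },
                    { 1, 1, 1 },    /* link TRB with toggle-cycle set */
                    { 0, 0, 0 },    /* next segment: producer cycle flipped */
            };
            int new_cycle = ring[0].cycle;

            for (unsigned int i = 1; i < sizeof(ring) / sizeof(ring[0]); i++)
                    if (ring[i].is_link && ring[i].toggles_cycle)
                            new_cycle ^= 1;

            printf("cycle after walk: %d\n", new_cycle);    /* 0 */
            return 0;
    }
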
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index b5c362c2051d..0c7af44d4dae 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1364,6 +1364,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
tegra->otg_usb2_port);
+ pm_runtime_get_sync(tegra->dev);
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
@@ -1393,6 +1394,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
}
tegra_xhci_set_port_power(tegra, true, true);
+ pm_runtime_mark_last_busy(tegra->dev);
} else {
if (tegra->otg_usb3_port >= 0)
@@ -1400,6 +1402,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra_xhci_set_port_power(tegra, true, false);
}
+ pm_runtime_put_autosuspend(tegra->dev);
}
#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
index 75ac3c6aa92d..f5372dfa241a 100644
--- a/drivers/usb/misc/onboard_usb_dev.c
+++ b/drivers/usb/misc/onboard_usb_dev.c
@@ -569,8 +569,14 @@ static void onboard_dev_usbdev_disconnect(struct usb_device *udev)
}
static const struct usb_device_id onboard_dev_id_table[] = {
- { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 HUB */
- { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6500) }, /* CYUSB330x 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6502) }, /* CYUSB330x 2.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6503) }, /* CYUSB33{0,1}x 2.0 HUB, Vendor Mode */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB331x 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB331x 2.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6507) }, /* CYUSB332x 2.0 HUB, Vendor Mode */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6508) }, /* CYUSB332x 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x650a) }, /* CYUSB332x 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 HUB */
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a99db4e025cd..8adf6f954633 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5965,7 +5965,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
case SNK_TRY_WAIT_DEBOUNCE:
if (!tcpm_port_is_sink(port)) {
port->max_wait = 0;
- tcpm_set_state(port, SRC_TRYWAIT, 0);
+ tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE);
}
break;
case SRC_TRY_WAIT:
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 420af5139c70..8aae80b457d7 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -54,7 +54,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
u8 cur = 0;
int ret;
- mutex_lock(&dp->con->lock);
+ if (!ucsi_con_mutex_lock(dp->con))
+ return -ENOTCONN;
if (!dp->override && dp->initialized) {
const struct typec_altmode *p = typec_altmode_get_partner(alt);
@@ -100,7 +101,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
schedule_work(&dp->work);
ret = 0;
err_unlock:
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return ret;
}
@@ -112,7 +113,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
u64 command;
int ret = 0;
- mutex_lock(&dp->con->lock);
+ if (!ucsi_con_mutex_lock(dp->con))
+ return -ENOTCONN;
if (!dp->override) {
const struct typec_altmode *p = typec_altmode_get_partner(alt);
@@ -144,7 +146,7 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
schedule_work(&dp->work);
out_unlock:
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return ret;
}
@@ -202,20 +204,21 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
int cmd = PD_VDO_CMD(header);
int svdm_version;
- mutex_lock(&dp->con->lock);
+ if (!ucsi_con_mutex_lock(dp->con))
+ return -ENOTCONN;
if (!dp->override && dp->initialized) {
const struct typec_altmode *p = typec_altmode_get_partner(alt);
dev_warn(&p->dev,
"firmware doesn't support alternate mode overriding\n");
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return -EOPNOTSUPP;
}
svdm_version = typec_altmode_get_svdm_version(alt);
if (svdm_version < 0) {
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return svdm_version;
}
@@ -259,7 +262,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
break;
}
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return 0;
}
@@ -296,6 +299,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
if (!dp)
return;
+ cancel_work_sync(&dp->work);
+
dp->data.conf = 0;
dp->data.status = 0;
dp->initialized = false;
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index e8c7e9dc4930..01ce858a1a2b 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1923,6 +1923,40 @@ void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
/**
+ * ucsi_con_mutex_lock - Acquire the connector mutex
+ * @con: The connector interface to lock
+ *
+ * Returns true on success, false if the connector is disconnected
+ */
+bool ucsi_con_mutex_lock(struct ucsi_connector *con)
+{
+ bool mutex_locked = false;
+ bool connected = true;
+
+ while (connected && !mutex_locked) {
+ mutex_locked = mutex_trylock(&con->lock) != 0;
+ connected = UCSI_CONSTAT(con, CONNECTED);
+ if (connected && !mutex_locked)
+ msleep(20);
+ }
+
+ connected = connected && con->partner;
+ if (!connected && mutex_locked)
+ mutex_unlock(&con->lock);
+
+ return connected;
+}
+
+/**
+ * ucsi_con_mutex_unlock - Release the connector mutex
+ * @con: The connector interface to unlock
+ */
+void ucsi_con_mutex_unlock(struct ucsi_connector *con)
+{
+ mutex_unlock(&con->lock);
+}
+
+/**
* ucsi_create - Allocate UCSI instance
* @dev: Device interface to the PPM (Platform Policy Manager)
* @ops: I/O routines
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 3a2c1762bec1..9c5278a0c5d4 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -94,6 +94,8 @@ int ucsi_register(struct ucsi *ucsi);
void ucsi_unregister(struct ucsi *ucsi);
void *ucsi_get_drvdata(struct ucsi *ucsi);
void ucsi_set_drvdata(struct ucsi *ucsi, void *data);
+bool ucsi_con_mutex_lock(struct ucsi_connector *con);
+void ucsi_con_mutex_unlock(struct ucsi_connector *con);
void ucsi_connector_change(struct ucsi *ucsi, u8 num);
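
ucsi_con_mutex_lock() above is a trylock-until-disconnected loop: keep polling the connector lock while the partner is present, and bail out with failure once it disappears rather than blocking on a connector being torn down (the altmode callers then return -ENOTCONN). A single-threaded userspace analogue of the shape (hypothetical struct and names; the real code sleeps 20 ms between attempts and re-reads UCSI_CONSTAT):

    #include <stdbool.h>
    #include <stdio.h>

    struct conn {
            bool locked;            /* stands in for the mutex */
            bool connected;
            bool has_partner;
    };

    static bool con_lock(struct conn *con)
    {
            bool mutex_locked = false;
            bool connected = true;

            while (connected && !mutex_locked) {
                    if (!con->locked) {     /* mutex_trylock() */
                            con->locked = true;
                            mutex_locked = true;
                    }
                    connected = con->connected;
                    /* msleep(20) between attempts in the real code */
            }

            connected = connected && con->has_partner;
            if (!connected && mutex_locked)
                    con->locked = false;    /* mutex_unlock() */

            return connected;
    }

    int main(void)
    {
            struct conn con = { .connected = true, .has_partner = true };

            printf("%s\n", con_lock(&con) ? "locked" : "-ENOTCONN");
            return 0;
    }
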
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 35f9046af315..6328c3a05bcd 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1646,14 +1646,14 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+ unsigned long addr = vmf->address & ~((PAGE_SIZE << order) - 1);
+ unsigned long pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long pfn = vma_to_pfn(vma) + pgoff;
vm_fault_t ret = VM_FAULT_SIGBUS;
- pfn = vma_to_pfn(vma) + pgoff;
-
- if (order && (pfn & ((1 << order) - 1) ||
- vmf->address & ((PAGE_SIZE << order) - 1) ||
- vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+ if (order && (addr < vma->vm_start ||
+ addr + (PAGE_SIZE << order) > vma->vm_end ||
+ pfn & ((1 << order) - 1))) {
ret = VM_FAULT_FALLBACK;
goto out;
}
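
The vfio fix recomputes the pfn from the fault address aligned down to the mapping order, then falls back to smaller pages unless the whole aligned block lies inside the VMA and the pfn itself is order-aligned. The bounds check in isolation (illustrative constants; a PMD-sized mapping is order 9 with 4 KiB pages):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static int huge_fault_fits(uint64_t vm_start, uint64_t vm_end,
                               uint64_t base_pfn, uint64_t fault_addr,
                               unsigned int order)
    {
            uint64_t addr = fault_addr & ~((PAGE_SIZE << order) - 1);
            uint64_t pgoff = (addr - vm_start) >> PAGE_SHIFT;
            uint64_t pfn = base_pfn + pgoff;

            if (order && (addr < vm_start ||
                          addr + (PAGE_SIZE << order) > vm_end ||
                          (pfn & ((1UL << order) - 1))))
                    return 0;       /* VM_FAULT_FALLBACK */
            return 1;
    }

    int main(void)
    {
            /* 4 MiB VMA; unaligned fault address still maps at order 9 */
            printf("%d\n", huge_fault_fits(0x200000, 0x600000, 0x200,
                                           0x3ff000, 9));       /* 1 */
            return 0;
    }
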
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1f65795cf5d7..ef56a2500ed6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -217,6 +217,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* buffering it.
*/
if (dma_capable(dev, dev_addr, size, true) &&
+ !dma_kmalloc_needs_bounce(dev, size, dir) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
!is_swiotlb_force_bounce(dev))
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 13821e7e825e..9ac0427724a3 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -77,6 +77,7 @@ enum xb_req_state {
struct xb_req_data {
struct list_head list;
wait_queue_head_t wq;
+ struct kref kref;
struct xsd_sockmsg msg;
uint32_t caller_req_id;
enum xsd_sockmsg_type type;
@@ -103,6 +104,7 @@ int xb_init_comms(void);
void xb_deinit_comms(void);
int xs_watch_msg(struct xs_watch_event *event);
void xs_request_exit(struct xb_req_data *req);
+void xs_free_req(struct kref *kref);
int xenbus_match(struct device *_dev, const struct device_driver *_drv);
int xenbus_dev_probe(struct device *_dev);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index e5fda0256feb..82df2da1b880 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -309,8 +309,8 @@ static int process_msg(void)
virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
- } else
- kfree(req);
+ }
+ kref_put(&req->kref, xs_free_req);
}
mutex_unlock(&xs_response_mutex);
@@ -386,14 +386,13 @@ static int process_writes(void)
state.req->msg.type = XS_ERROR;
state.req->err = err;
list_del(&state.req->list);
- if (state.req->state == xb_req_state_aborted)
- kfree(state.req);
- else {
+ if (state.req->state != xb_req_state_aborted) {
/* write err, then update state */
virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}
+ kref_put(&state.req->kref, xs_free_req);
mutex_unlock(&xb_write_mutex);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 46f8916597e5..f5c21ba64df5 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req)
mutex_unlock(&u->reply_mutex);
kfree(req->body);
- kfree(req);
+ kref_put(&req->kref, xs_free_req);
kref_put(&u->kref, xenbus_file_free);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 6d32ffb01136..86fe6e779056 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -966,9 +966,15 @@ static int __init xenbus_init(void)
if (xen_pv_domain())
xen_store_domain_type = XS_PV;
if (xen_hvm_domain())
+ {
xen_store_domain_type = XS_HVM;
- if (xen_hvm_domain() && xen_initial_domain())
- xen_store_domain_type = XS_LOCAL;
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (err)
+ goto out_error;
+ xen_store_evtchn = (int)v;
+ if (!v && xen_initial_domain())
+ xen_store_domain_type = XS_LOCAL;
+ }
if (xen_pv_domain() && !xen_start_info->store_evtchn)
xen_store_domain_type = XS_LOCAL;
if (xen_pv_domain() && xen_start_info->store_evtchn)
@@ -987,10 +993,6 @@ static int __init xenbus_init(void)
xen_store_interface = gfn_to_virt(xen_store_gfn);
break;
case XS_HVM:
- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
- if (err)
- goto out_error;
- xen_store_evtchn = (int)v;
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index d32c726f7a12..dcf9182c8451 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -112,6 +112,12 @@ static void xs_suspend_exit(void)
wake_up_all(&xs_state_enter_wq);
}
+void xs_free_req(struct kref *kref)
+{
+ struct xb_req_data *req = container_of(kref, struct xb_req_data, kref);
+ kfree(req);
+}
+
static uint32_t xs_request_enter(struct xb_req_data *req)
{
uint32_t rq_id;
@@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
req->caller_req_id = req->msg.req_id;
req->msg.req_id = xs_request_enter(req);
+	/*
+	 * Take a second ref: one reference for this thread and one for
+	 * the xenbus thread.
+	 */
+ kref_get(&req->kref);
+
mutex_lock(&xb_write_mutex);
list_add_tail(&req->list, &xb_write_list);
notify = list_is_singular(&xb_write_list);
@@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
if (req->state == xb_req_state_queued ||
req->state == xb_req_state_wait_reply)
req->state = xb_req_state_aborted;
- else
- kfree(req);
+
+ kref_put(&req->kref, xs_free_req);
mutex_unlock(&xb_write_mutex);
return ret;
@@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
req->cb = xenbus_dev_queue_reply;
req->par = par;
req->user_req = true;
+ kref_init(&req->kref);
xs_send(req, msg);
@@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t,
req->num_vecs = num_vecs;
req->cb = xs_wake_up;
req->user_req = false;
+ kref_init(&req->kref);
msg.req_id = 0;
msg.tx_id = t.id;
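
The xenbus conversion gives each request two owners, the sending thread and the responder, each dropping its own reference, with xs_free_req() as the kref release function; this way an aborted request is freed by whoever lets go last instead of special-casing the aborted state. A minimal single-threaded analogue of the refcount shape (a plain int stands in for the kref):

    #include <stdio.h>
    #include <stdlib.h>

    struct req {
            int refs;
    };

    static struct req *req_alloc(void)
    {
            struct req *r = calloc(1, sizeof(*r));

            if (!r)
                    abort();
            r->refs = 1;                    /* kref_init() */
            return r;
    }

    static void req_get(struct req *r)      /* kref_get() */
    {
            r->refs++;
    }

    static void req_put(struct req *r)      /* kref_put(..., xs_free_req) */
    {
            if (--r->refs == 0) {
                    printf("freeing req\n");
                    free(r);
            }
    }

    int main(void)
    {
            struct req *r = req_alloc();

            req_get(r);     /* second ref for the responder */
            req_put(r);     /* responder done */
            req_put(r);     /* sender done -> freed */
            return 0;
    }
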
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index effafc3e0ced..7ec022e9361a 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -1422,11 +1422,31 @@ alloc_done:
wp->sectors_free = UINT_MAX;
- open_bucket_for_each(c, &wp->ptrs, ob, i)
+ open_bucket_for_each(c, &wp->ptrs, ob, i) {
+ /*
+ * Ensure proper write alignment - either due to misaligned
+ * bucket sizes (from buggy bcachefs-tools), or writes that mix
+ * logical/physical alignment:
+ */
+ struct bch_dev *ca = ob_dev(c, ob);
+ u64 offset = bucket_to_sector(ca, ob->bucket) +
+ ca->mi.bucket_size -
+ ob->sectors_free;
+ unsigned align = round_up(offset, block_sectors(c)) - offset;
+
+ ob->sectors_free = max_t(int, 0, ob->sectors_free - align);
+
wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
+ }
wp->sectors_free = rounddown(wp->sectors_free, block_sectors(c));
+ /* Did alignment use up space in an open_bucket? */
+ if (unlikely(!wp->sectors_free)) {
+ bch2_alloc_sectors_done(c, wp);
+ goto retry;
+ }
+
BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
return 0;
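
The alignment fixup computes, per open bucket, how many sectors must be skipped so the next write starts on a filesystem-block boundary, then shrinks sectors_free by that amount. The arithmetic in isolation (illustrative numbers):

    #include <stdio.h>

    static unsigned long round_up(unsigned long x, unsigned long a)
    {
            return (x + a - 1) / a * a;
    }

    int main(void)
    {
            unsigned long block_sectors = 8;        /* 4 KiB blocks, 512 B sectors */
            unsigned long offset = 1043;            /* misaligned write offset */
            unsigned long align = round_up(offset, block_sectors) - offset;

            /* next boundary is 1048, so skip 5 sectors */
            printf("skip %lu sectors\n", align);
            return 0;
    }
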
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 7b98ba2dec64..37b69d89341f 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -47,6 +47,27 @@
#define DROP_PREV_NODE 11
#define DID_FILL_FROM_SCAN 12
+/*
+ * Returns false if it's a btree we can easily reconstruct, or that otherwise
+ * won't cause data loss if it's missing:
+ */
+static bool btree_id_important(enum btree_id btree)
+{
+ if (btree_id_is_alloc(btree))
+ return false;
+
+ switch (btree) {
+ case BTREE_ID_quotas:
+ case BTREE_ID_snapshot_trees:
+ case BTREE_ID_logged_ops:
+ case BTREE_ID_rebalance_work:
+ case BTREE_ID_subvolume_children:
+ return false;
+ default:
+ return true;
+ }
+}
+
static const char * const bch2_gc_phase_strs[] = {
#define x(n) #n,
GC_PHASES()
@@ -534,8 +555,10 @@ reconstruct_root:
r->error = 0;
if (!bch2_btree_has_scanned_nodes(c, i)) {
- mustfix_fsck_err(trans, btree_root_unreadable_and_scan_found_nothing,
- "no nodes found for btree %s, continue?", buf.buf);
+ __fsck_err(trans,
+ FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0),
+ btree_root_unreadable_and_scan_found_nothing,
+ "no nodes found for btree %s, continue?", buf.buf);
bch2_btree_root_alloc_fake_trans(trans, i, 0);
} else {
bch2_btree_root_alloc_fake_trans(trans, i, 1);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 5fd4a58d2ad2..60782f3e5aec 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -41,6 +41,7 @@ void bch2_btree_node_io_unlock(struct btree *b)
clear_btree_node_write_in_flight_inner(b);
clear_btree_node_write_in_flight(b);
+ smp_mb__after_atomic();
wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
@@ -1400,6 +1401,7 @@ start:
printbuf_exit(&buf);
clear_btree_node_read_in_flight(b);
+ smp_mb__after_atomic();
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
@@ -1595,6 +1597,7 @@ fsck_err:
printbuf_exit(&buf);
clear_btree_node_read_in_flight(b);
+ smp_mb__after_atomic();
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
@@ -1721,6 +1724,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
set_btree_node_read_error(b);
bch2_btree_lost_data(c, b->c.btree_id);
clear_btree_node_read_in_flight(b);
+ smp_mb__after_atomic();
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
printbuf_exit(&buf);
return;
@@ -2061,8 +2065,10 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start
if (new & (1U << BTREE_NODE_write_in_flight))
__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
- else
+ else {
+ smp_mb__after_atomic();
wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+ }
}
static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
@@ -2175,6 +2181,7 @@ static void btree_node_write_endio(struct bio *bio)
}
clear_btree_node_write_in_flight_inner(b);
+ smp_mb__after_atomic();
wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
INIT_WORK(&wb->work, btree_node_write_work);
queue_work(c->btree_io_complete_wq, &wb->work);
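
The repeated smp_mb__after_atomic() additions all pair a relaxed bit-clear with wake_up_bit(): without a full barrier, the waker may check for waiters before its flag-clear is globally visible, while a waiter that re-checks the flag still sees it set and sleeps through the wakeup. A C11-atomics sketch of the ordering (userspace analogue; the actual wake_up_bit() call is elided):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint flags;
    #define WRITE_IN_FLIGHT 0x1u

    static void io_unlock(void)
    {
            /* clear_bit() is unordered; the fence makes the clear visible
             * before wake_up_bit() would inspect the waitqueue */
            atomic_fetch_and_explicit(&flags, ~WRITE_IN_FLIGHT,
                                      memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
            /* wake_up_bit(&flags, ...) would go here */
    }

    int main(void)
    {
            atomic_fetch_or_explicit(&flags, WRITE_IN_FLIGHT,
                                     memory_order_relaxed);
            io_unlock();
            printf("in flight: %u\n",
                   atomic_load(&flags) & WRITE_IN_FLIGHT);      /* 0 */
            return 0;
    }
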
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index 7d6c971db23c..ade3b5addd75 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -288,7 +288,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
.size = max_t(size_t, keys->size, 8) * 2,
};
- new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL);
+ new_keys.data = bch2_kvmalloc(new_keys.size * sizeof(new_keys.data[0]), GFP_KERNEL);
if (!new_keys.data) {
bch_err(c, "%s: error allocating new key array (size %zu)",
__func__, new_keys.size);
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 44b5fe430370..00307356d7c8 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1389,7 +1389,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
printbuf_exit(&buf);
}
-static void
+static int
bch2_btree_insert_keys_interior(struct btree_update *as,
struct btree_trans *trans,
struct btree_path *path,
@@ -1411,7 +1411,8 @@ bch2_btree_insert_keys_interior(struct btree_update *as,
insert = bkey_next(insert))
bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert);
- if (bch2_btree_node_check_topology(trans, b)) {
+ int ret = bch2_btree_node_check_topology(trans, b);
+ if (ret) {
struct printbuf buf = PRINTBUF;
for (struct bkey_i *k = keys->keys;
@@ -1421,11 +1422,15 @@ bch2_btree_insert_keys_interior(struct btree_update *as,
prt_newline(&buf);
}
- panic("%s(): check_topology error: inserted keys\n%s", __func__, buf.buf);
+ bch2_fs_fatal_error(as->c, "%ps -> %s(): check_topology error %s: inserted keys\n%s",
+ (void *) _RET_IP_, __func__, bch2_err_str(ret), buf.buf);
+ dump_stack();
+ return ret;
}
memmove_u64s_down(keys->keys, insert, keys->top_p - insert->_data);
keys->top_p -= insert->_data - keys->keys_p;
+ return 0;
}
static bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos)
@@ -1559,11 +1564,11 @@ static void __btree_split_node(struct btree_update *as,
* nodes that were coalesced, and thus in the middle of a child node post
* coalescing:
*/
-static void btree_split_insert_keys(struct btree_update *as,
- struct btree_trans *trans,
- btree_path_idx_t path_idx,
- struct btree *b,
- struct keylist *keys)
+static int btree_split_insert_keys(struct btree_update *as,
+ struct btree_trans *trans,
+ btree_path_idx_t path_idx,
+ struct btree *b,
+ struct keylist *keys)
{
struct btree_path *path = trans->paths + path_idx;
@@ -1573,8 +1578,12 @@ static void btree_split_insert_keys(struct btree_update *as,
bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
- bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+ int ret = bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static int btree_split(struct btree_update *as, struct btree_trans *trans,
@@ -1607,8 +1616,10 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
__btree_split_node(as, trans, b, n, keys);
if (keys) {
- btree_split_insert_keys(as, trans, path, n1, keys);
- btree_split_insert_keys(as, trans, path, n2, keys);
+ ret = btree_split_insert_keys(as, trans, path, n1, keys) ?:
+ btree_split_insert_keys(as, trans, path, n2, keys);
+ if (ret)
+ goto err;
BUG_ON(!bch2_keylist_empty(keys));
}
@@ -1654,7 +1665,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
n3->sib_u64s[0] = U16_MAX;
n3->sib_u64s[1] = U16_MAX;
- btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
+ ret = btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
+ if (ret)
+ goto err;
}
} else {
trace_and_count(c, btree_node_compact, trans, b);
@@ -1662,7 +1675,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
n1 = bch2_btree_node_alloc_replacement(as, trans, b);
if (keys) {
- btree_split_insert_keys(as, trans, path, n1, keys);
+ ret = btree_split_insert_keys(as, trans, path, n1, keys);
+ if (ret)
+ goto err;
BUG_ON(!bch2_keylist_empty(keys));
}
@@ -1809,15 +1824,15 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
goto split;
}
- ret = bch2_btree_node_check_topology(trans, b);
+
+ ret = bch2_btree_node_check_topology(trans, b) ?:
+ bch2_btree_insert_keys_interior(as, trans, path, b,
+ path->l[b->c.level].iter, keys);
if (ret) {
bch2_btree_node_unlock_write(trans, path, b);
return ret;
}
- bch2_btree_insert_keys_interior(as, trans, path, b,
- path->l[b->c.level].iter, keys);
-
trans_for_each_path_with_node(trans, b, linked, i)
bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 4ef261e8db4f..31fbc2716d8b 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -604,6 +604,13 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
}
struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
+ if (!bucket_valid(ca, bucket.offset)) {
+ if (insert) {
+ bch2_dev_bucket_missing(ca, bucket.offset);
+ ret = -BCH_ERR_trigger_pointer;
+ }
+ goto err;
+ }
if (flags & BTREE_TRIGGER_transactional) {
struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
@@ -1307,13 +1314,11 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
if (resize) {
- bucket_gens->nbuckets = min(bucket_gens->nbuckets,
- old_bucket_gens->nbuckets);
- bucket_gens->nbuckets_minus_first =
- bucket_gens->nbuckets - bucket_gens->first_bucket;
+ u64 copy = min(bucket_gens->nbuckets,
+ old_bucket_gens->nbuckets);
memcpy(bucket_gens->b,
old_bucket_gens->b,
- bucket_gens->nbuckets);
+ sizeof(bucket_gens->b[0]) * copy);
}
rcu_assign_pointer(ca->bucket_gens, bucket_gens);
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 8d75b27a1418..af1532de4a37 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -44,6 +44,7 @@ static inline void bucket_unlock(struct bucket *b)
BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
+ smp_mb__after_atomic();
wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 92ee59d9e00e..8a680e52c1ed 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -685,7 +685,7 @@ static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subv
vfs_d_type(d.v->d_type));
if (ret)
ctx->pos = d.k->p.offset + 1;
- return ret;
+ return !ret;
}
int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
@@ -710,7 +710,7 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
if (ret2 > 0)
continue;
- ret2 ?: drop_locks_do(trans, bch2_dir_emit(ctx, dirent, target));
+ ret2 ?: (bch2_trans_unlock(trans), bch2_dir_emit(ctx, dirent, target));
})));
bch2_bkey_buf_exit(&sk, c);
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
index 1186280b29e9..2ca3cbf12b71 100644
--- a/fs/bcachefs/disk_groups.c
+++ b/fs/bcachefs/disk_groups.c
@@ -470,23 +470,22 @@ inval:
int __bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name)
{
- struct bch_member *mi;
- int ret, v = -1;
+ lockdep_assert_held(&c->sb_lock);
- if (!strlen(name) || !strcmp(name, "none"))
- return 0;
- v = bch2_disk_path_find_or_create(&c->disk_sb, name);
- if (v < 0)
- return v;
+ if (!strlen(name) || !strcmp(name, "none")) {
+ struct bch_member *mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ SET_BCH_MEMBER_GROUP(mi, 0);
+ } else {
+ int v = bch2_disk_path_find_or_create(&c->disk_sb, name);
+ if (v < 0)
+ return v;
- ret = bch2_sb_disk_groups_to_cpu(c);
- if (ret)
- return ret;
+ struct bch_member *mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ SET_BCH_MEMBER_GROUP(mi, v + 1);
+ }
- mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
- SET_BCH_MEMBER_GROUP(mi, v + 1);
- return 0;
+ return bch2_sb_disk_groups_to_cpu(c);
}
int bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name)
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index a396865e8b17..fff58b78327c 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -2204,10 +2204,10 @@ void bch2_fs_ec_stop(struct bch_fs *c)
static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
- bool ret;
+ sched_annotate_sleep();
mutex_lock(&c->ec_stripe_new_lock);
- ret = list_empty(&c->ec_stripe_new_list);
+ bool ret = list_empty(&c->ec_stripe_new_list);
mutex_unlock(&c->ec_stripe_new_lock);
return ret;
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 62d27e04d763..51893e1ee874 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -160,6 +160,7 @@ static inline void gc_stripe_unlock(struct gc_stripe *s)
BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &s->lock);
+ smp_mb__after_atomic();
wake_up_bit((void *) &s->lock, BUCKET_LOCK_BITNR);
}
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index a615e4852ded..d9ebffa5b3a2 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -269,7 +269,7 @@
x(BCH_ERR_invalid_sb, invalid_sb_downgrade) \
x(BCH_ERR_invalid, invalid_bkey) \
x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
- x(EIO, journal_shutdown) \
+ x(EROFS, journal_shutdown) \
x(EIO, journal_flush_err) \
x(EIO, journal_write_err) \
x(EIO, btree_node_read_err) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 925b0b54ea2f..6b8695b1349c 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -478,7 +478,9 @@ int __bch2_fsck_err(struct bch_fs *c,
} else if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
if (c->opts.errors != BCH_ON_ERROR_continue ||
!(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
- prt_str(out, ", shutting down");
+ prt_str_indented(out, ", shutting down\n"
+ "error not marked as autofix and not in fsck\n"
+ "run fsck, and forward to devs so error can be marked for self-healing");
inconsistent = true;
print = true;
ret = -BCH_ERR_fsck_errors_not_fixed;
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index dca2b8425cc0..e597fb9c9823 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -1056,8 +1056,9 @@ bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bke
static bool want_cached_ptr(struct bch_fs *c, struct bch_io_opts *opts,
struct bch_extent_ptr *ptr)
{
- if (!opts->promote_target ||
- !bch2_dev_in_target(c, ptr->dev, opts->promote_target))
+ unsigned target = opts->promote_target ?: opts->foreground_target;
+
+ if (target && !bch2_dev_in_target(c, ptr->dev, target))
return false;
struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 65c2c33d253d..9657144666b8 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -144,10 +144,25 @@ int __must_check bch2_write_inode_size(struct bch_fs *c,
void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
struct quota_res *quota_res, s64 sectors)
{
- bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
- "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
- inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
- inode->ei_inode.bi_sectors);
+ if (unlikely((s64) inode->v.i_blocks + sectors < 0)) {
+ struct printbuf buf = PRINTBUF;
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf, "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
+ inode->ei_inode.bi_sectors);
+
+ bool repeat = false, print = false, suppress = false;
+ bch2_count_fsck_err(c, vfs_inode_i_blocks_underflow, buf.buf, &repeat, &print, &suppress);
+ if (print)
+ bch2_print_str(c, buf.buf);
+ printbuf_exit(&buf);
+
+ if (sectors < 0)
+ sectors = -inode->v.i_blocks;
+ else
+ sectors = 0;
+ }
+
inode->v.i_blocks += sectors;
#ifdef CONFIG_BCACHEFS_QUOTA
@@ -502,11 +517,22 @@ int bchfs_truncate(struct mnt_idmap *idmap,
goto err;
}
- bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
- !bch2_journal_error(&c->journal), c,
- "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
- inode->v.i_ino, (u64) inode->v.i_blocks,
- inode->ei_inode.bi_sectors);
+ if (unlikely(!inode->v.i_size && inode->v.i_blocks &&
+ !bch2_journal_error(&c->journal))) {
+ struct printbuf buf = PRINTBUF;
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf,
+ "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks,
+ inode->ei_inode.bi_sectors);
+
+ bool repeat = false, print = false, suppress = false;
+ bch2_count_fsck_err(c, vfs_inode_i_blocks_not_zero_at_truncate, buf.buf,
+ &repeat, &print, &suppress);
+ if (print)
+ bch2_print_str(c, buf.buf);
+ printbuf_exit(&buf);
+ }
ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 0f1d61aab90b..b6801861c66f 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -66,6 +66,8 @@ static inline void bch2_inode_flags_to_vfs(struct bch_fs *c, struct bch_inode_in
if (bch2_inode_casefold(c, &inode->ei_inode))
inode->v.i_flags |= S_CASEFOLD;
+ else
+ inode->v.i_flags &= ~S_CASEFOLD;
}
void bch2_inode_update_after_write(struct btree_trans *trans,
@@ -848,10 +850,8 @@ int __bch2_unlink(struct inode *vdir, struct dentry *dentry,
set_nlink(&inode->v, 0);
}
- if (IS_CASEFOLDED(vdir)) {
+ if (IS_CASEFOLDED(vdir))
d_invalidate(dentry);
- d_prune_aliases(&inode->v);
- }
err:
bch2_trans_put(trans);
bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);
@@ -1464,8 +1464,8 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans,
unsigned sectors = cur->kbuf.k->k.size;
s64 offset_into_extent = 0;
enum btree_id data_btree = BTREE_ID_extents;
- int ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent,
- &cur->kbuf);
+ ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent,
+ &cur->kbuf);
if (ret)
goto err;
@@ -2502,10 +2502,9 @@ static int bch2_fs_get_tree(struct fs_context *fc)
bch2_opts_apply(&c->opts, opts);
- /*
- * need to initialise sb and set c->vfs_sb _before_ starting fs,
- * for blk_holder_ops
- */
+ ret = bch2_fs_start(c);
+ if (ret)
+ goto err_stop_fs;
sb = sget(fc->fs_type, NULL, bch2_set_super, fc->sb_flags|SB_NOSEC, c);
ret = PTR_ERR_OR_ZERO(sb);
@@ -2567,9 +2566,10 @@ got_sb:
sb->s_shrink->seeks = 0;
- ret = bch2_fs_start(c);
- if (ret)
- goto err_put_super;
+#ifdef CONFIG_UNICODE
+ sb->s_encoding = c->cf_encoding;
+#endif
+ generic_set_sb_d_ops(sb);
vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
ret = PTR_ERR_OR_ZERO(vinode);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index a418fa62f09d..c1237da079ed 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -255,6 +255,27 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
}
if (i_sectors_delta) {
+ s64 bi_sectors = le64_to_cpu(inode->v.bi_sectors);
+ if (unlikely(bi_sectors + i_sectors_delta < 0)) {
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0",
+ extent_iter->pos.inode, bi_sectors, i_sectors_delta);
+
+ bool repeat = false, print = false, suppress = false;
+ bch2_count_fsck_err(c, inode_i_sectors_underflow, buf.buf,
+ &repeat, &print, &suppress);
+ if (print)
+ bch2_print_str(c, buf.buf);
+ printbuf_exit(&buf);
+
+ if (i_sectors_delta < 0)
+ i_sectors_delta = -bi_sectors;
+ else
+ i_sectors_delta = 0;
+ }
+
le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
inode_update_flags = 0;
}
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 2a54ac79189b..ded18a94ed02 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -19,6 +19,7 @@
#include <linux/ioprio.h>
#include <linux/string_choices.h>
+#include <linux/sched/sysctl.h>
void bch2_journal_pos_from_member_info_set(struct bch_fs *c)
{
@@ -1262,7 +1263,8 @@ int bch2_journal_read(struct bch_fs *c,
degraded = true;
}
- closure_sync(&jlist.cl);
+ while (closure_sync_timeout(&jlist.cl, sysctl_hung_task_timeout_secs * HZ / 2))
+ ;
if (jlist.ret)
return jlist.ret;
@@ -1782,7 +1784,7 @@ static CLOSURE_CALLBACK(journal_write_submit)
struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
if (!ca) {
/* XXX: fix this */
- bch_err(c, "missing device for journal write\n");
+ bch_err(c, "missing device %u for journal write", ptr->dev);
continue;
}
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index ea670c3c43d8..976464d8a695 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -266,10 +266,11 @@ out:
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
- bool ret;
-
spin_lock(&j->lock);
- ret = ja->discard_idx != ja->dirty_idx_ondisk;
+ unsigned min_free = max(4, ja->nr / 8);
+
+ bool ret = bch2_journal_dev_buckets_available(j, ja, journal_space_discarded) < min_free &&
+ ja->discard_idx != ja->dirty_idx_ondisk;
spin_unlock(&j->lock);
return ret;
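
should_discard_bucket() now keeps a reserve instead of discarding eagerly: journal buckets are only discarded while fewer than max(4, nr/8) discarded-and-free buckets remain. The threshold logic in isolation (hypothetical helper, illustrative numbers):

    #include <stdio.h>

    static int should_discard(unsigned int nr, unsigned int discarded_free,
                              int have_undiscarded)
    {
            unsigned int min_free = nr / 8 > 4 ? nr / 8 : 4;

            return discarded_free < min_free && have_undiscarded;
    }

    int main(void)
    {
            printf("%d\n", should_discard(64, 3, 1));       /* 1: below 8 */
            printf("%d\n", should_discard(64, 20, 1));      /* 0: reserve met */
            return 0;
    }
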
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index fc396b9fa754..dfdbb9259985 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -784,7 +784,8 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
goto err;
ret = bch2_btree_write_buffer_tryflush(trans);
- bch_err_msg(c, ret, "flushing btree write buffer");
+ if (!bch2_err_matches(ret, EROFS))
+ bch_err_msg(c, ret, "flushing btree write buffer");
if (ret)
goto err;
diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c
index 46f3c8b100a9..52c58c6d53d2 100644
--- a/fs/bcachefs/namei.c
+++ b/fs/bcachefs/namei.c
@@ -343,6 +343,9 @@ bool bch2_reinherit_attrs(struct bch_inode_unpacked *dst_u,
bool ret = false;
for (id = 0; id < Inode_opt_nr; id++) {
+ if (!S_ISDIR(dst_u->bi_mode) && id == Inode_opt_casefold)
+ continue;
+
/* Skip attributes that were explicitly set on this inode */
if (dst_u->bi_fields_set & (1 << id))
continue;
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index acb5d845841e..badd0e17ada5 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -20,6 +20,10 @@
* x(version, recovery_passes, errors...)
*/
#define UPGRADE_TABLE() \
+ x(snapshot_2, \
+ RECOVERY_PASS_ALL_FSCK, \
+ BCH_FSCK_ERR_subvol_root_wrong_bi_subvol, \
+ BCH_FSCK_ERR_subvol_not_master_and_not_snapshot) \
x(backpointers, \
RECOVERY_PASS_ALL_FSCK) \
x(inode_v3, \
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index dc53d25c7cbb..3b69a924086f 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -46,7 +46,7 @@ enum bch_fsck_flags {
x(btree_node_unsupported_version, 34, 0) \
x(btree_node_bset_older_than_sb_min, 35, 0) \
x(btree_node_bset_newer_than_sb, 36, 0) \
- x(btree_node_data_missing, 37, 0) \
+ x(btree_node_data_missing, 37, FSCK_AUTOFIX) \
x(btree_node_bset_after_end, 38, 0) \
x(btree_node_replicas_sectors_written_mismatch, 39, 0) \
x(btree_node_replicas_data_mismatch, 40, 0) \
@@ -205,9 +205,9 @@ enum bch_fsck_flags {
x(snapshot_bad_depth, 184, 0) \
x(snapshot_bad_skiplist, 185, 0) \
x(subvol_pos_bad, 186, 0) \
- x(subvol_not_master_and_not_snapshot, 187, 0) \
+ x(subvol_not_master_and_not_snapshot, 187, FSCK_AUTOFIX) \
x(subvol_to_missing_root, 188, 0) \
- x(subvol_root_wrong_bi_subvol, 189, 0) \
+ x(subvol_root_wrong_bi_subvol, 189, FSCK_AUTOFIX) \
x(bkey_in_missing_snapshot, 190, 0) \
x(inode_pos_inode_nonzero, 191, 0) \
x(inode_pos_blockdev_range, 192, 0) \
@@ -236,6 +236,9 @@ enum bch_fsck_flags {
x(inode_has_child_snapshots_wrong, 287, 0) \
x(inode_unreachable, 210, FSCK_AUTOFIX) \
x(inode_journal_seq_in_future, 299, FSCK_AUTOFIX) \
+ x(inode_i_sectors_underflow, 312, FSCK_AUTOFIX) \
+ x(vfs_inode_i_blocks_underflow, 311, FSCK_AUTOFIX) \
+ x(vfs_inode_i_blocks_not_zero_at_truncate, 313, FSCK_AUTOFIX) \
x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \
x(deleted_inode_missing, 212, FSCK_AUTOFIX) \
x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \
@@ -317,7 +320,9 @@ enum bch_fsck_flags {
x(directory_size_mismatch, 303, FSCK_AUTOFIX) \
x(dirent_cf_name_too_big, 304, 0) \
x(dirent_stray_data_after_cf_name, 305, 0) \
- x(MAX, 308, 0)
+ x(rebalance_work_incorrectly_set, 309, FSCK_AUTOFIX) \
+ x(rebalance_work_incorrectly_unset, 310, FSCK_AUTOFIX) \
+ x(MAX, 314, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index 116131f95815..72779912939b 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -15,9 +15,11 @@ void bch2_dev_missing(struct bch_fs *c, unsigned dev)
bch2_fs_inconsistent(c, "pointer to nonexistent device %u", dev);
}
-void bch2_dev_bucket_missing(struct bch_fs *c, struct bpos bucket)
+void bch2_dev_bucket_missing(struct bch_dev *ca, u64 bucket)
{
- bch2_fs_inconsistent(c, "pointer to nonexistent bucket %llu:%llu", bucket.inode, bucket.offset);
+ bch2_fs_inconsistent(ca->fs,
+ "pointer to nonexistent bucket %llu on device %s (valid range %u-%llu)",
+ bucket, ca->name, ca->mi.first_bucket, ca->mi.nbuckets);
}
#define x(t, n, ...) [n] = #t,
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index 06bb41a3f360..42786657522c 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -249,20 +249,23 @@ static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
- if (ca && !bucket_valid(ca, bucket.offset)) {
+ if (ca && unlikely(!bucket_valid(ca, bucket.offset))) {
bch2_dev_put(ca);
ca = NULL;
}
return ca;
}
-void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);
+void bch2_dev_bucket_missing(struct bch_dev *, u64);
static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
{
- struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
- if (!ca)
- bch2_dev_bucket_missing(c, bucket);
+ struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode);
+ if (ca && unlikely(!bucket_valid(ca, bucket.offset))) {
+ bch2_dev_bucket_missing(ca, bucket.offset);
+ bch2_dev_put(ca);
+ ca = NULL;
+ }
return ca;
}
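The reworked helper takes the reference before validating and drops it on failure, so callers never see a device reference paired with an out-of-range bucket. A hedged userspace sketch of that get-check-put shape (all types hypothetical):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the bcachefs device type. */
struct dev {
	long refcount;
	unsigned first_bucket;
	unsigned long long nbuckets;
};

static bool bucket_valid(const struct dev *ca, unsigned long long b)
{
	return b >= ca->first_bucket && b < ca->nbuckets;
}

static void dev_put(struct dev *ca)
{
	ca->refcount--;
}

/* Take the reference first, validate second, and drop the reference
 * before returning NULL. */
static struct dev *dev_bucket_tryget(struct dev *ca,
				     unsigned long long bucket)
{
	if (!ca)
		return NULL;
	ca->refcount++;
	if (!bucket_valid(ca, bucket)) {
		/* report the bad bucket here, while ca is still held */
		dev_put(ca);
		return NULL;
	}
	return ca;
}

int main(void)
{
	struct dev d = { .refcount = 1, .first_bucket = 16, .nbuckets = 1024 };

	/* bucket 8 is below first_bucket, so the helper must fail */
	return dev_bucket_tryget(&d, 8) == NULL && d.refcount == 1 ? 0 : 1;
}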
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index 5537283d0bea..d0209f7658bb 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -6,6 +6,7 @@
#include "errcode.h"
#include "error.h"
#include "fs.h"
+#include "recovery_passes.h"
#include "snapshot.h"
#include "subvolume.h"
@@ -44,8 +45,8 @@ static int check_subvol(struct btree_trans *trans,
ret = bch2_snapshot_lookup(trans, snapid, &snapshot);
if (bch2_err_matches(ret, ENOENT))
- bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
- k.k->p.offset, snapid);
+ return bch2_run_explicit_recovery_pass(c,
+ BCH_RECOVERY_PASS_reconstruct_snapshots) ?: ret;
if (ret)
return ret;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index e4ab0595c0ae..84a37d971ffd 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -377,6 +377,11 @@ void bch2_fs_read_only(struct bch_fs *c)
bch_verbose(c, "marking filesystem clean");
bch2_fs_mark_clean(c);
} else {
+ /* Make sure error counts/counters are persisted */
+ mutex_lock(&c->sb_lock);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
bch_verbose(c, "done going read-only, filesystem not clean");
}
}
@@ -531,6 +536,10 @@ static void __bch2_fs_free(struct bch_fs *c)
for (unsigned i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
+#ifdef CONFIG_UNICODE
+ utf8_unload(c->cf_encoding);
+#endif
+
bch2_find_btree_nodes_exit(&c->found_btree_nodes);
bch2_free_pending_node_rewrites(c);
bch2_free_fsck_errs(c);
@@ -823,25 +832,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
if (ret)
goto err;
-#ifdef CONFIG_UNICODE
- /* Default encoding until we can potentially have more as an option. */
- c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING);
- if (IS_ERR(c->cf_encoding)) {
- printk(KERN_ERR "Cannot load UTF-8 encoding for filesystem. Version: %u.%u.%u",
- unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
- unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
- unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
- ret = -EINVAL;
- goto err;
- }
-#else
- if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) {
- printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n");
- ret = -EINVAL;
- goto err;
- }
-#endif
-
pr_uuid(&name, c->sb.user_uuid.b);
ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
if (ret)
@@ -941,6 +931,29 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
if (ret)
goto err;
+#ifdef CONFIG_UNICODE
+ /* Default encoding until we can potentially have more as an option. */
+ c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING);
+ if (IS_ERR(c->cf_encoding)) {
+ printk(KERN_ERR "Cannot load UTF-8 encoding for filesystem. Version: %u.%u.%u",
+ unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
+ unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
+ unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
+ ret = -EINVAL;
+ goto err;
+ }
+ bch_info(c, "Using encoding defined by superblock: utf8-%u.%u.%u",
+ unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
+ unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
+ unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
+#else
+ if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) {
+ printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n");
+ ret = -EINVAL;
+ goto err;
+ }
+#endif
+
for (i = 0; i < c->sb.nr_devices; i++) {
if (!bch2_member_exists(c->disk_sb.sb, i))
continue;
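The relocated block keeps the utf8_load()/IS_ERR() error handling intact; it only runs later, once the superblock fields it logs against are available. A userspace imitation of the kernel's error-pointer convention, for illustration only:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace imitation of the kernel's IS_ERR/PTR_ERR convention:
 * the last page of address space encodes negative errno values. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)  ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *load_encoding(int version)
{
	if (version != 12)	/* pretend only one table is compiled in */
		return ERR_PTR(-EINVAL);
	static int table;	/* stand-in for a real encoding table */
	return &table;
}

int main(void)
{
	void *enc = load_encoding(11);

	if (IS_ERR(enc)) {
		fprintf(stderr, "cannot load encoding: %ld\n", PTR_ERR(enc));
		return 1;
	}
	return 0;
}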
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
index dea73bc1cb51..314a24d15d4e 100644
--- a/fs/bcachefs/thread_with_file.c
+++ b/fs/bcachefs/thread_with_file.c
@@ -455,8 +455,10 @@ ssize_t bch2_stdio_redirect_vprintf(struct stdio_redirect *stdio, bool nonblocki
struct stdio_buf *buf = &stdio->output;
unsigned long flags;
ssize_t ret;
-
again:
+ if (stdio->done)
+ return -EPIPE;
+
spin_lock_irqsave(&buf->lock, flags);
ret = bch2_darray_vprintf(&buf->buf, GFP_NOWAIT, fmt, args);
spin_unlock_irqrestore(&buf->lock, flags);
diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h
index c7916011ef34..67426e33d04e 100644
--- a/fs/bcachefs/xattr_format.h
+++ b/fs/bcachefs/xattr_format.h
@@ -13,7 +13,13 @@ struct bch_xattr {
__u8 x_type;
__u8 x_name_len;
__le16 x_val_len;
- __u8 x_name[] __counted_by(x_name_len);
+ /*
+ * x_name contains both the name and the value, whose combined
+ * length is x_name_len + x_val_len. The introduction of
+ * __counted_by(x_name_len) caused false-positive detection of
+ * an out-of-bounds write.
+ */
+ __u8 x_name[];
} __packed __aligned(8);
#endif /* _BCACHEFS_XATTR_FORMAT_H */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index e7f8ee5d48a4..7f11ef559be6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -606,7 +606,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
free_extent_map(em);
cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
- cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
+ cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS);
if (!cb->compressed_folios) {
ret = BLK_STS_RESOURCE;
goto out_free_bio;
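The fix swaps sizeof(struct page *) for sizeof(struct folio *); both are pointer-sized, so the bug was latent rather than live, and the usual defense is allocating with sizeof(*ptr) so the element type can never drift. A minimal sketch (struct names hypothetical):

#include <errno.h>
#include <stdlib.h>

struct folio;

struct cb {
	struct folio **compressed_folios;
	unsigned int nr_folios;
};

/* sizeof(*ptr) follows the declared type, so the element size can
 * never silently disagree with the array's type again. */
static int alloc_folio_array(struct cb *cb)
{
	cb->compressed_folios = calloc(cb->nr_folios,
				       sizeof(*cb->compressed_folios));
	return cb->compressed_folios ? 0 : -ENOMEM;
}

int main(void)
{
	struct cb cb = { .nr_folios = 8 };
	int ret = alloc_folio_array(&cb);

	free(cb.compressed_folios);
	return ret ? 1 : 0;
}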
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 197f5e51c474..13bdd60da3c7 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2047,7 +2047,7 @@ static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
spin_unlock(&folio->mapping->i_private_lock);
- bit_start++;
+ bit_start += sectors_per_node;
continue;
}
@@ -3508,8 +3508,8 @@ static void btree_clear_folio_dirty_tag(struct folio *folio)
ASSERT(folio_test_locked(folio));
xa_lock_irq(&folio->mapping->i_pages);
if (!folio_test_dirty(folio))
- __xa_clear_mark(&folio->mapping->i_pages,
- folio_index(folio), PAGECACHE_TAG_DIRTY);
+ __xa_clear_mark(&folio->mapping->i_pages, folio->index,
+ PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&folio->mapping->i_pages);
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 2e261892c7bc..f5b28b5c4908 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -298,6 +298,8 @@ static inline int __pure num_extent_pages(const struct extent_buffer *eb)
*/
static inline int __pure num_extent_folios(const struct extent_buffer *eb)
{
+ if (!eb->folios[0])
+ return 0;
if (folio_order(eb->folios[0]))
return 1;
return num_extent_pages(eb);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index cc67d1a2d611..bdafe4d4c4a5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2129,12 +2129,13 @@ next_slot:
/*
* If the found extent starts after requested offset, then
- * adjust extent_end to be right before this extent begins
+ * adjust cur_offset to be right before this extent begins.
*/
if (found_key.offset > cur_offset) {
- extent_end = found_key.offset;
- extent_type = 0;
- goto must_cow;
+ if (cow_start == (u64)-1)
+ cow_start = cur_offset;
+ cur_offset = found_key.offset;
+ goto next_slot;
}
/*
@@ -5681,8 +5682,10 @@ struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
return inode;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ iget_failed(&inode->vfs_inode);
return ERR_PTR(-ENOMEM);
+ }
ret = btrfs_read_locked_inode(inode, path);
btrfs_free_path(path);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2c5edcee9450..c3b2e29e3e01 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1541,8 +1541,8 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
u64 extent_gen;
int ret;
- if (unlikely(!extent_root)) {
- btrfs_err(fs_info, "no valid extent root for scrub");
+ if (unlikely(!extent_root || !csum_root)) {
+ btrfs_err(fs_info, "no valid extent or csum root for scrub");
return -EUCLEAN;
}
memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c8c21c55be53..8e6b6fed7429 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -733,82 +733,6 @@ const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}
-/*
- * We can have very weird soft links passed in.
- * One example is "/proc/self/fd/<fd>", which can be a soft link to
- * a block device.
- *
- * But it's never a good idea to use those weird names.
- * Here we check if the path (not following symlinks) is a good one inside
- * "/dev/".
- */
-static bool is_good_dev_path(const char *dev_path)
-{
- struct path path = { .mnt = NULL, .dentry = NULL };
- char *path_buf = NULL;
- char *resolved_path;
- bool is_good = false;
- int ret;
-
- if (!dev_path)
- goto out;
-
- path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!path_buf)
- goto out;
-
- /*
- * Do not follow soft link, just check if the original path is inside
- * "/dev/".
- */
- ret = kern_path(dev_path, 0, &path);
- if (ret)
- goto out;
- resolved_path = d_path(&path, path_buf, PATH_MAX);
- if (IS_ERR(resolved_path))
- goto out;
- if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
- goto out;
- is_good = true;
-out:
- kfree(path_buf);
- path_put(&path);
- return is_good;
-}
-
-static int get_canonical_dev_path(const char *dev_path, char *canonical)
-{
- struct path path = { .mnt = NULL, .dentry = NULL };
- char *path_buf = NULL;
- char *resolved_path;
- int ret;
-
- if (!dev_path) {
- ret = -EINVAL;
- goto out;
- }
-
- path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!path_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
- if (ret)
- goto out;
- resolved_path = d_path(&path, path_buf, PATH_MAX);
- if (IS_ERR(resolved_path)) {
- ret = PTR_ERR(resolved_path);
- goto out;
- }
- ret = strscpy(canonical, resolved_path, PATH_MAX);
-out:
- kfree(path_buf);
- path_put(&path);
- return ret;
-}
-
static bool is_same_device(struct btrfs_device *device, const char *new_path)
{
struct path old = { .mnt = NULL, .dentry = NULL };
@@ -1513,23 +1437,12 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
bool new_device_added = false;
struct btrfs_device *device = NULL;
struct file *bdev_file;
- char *canonical_path = NULL;
u64 bytenr;
dev_t devt;
int ret;
lockdep_assert_held(&uuid_mutex);
- if (!is_good_dev_path(path)) {
- canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
- if (canonical_path) {
- ret = get_canonical_dev_path(path, canonical_path);
- if (ret < 0) {
- kfree(canonical_path);
- canonical_path = NULL;
- }
- }
- }
/*
* Avoid an exclusive open here, as the systemd-udev may initiate the
* device scan which may race with the user's mount or mkfs command,
@@ -1574,8 +1487,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
goto free_disk_super;
}
- device = device_list_add(canonical_path ? : path, disk_super,
- &new_device_added);
+ device = device_list_add(path, disk_super, &new_device_added);
if (!IS_ERR(device) && new_device_added)
btrfs_free_stale_devices(device->devt, device);
@@ -1584,7 +1496,6 @@ free_disk_super:
error_bdev_put:
fput(bdev_file);
- kfree(canonical_path);
return device;
}
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index 4fa0a0121288..60c7cc4c105c 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -150,10 +150,10 @@ io_retry:
io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9;
attached = 0;
}
- if (!attached++)
- erofs_onlinefolio_split(folio);
if (!bio_add_folio(&io->rq->bio, folio, len, cur))
goto io_retry;
+ if (!attached++)
+ erofs_onlinefolio_split(folio);
io->dev.m_pa += len;
}
cur += len;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index cadec6b1b554..da6ee7c39290 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -357,7 +357,6 @@ static void erofs_default_options(struct erofs_sb_info *sbi)
enum {
Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
Opt_device, Opt_fsid, Opt_domain_id, Opt_directio,
- Opt_err
};
static const struct constant_table erofs_param_cache_strategy[] = {
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 5c061aaeeb45..b8e6b76c23d5 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -79,9 +79,6 @@ struct z_erofs_pcluster {
/* L: whether partial decompression or not */
bool partial;
- /* L: indicate several pageofs_outs or not */
- bool multibases;
-
/* L: whether extra buffer allocations are best-effort */
bool besteffort;
@@ -1046,8 +1043,6 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f,
break;
erofs_onlinefolio_split(folio);
- if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
- f->pcl->multibases = true;
if (f->pcl->length < offset + end - map->m_la) {
f->pcl->length = offset + end - map->m_la;
f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
@@ -1093,7 +1088,6 @@ struct z_erofs_backend {
struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
struct super_block *sb;
struct z_erofs_pcluster *pcl;
-
/* pages with the longest decompressed length for deduplication */
struct page **decompressed_pages;
/* pages to keep the compressed data */
@@ -1102,6 +1096,8 @@ struct z_erofs_backend {
struct list_head decompressed_secondary_bvecs;
struct page **pagepool;
unsigned int onstack_used, nr_pages;
+ /* indicate if temporary copies should be preserved for later use */
+ bool keepxcpy;
};
struct z_erofs_bvec_item {
@@ -1112,18 +1108,20 @@ struct z_erofs_bvec_item {
static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
struct z_erofs_bvec *bvec)
{
+ int poff = bvec->offset + be->pcl->pageofs_out;
struct z_erofs_bvec_item *item;
- unsigned int pgnr;
-
- if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
- (bvec->end == PAGE_SIZE ||
- bvec->offset + bvec->end == be->pcl->length)) {
- pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
- DBG_BUGON(pgnr >= be->nr_pages);
- if (!be->decompressed_pages[pgnr]) {
- be->decompressed_pages[pgnr] = bvec->page;
+ struct page **page;
+
+ if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE ||
+ bvec->offset + bvec->end == be->pcl->length)) {
+ DBG_BUGON((poff >> PAGE_SHIFT) >= be->nr_pages);
+ page = be->decompressed_pages + (poff >> PAGE_SHIFT);
+ if (!*page) {
+ *page = bvec->page;
return;
}
+ } else {
+ be->keepxcpy = true;
}
/* (cold path) one pcluster is requested multiple times */
@@ -1289,7 +1287,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
.alg = pcl->algorithmformat,
.inplace_io = overlapped,
.partial_decoding = pcl->partial,
- .fillgaps = pcl->multibases,
+ .fillgaps = be->keepxcpy,
.gfp = pcl->besteffort ? GFP_KERNEL :
GFP_NOWAIT | __GFP_NORETRY
}, be->pagepool);
@@ -1346,7 +1344,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
pcl->length = 0;
pcl->partial = true;
- pcl->multibases = false;
pcl->besteffort = false;
pcl->bvset.nextpage = NULL;
pcl->vcnt = 0;
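The new keepxcpy flag is computed while binning decompressed bvecs: a page-aligned bvec covering a full page (or the decompressed tail) takes its output slot directly, and anything else forces the temporary copies to be kept for gap filling. A hedged standalone rendering of that decision:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct bvec {
	unsigned long offset, end;
	void *page;
};

/* Returns true if the bvec took a slot directly; otherwise the caller
 * must keep its temporary copies around (the keepxcpy case). */
static bool bin_bvec(const struct bvec *bv, unsigned long pageofs_out,
		     unsigned long total_len, void **slots, bool *keepxcpy)
{
	unsigned long poff = bv->offset + pageofs_out;

	if (!(poff & ~PAGE_MASK) &&
	    (bv->end == PAGE_SIZE || bv->offset + bv->end == total_len)) {
		void **slot = slots + (poff >> PAGE_SHIFT);

		if (!*slot) {
			*slot = bv->page;
			return true;
		}
	}
	*keepxcpy = true;
	return false;
}

int main(void)
{
	void *slots[4] = { NULL };
	bool keepxcpy = false;
	int dummy;
	struct bvec bv = { .offset = 0, .end = PAGE_SIZE, .page = &dummy };

	/* page-aligned, full-page bvec: takes slot 0, keepxcpy stays false */
	printf("%d %d\n", bin_bvec(&bv, 0, 4 * PAGE_SIZE, slots, &keepxcpy),
	       keepxcpy);
	return 0;
}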
diff --git a/fs/namespace.c b/fs/namespace.c
index 98a5cd756e9a..1b466c54a357 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -787,15 +787,11 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
- smp_mb(); // see mntput_no_expire()
+ smp_mb(); // see mntput_no_expire() and do_umount()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
- if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
- mnt_add_count(mnt, -1);
- return 1;
- }
lock_mount_hash();
- if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+ if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
mnt_add_count(mnt, -1);
unlock_mount_hash();
return 1;
@@ -2048,6 +2044,7 @@ static int do_umount(struct mount *mnt, int flags)
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
+ smp_mb(); // paired with __legitimize_mnt()
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
@@ -3560,7 +3557,8 @@ static int can_move_mount_beneath(const struct path *from,
* @mnt_from itself. This defeats the whole purpose of mounting
* @mnt_from beneath @mnt_to.
*/
- if (propagation_would_overmount(parent_mnt_to, mnt_from, mp))
+ if (check_mnt(mnt_from) &&
+ propagation_would_overmount(parent_mnt_to, mnt_from, mp))
return -EINVAL;
return 0;
@@ -3718,15 +3716,14 @@ static int do_move_mount(struct path *old_path,
if (err)
goto out;
- if (is_anon_ns(ns))
- ns->mntns_flags &= ~MNTNS_PROPAGATING;
-
/* if the mount is moved, it should no longer be expire
* automatically */
list_del_init(&old->mnt_expire);
if (attached)
put_mountpoint(old_mp);
out:
+ if (is_anon_ns(ns))
+ ns->mntns_flags &= ~MNTNS_PROPAGATING;
unlock_mount(mp);
if (!err) {
if (attached) {
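The barrier pairing above follows the classic store-buffering pattern: the lockless side increments the count and then reads the sequence, the umount side bumps the sequence and then reads the count, and the paired full barriers forbid both sides missing each other's store. A C11-atomics sketch of that shape (the shapes are illustrative, not the mount API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count;	/* stand-in for the mount refcount */
static atomic_uint seq;		/* stand-in for the mount_lock sequence */

/* Lockless-get side: bump the count, then re-check the sequence. */
static bool legitimize(unsigned int seen_seq)
{
	atomic_fetch_add_explicit(&count, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with busy() */
	if (atomic_load_explicit(&seq, memory_order_relaxed) == seen_seq)
		return true;		/* fast path: nothing changed */
	atomic_fetch_sub_explicit(&count, 1, memory_order_relaxed);
	return false;			/* slow path: retry under the lock */
}

/* Umount side: publish the change, then look at the count. */
static bool busy(void)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with legitimize() */
	return atomic_load_explicit(&count, memory_order_relaxed) != 0;
}

int main(void)
{
	unsigned int s = atomic_load(&seq);

	/* The fences forbid the outcome where legitimize() misses the
	 * sequence bump AND busy() misses the count increment. */
	if (legitimize(s) && busy())
		printf("umount sees the elevated count, as expected\n");
	return 0;
}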
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index cb01ea81724d..d0bcf744c553 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -705,8 +705,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
int blocksize;
int err;
- down_write(&nilfs->ns_sem);
-
blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
if (!blocksize) {
nilfs_err(sb, "unable to set blocksize");
@@ -779,7 +777,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
set_nilfs_init(nilfs);
err = 0;
out:
- up_write(&nilfs->ns_sem);
return err;
failed_sbh:
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index f2d840ae4ded..87f861e9004f 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1961,12 +1961,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
return -EINVAL;
if (mark_cmd == FAN_MARK_FLUSH) {
- if (mark_type == FAN_MARK_MOUNT)
- fsnotify_clear_vfsmount_marks_by_group(group);
- else if (mark_type == FAN_MARK_FILESYSTEM)
- fsnotify_clear_sb_marks_by_group(group);
- else
- fsnotify_clear_inode_marks_by_group(group);
+ fsnotify_clear_marks_by_group(group, obj_type);
return 0;
}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index b8ac85b548c7..821cb7874685 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6918,6 +6918,7 @@ static int ocfs2_grab_folios(struct inode *inode, loff_t start, loff_t end,
if (IS_ERR(folios[numfolios])) {
ret = PTR_ERR(folios[numfolios]);
mlog_errno(ret);
+ folios[numfolios] = NULL;
goto out;
}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index c7a9729dc9d0..e5f58ff2175f 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -174,7 +174,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb)
struct ocfs2_recovery_map *rm;
mutex_init(&osb->recovery_lock);
- osb->disable_recovery = 0;
+ osb->recovery_state = OCFS2_REC_ENABLED;
osb->recovery_thread_task = NULL;
init_waitqueue_head(&osb->recovery_event);
@@ -190,31 +190,53 @@ int ocfs2_recovery_init(struct ocfs2_super *osb)
return 0;
}
-/* we can't grab the goofy sem lock from inside wait_event, so we use
- * memory barriers to make sure that we'll see the null task before
- * being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
- mb();
return osb->recovery_thread_task != NULL;
}
-void ocfs2_recovery_exit(struct ocfs2_super *osb)
+static void ocfs2_recovery_disable(struct ocfs2_super *osb,
+ enum ocfs2_recovery_state state)
{
- struct ocfs2_recovery_map *rm;
-
- /* disable any new recovery threads and wait for any currently
- * running ones to exit. Do this before setting the vol_state. */
mutex_lock(&osb->recovery_lock);
- osb->disable_recovery = 1;
+ /*
+ * If the recovery thread is not running, we can transition directly
+ * to the final state.
+ */
+ if (!ocfs2_recovery_thread_running(osb)) {
+ osb->recovery_state = state + 1;
+ goto out_lock;
+ }
+ osb->recovery_state = state;
+ /* Wait for recovery thread to acknowledge state transition */
+ wait_event_cmd(osb->recovery_event,
+ !ocfs2_recovery_thread_running(osb) ||
+ osb->recovery_state >= state + 1,
+ mutex_unlock(&osb->recovery_lock),
+ mutex_lock(&osb->recovery_lock));
+out_lock:
mutex_unlock(&osb->recovery_lock);
- wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
- /* At this point, we know that no more recovery threads can be
- * launched, so wait for any recovery completion work to
- * complete. */
+ /*
+ * At this point we know that no more recovery work can be queued, so
+ * wait for any recovery completion work to complete.
+ */
if (osb->ocfs2_wq)
flush_workqueue(osb->ocfs2_wq);
+}
+
+void ocfs2_recovery_disable_quota(struct ocfs2_super *osb)
+{
+ ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE);
+}
+
+void ocfs2_recovery_exit(struct ocfs2_super *osb)
+{
+ struct ocfs2_recovery_map *rm;
+
+ /* disable any new recovery threads and wait for any currently
+ * running ones to exit. Do this before setting the vol_state. */
+ ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE);
/*
* Now that recovery is shut down, and the osb is about to be
@@ -1472,6 +1494,18 @@ static int __ocfs2_recovery_thread(void *arg)
}
}
restart:
+ if (quota_enabled) {
+ mutex_lock(&osb->recovery_lock);
+ /* Confirm that recovery thread will no longer recover quotas */
+ if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) {
+ osb->recovery_state = OCFS2_REC_QUOTA_DISABLED;
+ wake_up(&osb->recovery_event);
+ }
+ if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED)
+ quota_enabled = 0;
+ mutex_unlock(&osb->recovery_lock);
+ }
+
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
@@ -1569,27 +1603,29 @@ bail:
ocfs2_free_replay_slots(osb);
osb->recovery_thread_task = NULL;
- mb(); /* sync with ocfs2_recovery_thread_running */
+ if (osb->recovery_state == OCFS2_REC_WANT_DISABLE)
+ osb->recovery_state = OCFS2_REC_DISABLED;
wake_up(&osb->recovery_event);
mutex_unlock(&osb->recovery_lock);
- if (quota_enabled)
- kfree(rm_quota);
+ kfree(rm_quota);
return status;
}
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
+ int was_set = -1;
+
mutex_lock(&osb->recovery_lock);
+ if (osb->recovery_state < OCFS2_REC_WANT_DISABLE)
+ was_set = ocfs2_recovery_map_set(osb, node_num);
trace_ocfs2_recovery_thread(node_num, osb->node_num,
- osb->disable_recovery, osb->recovery_thread_task,
- osb->disable_recovery ?
- -1 : ocfs2_recovery_map_set(osb, node_num));
+ osb->recovery_state, osb->recovery_thread_task, was_set);
- if (osb->disable_recovery)
+ if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE)
goto out;
if (osb->recovery_thread_task)
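The new shutdown protocol is a request/acknowledge handshake: the caller publishes a WANT_DISABLE state and sleeps until the recovery thread bumps it to the acknowledged state (or is found not running). A pthreads sketch of the same handshake, reduced to one state pair:

#include <pthread.h>
#include <stdbool.h>

enum rec_state { REC_ENABLED, REC_WANT_DISABLE, REC_DISABLED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t event = PTHREAD_COND_INITIALIZER;
static enum rec_state state = REC_ENABLED;
static bool thread_running;

/* Shutdown side: publish the request, then wait for the ack. */
static void recovery_disable(void)
{
	pthread_mutex_lock(&lock);
	if (!thread_running) {
		state = REC_DISABLED;	/* nobody to ack; finish directly */
	} else {
		state = REC_WANT_DISABLE;
		while (thread_running && state < REC_DISABLED)
			pthread_cond_wait(&event, &lock);
	}
	pthread_mutex_unlock(&lock);
}

/* Recovery-thread side: acknowledge the request on the way out. */
static void recovery_thread_exit(void)
{
	pthread_mutex_lock(&lock);
	thread_running = false;
	if (state == REC_WANT_DISABLE)
		state = REC_DISABLED;
	pthread_cond_broadcast(&event);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	recovery_thread_exit();	/* the thread never ran in this sketch */
	recovery_disable();	/* completes immediately */
	return state == REC_DISABLED ? 0 : 1;
}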
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index e3c3a35dc5e0..6397170f302f 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
int ocfs2_recovery_init(struct ocfs2_super *osb);
void ocfs2_recovery_exit(struct ocfs2_super *osb);
+void ocfs2_recovery_disable_quota(struct ocfs2_super *osb);
int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
void ocfs2_free_replay_slots(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 51c52768132d..6aaa94c554c1 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -308,6 +308,21 @@ enum ocfs2_journal_trigger_type {
void ocfs2_initialize_journal_triggers(struct super_block *sb,
struct ocfs2_triggers triggers[]);
+enum ocfs2_recovery_state {
+ OCFS2_REC_ENABLED = 0,
+ OCFS2_REC_QUOTA_WANT_DISABLE,
+ /*
+ * Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for
+ * ocfs2_recovery_disable_quota() to work.
+ */
+ OCFS2_REC_QUOTA_DISABLED,
+ OCFS2_REC_WANT_DISABLE,
+ /*
+ * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work
+ */
+ OCFS2_REC_DISABLED,
+};
+
struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
@@ -370,7 +385,7 @@ struct ocfs2_super
struct ocfs2_recovery_map *recovery_map;
struct ocfs2_replay_map *replay_map;
struct task_struct *recovery_thread_task;
- int disable_recovery;
+ enum ocfs2_recovery_state recovery_state;
wait_queue_head_t checkpoint_event;
struct ocfs2_journal *journal;
unsigned long osb_commit_interval;
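The enum comments encode a "+1" layout invariant that the disable path depends on (state + 1 is the acknowledged form of state). If desired, such an invariant can be pinned at compile time; a sketch using C11 static_assert with stand-in names:

#include <assert.h>

enum rec_state {
	REC_ENABLED = 0,
	REC_QUOTA_WANT_DISABLE,
	REC_QUOTA_DISABLED,
	REC_WANT_DISABLE,
	REC_DISABLED,
};

/* Every WANT_DISABLE state must be immediately followed by its
 * acknowledged form, since the disable path does "state + 1". */
static_assert(REC_QUOTA_DISABLED == REC_QUOTA_WANT_DISABLE + 1,
	      "quota ack must be want + 1");
static_assert(REC_DISABLED == REC_WANT_DISABLE + 1,
	      "disable ack must be want + 1");

int main(void)
{
	return 0;	/* the checks above are purely compile-time */
}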
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 2956d888c131..e272429da3db 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -453,8 +453,7 @@ out:
/* Sync changes in local quota file into global quota file and
* reinitialize local quota file.
- * The function expects local quota file to be already locked and
- * s_umount locked in shared mode. */
+ * The function expects local quota file to be already locked. */
static int ocfs2_recover_local_quota_file(struct inode *lqinode,
int type,
struct ocfs2_quota_recovery *rec)
@@ -588,7 +587,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
{
unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
LOCAL_GROUP_QUOTA_SYSTEM_INODE };
- struct super_block *sb = osb->sb;
struct ocfs2_local_disk_dqinfo *ldinfo;
struct buffer_head *bh;
handle_t *handle;
@@ -600,7 +598,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
"slot %u\n", osb->dev_str, slot_num);
- down_read(&sb->s_umount);
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
@@ -677,7 +674,6 @@ out_put:
break;
}
out:
- up_read(&sb->s_umount);
kfree(rec);
return status;
}
@@ -843,8 +839,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
/*
- * s_umount held in exclusive mode protects us against racing with
- * recovery thread...
+ * ocfs2_dismount_volume() has already aborted quota recovery...
*/
if (oinfo->dqi_rec) {
ocfs2_free_quota_recovery(oinfo->dqi_rec);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index f7b483f0de2a..6ac4dcd54588 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -698,10 +698,12 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
ac, cl);
- if (PTR_ERR(bg_bh) == -ENOSPC)
+ if (PTR_ERR(bg_bh) == -ENOSPC) {
+ ac->ac_which = OCFS2_AC_USE_MAIN_DISCONTIG;
bg_bh = ocfs2_block_group_alloc_discontig(handle,
alloc_inode,
ac, cl);
+ }
if (IS_ERR(bg_bh)) {
status = PTR_ERR(bg_bh);
bg_bh = NULL;
@@ -1794,6 +1796,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
{
int status;
u16 chain;
+ u32 contig_bits;
u64 next_group;
struct inode *alloc_inode = ac->ac_inode;
struct buffer_head *group_bh = NULL;
@@ -1819,10 +1822,21 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
status = -ENOSPC;
/* for now, the chain search is a bit simplistic. We just use
* the 1st group with any empty bits. */
- while ((status = ac->ac_group_search(alloc_inode, group_bh,
- bits_wanted, min_bits,
- ac->ac_max_block,
- res)) == -ENOSPC) {
+ while (1) {
+ if (ac->ac_which == OCFS2_AC_USE_MAIN_DISCONTIG) {
+ contig_bits = le16_to_cpu(bg->bg_contig_free_bits);
+ if (!contig_bits)
+ contig_bits = ocfs2_find_max_contig_free_bits(bg->bg_bitmap,
+ le16_to_cpu(bg->bg_bits), 0);
+ if (bits_wanted > contig_bits && contig_bits >= min_bits)
+ bits_wanted = contig_bits;
+ }
+
+ status = ac->ac_group_search(alloc_inode, group_bh,
+ bits_wanted, min_bits,
+ ac->ac_max_block, res);
+ if (status != -ENOSPC)
+ break;
if (!bg->bg_next_group)
break;
@@ -1982,6 +1996,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
victim = ocfs2_find_victim_chain(cl);
ac->ac_chain = victim;
+search:
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
if (!status) {
@@ -2022,6 +2037,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
}
}
+ /* The chains can't supply bits_wanted contiguous bits.
+ * We should switch to using every single bit when allocating
+ * from the global bitmap. */
+ if (i == le16_to_cpu(cl->cl_next_free_rec) &&
+ status == -ENOSPC && ac->ac_which == OCFS2_AC_USE_MAIN) {
+ ac->ac_which = OCFS2_AC_USE_MAIN_DISCONTIG;
+ ac->ac_chain = victim;
+ goto search;
+ }
+
set_hint:
if (status != -ENOSPC) {
/* If the next search of this group is not likely to
@@ -2365,7 +2390,8 @@ int __ocfs2_claim_clusters(handle_t *handle,
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
- && ac->ac_which != OCFS2_AC_USE_MAIN);
+ && ac->ac_which != OCFS2_AC_USE_MAIN
+ && ac->ac_which != OCFS2_AC_USE_MAIN_DISCONTIG);
if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
WARN_ON(min_clusters > 1);
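When falling back to discontiguous allocation, bits_wanted is clamped to the largest contiguous free run in the group, taken from the cached bg_contig_free_bits or recomputed by a bitmap scan. A standalone sketch of such a scan (bit set = allocated; not the ocfs2 helper itself):

#include <stdio.h>

/* Longest run of zero (free) bits in the first nbits of the bitmap. */
static unsigned max_contig_free_bits(const unsigned char *bitmap,
				     unsigned nbits)
{
	unsigned best = 0, run = 0;

	for (unsigned i = 0; i < nbits; i++) {
		if (bitmap[i / 8] & (1u << (i % 8)))
			run = 0;		/* allocated bit ends the run */
		else if (++run > best)
			best = run;
	}
	return best;
}

int main(void)
{
	unsigned char bg[] = { 0x0f, 0xf0 };	/* 0-3 used, 4-11 free, 12-15 used */

	printf("%u\n", max_contig_free_bits(bg, 16));	/* prints 8 */
	return 0;
}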
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index b481b834857d..bcf2ed4a8631 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -29,6 +29,7 @@ struct ocfs2_alloc_context {
#define OCFS2_AC_USE_MAIN 2
#define OCFS2_AC_USE_INODE 3
#define OCFS2_AC_USE_META 4
+#define OCFS2_AC_USE_MAIN_DISCONTIG 5
u32 ac_which;
/* these are used by the chain search */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8bb5022f3082..3d2533950bae 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1812,6 +1812,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
/* Orphan scan should be stopped as early as possible */
ocfs2_orphan_scan_stop(osb);
+ /* Stop quota recovery so that we can disable quotas */
+ ocfs2_recovery_disable_quota(osb);
+
ocfs2_disable_quotas(osb);
/* All dquots should be freed by now */
diff --git a/fs/pnode.c b/fs/pnode.c
index 7a062a5de10e..fb77427df39e 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -150,7 +150,7 @@ static struct mount *propagation_next(struct mount *m,
struct mount *origin)
{
/* are there any slaves of this mount? */
- if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list))
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
return first_slave(m);
while (1) {
@@ -174,7 +174,7 @@ static struct mount *skip_propagation_subtree(struct mount *m,
* Advance m such that propagation_next will not return
* the slaves of m.
*/
- if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list))
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
m = last_slave(m);
return m;
@@ -185,7 +185,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
while (1) {
while (1) {
struct mount *next;
- if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list))
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
return first_slave(m);
next = next_peer(m);
if (m->mnt_group_id == origin->mnt_group_id) {
@@ -226,11 +226,15 @@ static int propagate_one(struct mount *m, struct mountpoint *dest_mp)
struct mount *child;
int type;
/* skip ones added by this propagate_mnt() */
- if (IS_MNT_PROPAGATED(m))
+ if (IS_MNT_NEW(m))
return 0;
- /* skip if mountpoint isn't covered by it */
+ /* skip if mountpoint isn't visible in m */
if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
return 0;
+ /* skip if m is in the anon_ns we are emptying */
+ if (m->mnt_ns->mntns_flags & MNTNS_PROPAGATING)
+ return 0;
+
if (peers(m, last_dest)) {
type = CL_MAKE_SHARED;
} else {
@@ -380,9 +384,6 @@ bool propagation_would_overmount(const struct mount *from,
if (!IS_MNT_SHARED(from))
return false;
- if (IS_MNT_PROPAGATED(to))
- return false;
-
if (to->mnt.mnt_root != mp->m_dentry)
return false;
diff --git a/fs/pnode.h b/fs/pnode.h
index ddafe0d087ca..34b6247af01d 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -12,7 +12,7 @@
#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
#define IS_MNT_SLAVE(m) ((m)->mnt_master)
-#define IS_MNT_PROPAGATED(m) (!(m)->mnt_ns || ((m)->mnt_ns->mntns_flags & MNTNS_PROPAGATING))
+#define IS_MNT_NEW(m) (!(m)->mnt_ns)
#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index fe738623cf1b..240d82c6f908 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -29,7 +29,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
{
struct cached_fid *cfid;
- spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
if (!strcmp(cfid->path, path)) {
/*
@@ -38,25 +37,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
* being deleted due to a lease break.
*/
if (!cfid->time || !cfid->has_lease) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
kref_get(&cfid->refcount);
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
}
if (lookup_only) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
if (cfids->num_entries >= max_cached_dirs) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid = init_cached_dir(path);
if (cfid == NULL) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid->cfids = cfids;
@@ -74,7 +68,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
*/
cfid->has_lease = true;
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
@@ -187,8 +180,10 @@ replay_again:
if (!utf16_path)
return -ENOMEM;
+ spin_lock(&cfids->cfid_list_lock);
cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
if (cfid == NULL) {
+ spin_unlock(&cfids->cfid_list_lock);
kfree(utf16_path);
return -ENOENT;
}
@@ -197,7 +192,6 @@ replay_again:
* Otherwise, it is either a new entry or laundromat worker removed it
* from @cfids->entries. Caller will put last reference if the latter.
*/
- spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease && cfid->time) {
spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 18d67ab113f0..1b79fe07476f 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -1266,10 +1266,9 @@ typedef struct smb_com_query_information_rsp {
typedef struct smb_com_setattr_req {
struct smb_hdr hdr; /* wct = 8 */
__le16 attr;
- __le16 time_low;
- __le16 time_high;
+ __le32 last_write_time;
__le16 reserved[5]; /* must be zero */
- __u16 ByteCount;
+ __le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII */
unsigned char fileName[];
} __attribute__((packed)) SETATTR_REQ;
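The corrected SETATTR_REQ layout stores the write time as a single __le32 and marks ByteCount as little-endian. A portable userspace sketch of filling such little-endian wire fields without relying on host byte order (layout simplified, SMB header omitted):

#include <stdint.h>
#include <string.h>

/* Store values little-endian regardless of host byte order. */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* Simplified byte layout of the SETATTR body after the SMB header. */
struct setattr_body {
	uint8_t attr[2];		/* __le16 attr */
	uint8_t last_write_time[4];	/* __le32, was two __le16 halves */
	uint8_t reserved[10];		/* must be zero */
	uint8_t byte_count[2];		/* __le16 ByteCount */
};

static void fill_body(struct setattr_body *b, uint16_t attr,
		      uint32_t write_time, uint16_t bcc)
{
	memset(b, 0, sizeof(*b));
	put_le16(b->attr, attr);
	put_le32(b->last_write_time, write_time);
	put_le16(b->byte_count, bcc);
}

int main(void)
{
	struct setattr_body b;

	fill_body(&b, 0x0001 /* read-only */, 0x12345678, 42);
	return b.attr[0] == 0x01 && b.last_write_time[0] == 0x78 ? 0 : 1;
}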
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 59f6fdfe560e..ecf774a8f1ca 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -395,6 +395,10 @@ extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon);
extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
+extern int SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, __le32 attributes, __le64 write_time,
+ const struct nls_table *nls_codepage,
+ struct cifs_sb_info *cifs_sb);
extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage,
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 60cb264a01e5..f55457b4b82e 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -5171,6 +5171,63 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
+int
+SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, __le32 attributes, __le64 write_time,
+ const struct nls_table *nls_codepage,
+ struct cifs_sb_info *cifs_sb)
+{
+ SETATTR_REQ *pSMB;
+ SETATTR_RSP *pSMBr;
+ struct timespec64 ts;
+ int bytes_returned;
+ int name_len;
+ int rc;
+
+ cifs_dbg(FYI, "In %s path %s\n", __func__, fileName);
+
+retry:
+ rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+ name_len =
+ cifsConvertToUTF16((__le16 *) pSMB->fileName,
+ fileName, PATH_MAX, nls_codepage,
+ cifs_remap(cifs_sb));
+ name_len++; /* trailing null */
+ name_len *= 2;
+ } else {
+ name_len = copy_path_name(pSMB->fileName, fileName);
+ }
+ /* Only a few attributes can be set by this command; others are not accepted by Win9x. */
+ pSMB->attr = cpu_to_le16(le32_to_cpu(attributes) &
+ (ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE));
+ /* A zero write time value (in both NT and SETATTR formats) means do not change it. */
+ if (le64_to_cpu(write_time) != 0) {
+ ts = cifs_NTtimeToUnix(write_time);
+ pSMB->last_write_time = cpu_to_le32(ts.tv_sec);
+ }
+ pSMB->BufferFormat = 0x04;
+ name_len++; /* account for buffer type byte */
+ inc_rfc1001_len(pSMB, (__u16)name_len);
+ pSMB->ByteCount = cpu_to_le16(name_len);
+
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc)
+ cifs_dbg(FYI, "Send error in %s = %d\n", __func__, rc);
+
+ cifs_buf_release(pSMB);
+
+ if (rc == -EAGAIN)
+ goto retry;
+
+ return rc;
+}
+
/* Some legacy servers such as NT4 require that the file times be set on
an open handle, rather than by pathname - this is awkward due to
potential access conflicts on the open, but it is unavoidable for these
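SMBSetInformation() above converts the NT timestamp (100 ns ticks since 1601) to Unix seconds via cifs_NTtimeToUnix() before writing the 32-bit SETATTR time. The underlying conversion, as a standalone sketch for post-1970 times:

#include <stdint.h>
#include <stdio.h>

/* Seconds between 1601-01-01 and 1970-01-01 */
#define NT_EPOCH_DELTA 11644473600ULL
#define NT_TICKS_PER_SEC 10000000ULL	/* 100 ns ticks */

struct ts { int64_t sec; long nsec; };

/* Rough equivalent of cifs_NTtimeToUnix() for post-1970 times. */
static struct ts nt_to_unix(uint64_t nt)
{
	struct ts t;

	t.sec = (int64_t)(nt / NT_TICKS_PER_SEC) - NT_EPOCH_DELTA;
	t.nsec = (long)(nt % NT_TICKS_PER_SEC) * 100;
	return t;
}

int main(void)
{
	/* 1970-01-01 00:00:00 UTC expressed as NT time */
	struct ts t = nt_to_unix(NT_EPOCH_DELTA * NT_TICKS_PER_SEC);

	printf("%lld.%09ld\n", (long long)t.sec, t.nsec);	/* 0.000000000 */
	return 0;
}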
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index df976ce6aed9..6bf04d9a5491 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -3753,28 +3753,7 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
}
}
- /*
- * Clamp the rsize/wsize mount arguments if they are too big for the server
- * and set the rsize/wsize to the negotiated values if not passed in by
- * the user on mount
- */
- if ((cifs_sb->ctx->wsize == 0) ||
- (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) {
- cifs_sb->ctx->wsize =
- round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
- /*
- * in the very unlikely event that the server sent a max write size under PAGE_SIZE,
- * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096
- */
- if (cifs_sb->ctx->wsize == 0) {
- cifs_sb->ctx->wsize = PAGE_SIZE;
- cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n");
- }
- }
- if ((cifs_sb->ctx->rsize == 0) ||
- (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
- cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
-
+ cifs_negotiate_iosize(server, cifs_sb->ctx, tcon);
/*
* The cookie is initialized from volume info returned above.
* Inside cifs_fscache_get_super_cookie it checks
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 9e8f404b9e56..851b74f557c1 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -160,10 +160,8 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
rdata->server = server;
- if (cifs_sb->ctx->rsize == 0)
- cifs_sb->ctx->rsize =
- server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
- cifs_sb->ctx);
+ cifs_negotiate_rsize(server, cifs_sb->ctx,
+ tlink_tcon(req->cfile->tlink));
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&size, &rdata->credits);
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 2980941b9667..a634a34d4086 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1021,6 +1021,7 @@ static int smb3_reconfigure(struct fs_context *fc)
struct dentry *root = fc->root;
struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses;
+ unsigned int rsize = ctx->rsize, wsize = ctx->wsize;
char *new_password = NULL, *new_password2 = NULL;
bool need_recon = false;
int rc;
@@ -1103,11 +1104,8 @@ static int smb3_reconfigure(struct fs_context *fc)
STEAL_STRING(cifs_sb, ctx, iocharset);
/* if rsize or wsize not passed in on remount, use previous values */
- if (ctx->rsize == 0)
- ctx->rsize = cifs_sb->ctx->rsize;
- if (ctx->wsize == 0)
- ctx->wsize = cifs_sb->ctx->wsize;
-
+ ctx->rsize = rsize ? CIFS_ALIGN_RSIZE(fc, rsize) : cifs_sb->ctx->rsize;
+ ctx->wsize = wsize ? CIFS_ALIGN_WSIZE(fc, wsize) : cifs_sb->ctx->wsize;
smb3_cleanup_fs_context_contents(cifs_sb->ctx);
rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
@@ -1312,7 +1310,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
__func__);
goto cifs_parse_mount_err;
}
- ctx->bsize = result.uint_32;
+ ctx->bsize = CIFS_ALIGN_BSIZE(fc, result.uint_32);
ctx->got_bsize = true;
break;
case Opt_rasize:
@@ -1336,24 +1334,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->rasize = result.uint_32;
break;
case Opt_rsize:
- ctx->rsize = result.uint_32;
+ ctx->rsize = CIFS_ALIGN_RSIZE(fc, result.uint_32);
ctx->got_rsize = true;
ctx->vol_rsize = ctx->rsize;
break;
case Opt_wsize:
- ctx->wsize = result.uint_32;
+ ctx->wsize = CIFS_ALIGN_WSIZE(fc, result.uint_32);
ctx->got_wsize = true;
- if (ctx->wsize % PAGE_SIZE != 0) {
- ctx->wsize = round_down(ctx->wsize, PAGE_SIZE);
- if (ctx->wsize == 0) {
- ctx->wsize = PAGE_SIZE;
- cifs_dbg(VFS, "wsize too small, reset to minimum %ld\n", PAGE_SIZE);
- } else {
- cifs_dbg(VFS,
- "wsize rounded down to %d to multiple of PAGE_SIZE %ld\n",
- ctx->wsize, PAGE_SIZE);
- }
- }
ctx->vol_wsize = ctx->wsize;
break;
case Opt_acregmax:
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index d1d29249bcdb..9e83302ce4b8 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -20,6 +20,21 @@
cifs_dbg(VFS, fmt, ## __VA_ARGS__); \
} while (0)
+static inline size_t cifs_io_align(struct fs_context *fc,
+ const char *name, size_t size)
+{
+ if (!size || !IS_ALIGNED(size, PAGE_SIZE)) {
+ cifs_errorf(fc, "unaligned %s, making it a multiple of %lu bytes\n",
+ name, PAGE_SIZE);
+ size = umax(round_down(size, PAGE_SIZE), PAGE_SIZE);
+ }
+ return size;
+}
+
+#define CIFS_ALIGN_WSIZE(_fc, _size) cifs_io_align(_fc, "wsize", _size)
+#define CIFS_ALIGN_RSIZE(_fc, _size) cifs_io_align(_fc, "rsize", _size)
+#define CIFS_ALIGN_BSIZE(_fc, _size) cifs_io_align(_fc, "bsize", _size)
+
enum smb_version {
Smb_1 = 1,
Smb_20,
@@ -361,4 +376,36 @@ static inline void cifs_mount_unlock(void)
mutex_unlock(&cifs_mount_mutex);
}
+static inline void cifs_negotiate_rsize(struct TCP_Server_Info *server,
+ struct smb3_fs_context *ctx,
+ struct cifs_tcon *tcon)
+{
+ unsigned int size;
+
+ size = umax(server->ops->negotiate_rsize(tcon, ctx), PAGE_SIZE);
+ if (ctx->rsize)
+ size = umax(umin(ctx->rsize, size), PAGE_SIZE);
+ ctx->rsize = round_down(size, PAGE_SIZE);
+}
+
+static inline void cifs_negotiate_wsize(struct TCP_Server_Info *server,
+ struct smb3_fs_context *ctx,
+ struct cifs_tcon *tcon)
+{
+ unsigned int size;
+
+ size = umax(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
+ if (ctx->wsize)
+ size = umax(umin(ctx->wsize, size), PAGE_SIZE);
+ ctx->wsize = round_down(size, PAGE_SIZE);
+}
+
+static inline void cifs_negotiate_iosize(struct TCP_Server_Info *server,
+ struct smb3_fs_context *ctx,
+ struct cifs_tcon *tcon)
+{
+ cifs_negotiate_rsize(server, ctx, tcon);
+ cifs_negotiate_wsize(server, ctx, tcon);
+}
+
#endif
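cifs_negotiate_rsize()/cifs_negotiate_wsize() clamp a user-requested size to [PAGE_SIZE, negotiated] and round down to a page multiple, which also guarantees a nonzero result. The same clamp in isolation (umaxu/uminu are local stand-ins for the kernel's umax/umin):

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int umaxu(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int uminu(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* requested == 0 means "use the negotiated value" */
static unsigned int negotiate_iosize(unsigned int requested,
				     unsigned int negotiated)
{
	unsigned int size = umaxu(negotiated, PAGE_SIZE);

	if (requested)
		size = umaxu(uminu(requested, size), PAGE_SIZE);
	return size & ~(PAGE_SIZE - 1);		/* round down to a page multiple */
}

int main(void)
{
	printf("%u\n", negotiate_iosize(0, 65536));	/* 65536 */
	printf("%u\n", negotiate_iosize(70000, 65536));	/* clamped: 65536 */
	printf("%u\n", negotiate_iosize(5000, 65536));	/* rounded: 4096 */
	return 0;
}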
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index 0adeec652dc1..b27a182629ec 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -432,7 +432,7 @@ cifs_negotiate(const unsigned int xid,
}
static unsigned int
-cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+smb1_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -467,7 +467,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
}
static unsigned int
-cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+smb1_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -543,24 +543,104 @@ static int cifs_query_path_info(const unsigned int xid,
const char *full_path,
struct cifs_open_info_data *data)
{
- int rc;
+ int rc = -EOPNOTSUPP;
FILE_ALL_INFO fi = {};
+ struct cifs_search_info search_info = {};
+ bool non_unicode_wildcard = false;
data->reparse_point = false;
data->adjust_tz = false;
- /* could do find first instead but this returns more info */
- rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls,
- cifs_remap(cifs_sb));
/*
- * BB optimize code so we do not make the above call when server claims
- * no NT SMB support and the above call failed at least once - set flag
- * in tcon or mount.
+ * First try the CIFSSMBQPathInfo() function, which returns more info
+ * (NumberOfLinks) than the CIFSFindFirst() fallback function.
+ * Some servers like Win9x do not support SMB_QUERY_FILE_ALL_INFO over
+ * TRANS2_QUERY_PATH_INFORMATION, but support it with a filehandle over
+ * TRANS2_QUERY_FILE_INFORMATION (function CIFSSMBQFileInfo()). But the
+ * SMB Open command on non-NT servers works only for files and does not
+ * work for directories. Moreover, the Win9x SMB server returns bogus
+ * data in the SMB_QUERY_FILE_ALL_INFO Attributes field. So for non-NT
+ * servers, do not use CIFSSMBQPathInfo() or CIFSSMBQFileInfo() at all.
*/
- if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
+ if (tcon->ses->capabilities & CAP_NT_SMBS)
+ rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */,
+ cifs_sb->local_nls, cifs_remap(cifs_sb));
+
+ /*
+ * The non-UNICODE variants of the fallback functions below expand
+ * wildcards, so they cannot be used to query paths containing wildcard
+ * characters.
+ */
+ if (rc && !(tcon->ses->capabilities & CAP_UNICODE) && strpbrk(full_path, "*?\"><"))
+ non_unicode_wildcard = true;
+
+ /*
+ * Then fall back to CIFSFindFirst(), which also works with non-NT
+ * servers but does not provide NumberOfLinks.
+ */
+ if ((rc == -EOPNOTSUPP || rc == -EINVAL) &&
+ !non_unicode_wildcard) {
+ if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find))
+ search_info.info_level = SMB_FIND_FILE_INFO_STANDARD;
+ else
+ search_info.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
+ rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL,
+ CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END,
+ &search_info, false);
+ if (rc == 0) {
+ if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) {
+ FIND_FILE_STANDARD_INFO *di;
+ int offset = tcon->ses->server->timeAdj;
+
+ di = (FIND_FILE_STANDARD_INFO *)search_info.srch_entries_start;
+ fi.CreationTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
+ di->CreationDate, di->CreationTime, offset)));
+ fi.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
+ di->LastAccessDate, di->LastAccessTime, offset)));
+ fi.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
+ di->LastWriteDate, di->LastWriteTime, offset)));
+ fi.ChangeTime = fi.LastWriteTime;
+ fi.Attributes = cpu_to_le32(le16_to_cpu(di->Attributes));
+ fi.AllocationSize = cpu_to_le64(le32_to_cpu(di->AllocationSize));
+ fi.EndOfFile = cpu_to_le64(le32_to_cpu(di->DataSize));
+ } else {
+ FILE_FULL_DIRECTORY_INFO *di;
+
+ di = (FILE_FULL_DIRECTORY_INFO *)search_info.srch_entries_start;
+ fi.CreationTime = di->CreationTime;
+ fi.LastAccessTime = di->LastAccessTime;
+ fi.LastWriteTime = di->LastWriteTime;
+ fi.ChangeTime = di->ChangeTime;
+ fi.Attributes = di->ExtFileAttributes;
+ fi.AllocationSize = di->AllocationSize;
+ fi.EndOfFile = di->EndOfFile;
+ fi.EASize = di->EaSize;
+ }
+ fi.NumberOfLinks = cpu_to_le32(1);
+ fi.DeletePending = 0;
+ fi.Directory = !!(le32_to_cpu(fi.Attributes) & ATTR_DIRECTORY);
+ cifs_buf_release(search_info.ntwrk_buf_start);
+ } else if (!full_path[0]) {
+ /*
+ * CIFSFindFirst() does not work on the root path if the
+ * root path was exported on the server from the top-level
+ * path (drive letter).
+ */
+ rc = -EOPNOTSUPP;
+ }
+ }
+
+ /*
+ * If everything failed, then fall back to the legacy SMB command
+ * SMB_COM_QUERY_INFORMATION, which works with all servers but
+ * provides only limited information.
+ */
+ if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !non_unicode_wildcard) {
rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
cifs_remap(cifs_sb));
data->adjust_tz = true;
+ } else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && non_unicode_wildcard) {
+ /* A path with a non-UNICODE wildcard character cannot exist. */
+ rc = -ENOENT;
}
if (!rc) {
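The rewritten query is a three-tier fallback: CIFSSMBQPathInfo() on NT-capable servers, then CIFSFindFirst(), then the legacy SMB_COM_QUERY_INFORMATION, advancing only on -EOPNOTSUPP/-EINVAL. The control shape, reduced to stubs (the probe functions are hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool retryable(int rc)
{
	return rc == -EOPNOTSUPP || rc == -EINVAL;
}

/* Hypothetical probes standing in for the three SMB1 query paths. */
static int query_modern(void)	{ return -EOPNOTSUPP; }
static int query_find(void)	{ return -EOPNOTSUPP; }
static int query_legacy(void)	{ return 0; }

static int query_path_info(bool server_is_nt)
{
	int rc = -EOPNOTSUPP;

	if (server_is_nt)	/* tier 1: richest info, NT only */
		rc = query_modern();
	if (retryable(rc))	/* tier 2: works on non-NT servers too */
		rc = query_find();
	if (retryable(rc))	/* tier 3: minimal legacy command */
		rc = query_legacy();
	return rc;
}

int main(void)
{
	printf("%d\n", query_path_info(false));	/* 0: legacy tier succeeded */
	return 0;
}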
@@ -639,6 +719,13 @@ static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
FILE_ALL_INFO fi = {};
+ /*
+ * CIFSSMBQFileInfo() for non-NT servers returns bogus data in the
+ * Attributes field, so do not use this command for non-NT servers.
+ */
+ if (!(tcon->ses->capabilities & CAP_NT_SMBS))
+ return -EOPNOTSUPP;
+
if (cfile->symlink_target) {
data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
if (!data->symlink_target)
@@ -809,6 +896,9 @@ smb_set_file_info(struct inode *inode, const char *full_path,
struct cifs_fid fid;
struct cifs_open_parms oparms;
struct cifsFileInfo *open_file;
+ FILE_BASIC_INFO new_buf;
+ struct cifs_open_info_data query_data;
+ __le64 write_time = buf->LastWriteTime;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
@@ -816,20 +906,58 @@ smb_set_file_info(struct inode *inode, const char *full_path,
/* if the file is already open for write, just use that fileid */
open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+
if (open_file) {
fid.netfid = open_file->fid.netfid;
netpid = open_file->pid;
tcon = tlink_tcon(open_file->tlink);
- goto set_via_filehandle;
+ } else {
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ tlink = NULL;
+ goto out;
+ }
+ tcon = tlink_tcon(tlink);
}
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink)) {
- rc = PTR_ERR(tlink);
- tlink = NULL;
- goto out;
+ /*
+ * Non-NT servers interpret a zero time value in SMB_SET_FILE_BASIC_INFO
+ * over TRANS2_SET_FILE_INFORMATION as a valid time value, while NT
+ * servers interpret a zero time value as "do not change the existing
+ * value on the server". The API of the ->set_file_info() callback
+ * expects zero time values to have the NT meaning - do not change.
+ * Therefore, if the server is non-NT and some time values in "buf" are
+ * zero, fetch the missing time values.
+ */
+ if (!(tcon->ses->capabilities & CAP_NT_SMBS) &&
+ (!buf->CreationTime || !buf->LastAccessTime ||
+ !buf->LastWriteTime || !buf->ChangeTime)) {
+ rc = cifs_query_path_info(xid, tcon, cifs_sb, full_path, &query_data);
+ if (rc) {
+ if (open_file) {
+ cifsFileInfo_put(open_file);
+ open_file = NULL;
+ }
+ goto out;
+ }
+ /*
+ * The original write_time from buf->LastWriteTime is preserved,
+ * as SMBSetInformation() interprets zero as "do not change".
+ */
+ new_buf = *buf;
+ buf = &new_buf;
+ if (!buf->CreationTime)
+ buf->CreationTime = query_data.fi.CreationTime;
+ if (!buf->LastAccessTime)
+ buf->LastAccessTime = query_data.fi.LastAccessTime;
+ if (!buf->LastWriteTime)
+ buf->LastWriteTime = query_data.fi.LastWriteTime;
+ if (!buf->ChangeTime)
+ buf->ChangeTime = query_data.fi.ChangeTime;
}
- tcon = tlink_tcon(tlink);
+
+ if (open_file)
+ goto set_via_filehandle;
rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls,
cifs_sb);
@@ -850,8 +978,45 @@ smb_set_file_info(struct inode *inode, const char *full_path,
.fid = &fid,
};
- cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
- rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ if (S_ISDIR(inode->i_mode) && !(tcon->ses->capabilities & CAP_NT_SMBS)) {
+ /* Opening a directory path is not possible on non-NT servers. */
+ rc = -EOPNOTSUPP;
+ } else {
+ /*
+ * Use cifs_open_file() instead of CIFS_open(), as
+ * cifs_open_file() selects the correct open function,
+ * which also works on non-NT servers.
+ */
+ rc = cifs_open_file(xid, &oparms, &oplock, NULL);
+ /*
+ * Opening a path for writing on non-NT servers is not
+ * possible when the read-only attribute is already set;
+ * a non-NT server returns -EACCES in this case. For those
+ * servers the only way to clear the read-only bit is via
+ * the SMB_COM_SETATTR command.
+ */
+ if (rc == -EACCES &&
+ (cinode->cifsAttrs & ATTR_READONLY) &&
+ le32_to_cpu(buf->Attributes) != 0 && /* 0 = do not change attrs */
+ !(le32_to_cpu(buf->Attributes) & ATTR_READONLY) &&
+ !(tcon->ses->capabilities & CAP_NT_SMBS))
+ rc = -EOPNOTSUPP;
+ }
+
+ /* Fall back to the SMB_COM_SETATTR command when absolutely needed. */
+ if (rc == -EOPNOTSUPP) {
+ cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n");
+ rc = SMBSetInformation(xid, tcon, full_path,
+ buf->Attributes != 0 ? buf->Attributes : cpu_to_le32(cinode->cifsAttrs),
+ write_time,
+ cifs_sb->local_nls, cifs_sb);
+ if (rc == 0)
+ cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
+ else
+ rc = -EACCES;
+ goto out;
+ }
+
if (rc != 0) {
if (rc == -EIO)
rc = -EINVAL;
@@ -859,6 +1024,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
}
netpid = current->tgid;
+ cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for attrs/times not supported by this server\n");
set_via_filehandle:
rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid);
@@ -869,6 +1035,21 @@ set_via_filehandle:
CIFSSMBClose(xid, tcon, fid.netfid);
else
cifsFileInfo_put(open_file);
+
+ /*
+ * Setting the read-only bit is not honored on non-NT servers when done
+ * via open semantics, so use the SMB_COM_SETATTR command to set it.
+ * This command works only after the file is closed, so use it only when
+ * the operation was called without a filehandle.
+ */
+ if (open_file == NULL &&
+ !(tcon->ses->capabilities & CAP_NT_SMBS) &&
+ le32_to_cpu(buf->Attributes) & ATTR_READONLY) {
+ SMBSetInformation(xid, tcon, full_path,
+ buf->Attributes,
+ 0 /* do not change write time */,
+ cifs_sb->local_nls, cifs_sb);
+ }
out:
if (tlink != NULL)
cifs_put_tlink(tlink);
@@ -1161,8 +1342,8 @@ struct smb_version_operations smb1_operations = {
.check_trans2 = cifs_check_trans2,
.need_neg = cifs_need_neg,
.negotiate = cifs_negotiate,
- .negotiate_wsize = cifs_negotiate_wsize,
- .negotiate_rsize = cifs_negotiate_rsize,
+ .negotiate_wsize = smb1_negotiate_wsize,
+ .negotiate_rsize = smb1_negotiate_rsize,
.sess_setup = CIFS_SessSetup,
.logoff = CIFSSMBLogoff,
.tree_connect = CIFSTCon,
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 57d9bfbadd97..2a3e46b8e15a 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -666,6 +666,8 @@ finished:
/* smb2_parse_contexts() fills idata->fi.IndexNumber */
rc = smb2_parse_contexts(server, &rsp_iov[0], &oparms->fid->epoch,
oparms->fid->lease_key, &oplock, &idata->fi, NULL);
+ if (rc)
+ cifs_dbg(VFS, "rc: %d parsing context of compound op\n", rc);
}
for (i = 0; i < num_cmds; i++) {
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index c4d52bebd37d..0b35816d551f 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -2921,6 +2921,7 @@ replay_again:
req->CreateContextsOffset = cpu_to_le32(
sizeof(struct smb2_create_req) +
iov[1].iov_len);
+ le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
pc_buf = iov[n_iov-1].iov_base;
}
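le32_add_cpu() above read-modify-writes a little-endian on-wire field using CPU-order arithmetic. An equivalent in portable C, for illustration:

#include <stdint.h>
#include <stdio.h>

/* Host-order <-> little-endian helpers (byte-shift form is portable). */
static uint32_t le32_to_cpu32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void cpu_to_le32s(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* le32_add_cpu(): add a host-order delta to a little-endian field. */
static void le32_add(uint8_t field[4], uint32_t delta)
{
	cpu_to_le32s(field, le32_to_cpu32(field) + delta);
}

int main(void)
{
	uint8_t len[4] = { 0x10, 0, 0, 0 };	/* 16, little-endian */

	le32_add(len, 32);
	printf("%u\n", le32_to_cpu32(len));	/* 48 */
	return 0;
}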
@@ -4092,12 +4093,8 @@ static void cifs_renegotiate_iosize(struct TCP_Server_Info *server,
return;
spin_lock(&tcon->sb_list_lock);
- list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) {
- cifs_sb->ctx->rsize =
- server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
- cifs_sb->ctx->wsize =
- server->ops->negotiate_wsize(tcon, cifs_sb->ctx);
- }
+ list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link)
+ cifs_negotiate_iosize(server, cifs_sb->ctx, tcon);
spin_unlock(&tcon->sb_list_lock);
}
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
index 83caa3849749..b3d121052408 100644
--- a/fs/smb/server/auth.c
+++ b/fs/smb/server/auth.c
@@ -550,7 +550,19 @@ int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
retval = -ENOMEM;
goto out;
}
- sess->user = user;
+
+ if (!sess->user) {
+ /* First successful authentication */
+ sess->user = user;
+ } else {
+ if (!ksmbd_compare_user(sess->user, user)) {
+ ksmbd_debug(AUTH, "different user tried to reuse session\n");
+ retval = -EPERM;
+ ksmbd_free_user(user);
+ goto out;
+ }
+ ksmbd_free_user(user);
+ }
memcpy(sess->sess_key, resp->payload, resp->session_key_len);
memcpy(out_blob, resp->payload + resp->session_key_len,
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 3f45f28f6f0f..9dec4c2940bc 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -59,10 +59,12 @@ static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess)
struct ksmbd_session_rpc *entry;
long index;
+ down_write(&sess->rpc_lock);
xa_for_each(&sess->rpc_handle_list, index, entry) {
xa_erase(&sess->rpc_handle_list, index);
__session_rpc_close(sess, entry);
}
+ up_write(&sess->rpc_lock);
xa_destroy(&sess->rpc_handle_list);
}
@@ -92,7 +94,7 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
{
struct ksmbd_session_rpc *entry, *old;
struct ksmbd_rpc_command *resp;
- int method;
+ int method, id;
method = __rpc_method(rpc_name);
if (!method)
@@ -102,26 +104,29 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
if (!entry)
return -ENOMEM;
+ down_read(&sess->rpc_lock);
entry->method = method;
- entry->id = ksmbd_ipc_id_alloc();
- if (entry->id < 0)
+ entry->id = id = ksmbd_ipc_id_alloc();
+ if (id < 0)
goto free_entry;
- old = xa_store(&sess->rpc_handle_list, entry->id, entry, KSMBD_DEFAULT_GFP);
+ old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
if (xa_is_err(old))
goto free_id;
- resp = ksmbd_rpc_open(sess, entry->id);
+ resp = ksmbd_rpc_open(sess, id);
if (!resp)
goto erase_xa;
+ up_read(&sess->rpc_lock);
kvfree(resp);
- return entry->id;
+ return id;
erase_xa:
xa_erase(&sess->rpc_handle_list, entry->id);
free_id:
ksmbd_rpc_id_free(entry->id);
free_entry:
kfree(entry);
+ up_read(&sess->rpc_lock);
return -EINVAL;
}
@@ -129,9 +134,11 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
{
struct ksmbd_session_rpc *entry;
+ down_write(&sess->rpc_lock);
entry = xa_erase(&sess->rpc_handle_list, id);
if (entry)
__session_rpc_close(sess, entry);
+ up_write(&sess->rpc_lock);
}
int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
@@ -439,6 +446,7 @@ static struct ksmbd_session *__session_create(int protocol)
sess->sequence_number = 1;
rwlock_init(&sess->tree_conns_lock);
atomic_set(&sess->refcnt, 2);
+ init_rwsem(&sess->rpc_lock);
ret = __init_smb2_session(sess);
if (ret)
diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
index f21348381d59..c5749d6ec715 100644
--- a/fs/smb/server/mgmt/user_session.h
+++ b/fs/smb/server/mgmt/user_session.h
@@ -63,6 +63,7 @@ struct ksmbd_session {
rwlock_t tree_conns_lock;
atomic_t refcnt;
+ struct rw_semaphore rpc_lock;
};
static inline int test_session_flag(struct ksmbd_session *sess, int bit)
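
Read together with the user_session.c hunks above, the new rpc_lock follows the usual rwsem split: handle creation nests under the read side so concurrent opens may proceed, while teardown takes the write side and therefore observes a stable handle list. A minimal sketch under those assumptions (the helpers are hypothetical):

static int example_rpc_open(struct ksmbd_session *sess)
{
	int id;

	down_read(&sess->rpc_lock);		/* concurrent opens are fine */
	id = example_alloc_and_publish(sess);	/* hypothetical helper */
	up_read(&sess->rpc_lock);
	return id;
}

static void example_rpc_close(struct ksmbd_session *sess, int id)
{
	down_write(&sess->rpc_lock);		/* exclude opens while freeing */
	example_erase_and_free(sess, id);	/* hypothetical helper */
	up_write(&sess->rpc_lock);
}
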
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 81a29857b1e3..03f606afad93 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -1496,7 +1496,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
sizeof(struct create_lease_v2) - 4)
- return NULL;
+ goto err_out;
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
@@ -1512,7 +1512,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
sizeof(struct create_lease))
- return NULL;
+ goto err_out;
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
@@ -1521,6 +1521,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
lreq->version = 1;
}
return lreq;
+err_out:
+ kfree(lreq);
+ return NULL;
}
/**
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 57839f9708bb..f2a2be8467c6 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -633,6 +633,11 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
return name;
}
+ if (*name == '\0') {
+ kfree(name);
+ return ERR_PTR(-EINVAL);
+ }
+
if (*name == '\\') {
pr_err("not allow directory name included leading slash\n");
kfree(name);
@@ -1445,7 +1450,7 @@ static int ntlm_authenticate(struct ksmbd_work *work,
{
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
- struct channel *chann = NULL;
+ struct channel *chann = NULL, *old;
struct ksmbd_user *user;
u64 prev_id;
int sz, rc;
@@ -1557,7 +1562,12 @@ binding_session:
return -ENOMEM;
chann->conn = conn;
- xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP);
+ old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann,
+ KSMBD_DEFAULT_GFP);
+ if (xa_is_err(old)) {
+ kfree(chann);
+ return xa_err(old);
+ }
}
}
@@ -1602,11 +1612,6 @@ static int krb5_authenticate(struct ksmbd_work *work,
if (prev_sess_id && prev_sess_id != sess->id)
destroy_previous_session(conn, sess->user, prev_sess_id);
- if (sess->state == SMB2_SESSION_VALID) {
- ksmbd_free_user(sess->user);
- sess->user = NULL;
- }
-
retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
out_blob, &out_len);
if (retval) {
@@ -2249,10 +2254,6 @@ int smb2_session_logoff(struct ksmbd_work *work)
sess->state = SMB2_SESSION_EXPIRED;
up_write(&conn->session_lock);
- if (sess->user) {
- ksmbd_free_user(sess->user);
- sess->user = NULL;
- }
ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
rsp->StructureSize = cpu_to_le16(4);
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 391d07da586c..482eba0f4dc1 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -426,6 +426,13 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
goto out;
}
+ if (v_len <= *pos) {
+ pr_err("stream write position %lld is out of bounds (stream length: %zd)\n",
+ *pos, v_len);
+ err = -EINVAL;
+ goto out;
+ }
+
if (v_len < size) {
wbuf = kvzalloc(size, KSMBD_DEFAULT_GFP);
if (!wbuf) {
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 1f8fa3468173..dfed6fce8904 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -661,21 +661,40 @@ __close_file_table_ids(struct ksmbd_file_table *ft,
bool (*skip)(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp))
{
- unsigned int id;
- struct ksmbd_file *fp;
- int num = 0;
+ struct ksmbd_file *fp;
+ unsigned int id = 0;
+ int num = 0;
+
+ while (1) {
+ write_lock(&ft->lock);
+ fp = idr_get_next(ft->idr, &id);
+ if (!fp) {
+ write_unlock(&ft->lock);
+ break;
+ }
- idr_for_each_entry(ft->idr, fp, id) {
- if (skip(tcon, fp))
+ if (skip(tcon, fp) ||
+ !atomic_dec_and_test(&fp->refcount)) {
+ id++;
+ write_unlock(&ft->lock);
continue;
+ }
set_close_state_blocked_works(fp);
+ idr_remove(ft->idr, fp->volatile_id);
+ fp->volatile_id = KSMBD_NO_FID;
+ write_unlock(&ft->lock);
+
+ down_write(&fp->f_ci->m_lock);
+ list_del_init(&fp->node);
+ up_write(&fp->f_ci->m_lock);
- if (!atomic_dec_and_test(&fp->refcount))
- continue;
__ksmbd_close_fd(ft, fp);
+
num++;
+ id++;
}
+
return num;
}
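
The rewritten loop above is an instance of a common teardown pattern: a restartable idr_get_next() walk that unpublishes each object under the lock, then drops the lock before work that may sleep. A generic sketch with hypothetical names:

static int example_drain(struct example_table *t)
{
	struct example_obj *obj;
	int id = 0, num = 0;

	while (1) {
		write_lock(&t->lock);
		obj = idr_get_next(&t->idr, &id);
		if (!obj) {
			write_unlock(&t->lock);
			break;
		}
		idr_remove(&t->idr, id);	/* unpublish under the lock */
		write_unlock(&t->lock);

		example_destroy(obj);		/* may sleep: lock dropped */
		num++;
		id++;				/* resume after this slot */
	}
	return num;
}
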
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index d80f94346199..22f4bf956ba1 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1585,8 +1585,11 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
user_uffdio_copy = (struct uffdio_copy __user *) arg;
ret = -EAGAIN;
- if (atomic_read(&ctx->mmap_changing))
+ if (unlikely(atomic_read(&ctx->mmap_changing))) {
+ if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
+ return -EFAULT;
goto out;
+ }
ret = -EFAULT;
if (copy_from_user(&uffdio_copy, user_uffdio_copy,
@@ -1641,8 +1644,11 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
ret = -EAGAIN;
- if (atomic_read(&ctx->mmap_changing))
+ if (unlikely(atomic_read(&ctx->mmap_changing))) {
+ if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
+ return -EFAULT;
goto out;
+ }
ret = -EFAULT;
if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
@@ -1744,8 +1750,11 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
user_uffdio_continue = (struct uffdio_continue __user *)arg;
ret = -EAGAIN;
- if (atomic_read(&ctx->mmap_changing))
+ if (unlikely(atomic_read(&ctx->mmap_changing))) {
+ if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
+ return -EFAULT;
goto out;
+ }
ret = -EFAULT;
if (copy_from_user(&uffdio_continue, user_uffdio_continue,
@@ -1801,8 +1810,11 @@ static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long
user_uffdio_poison = (struct uffdio_poison __user *)arg;
ret = -EAGAIN;
- if (atomic_read(&ctx->mmap_changing))
+ if (unlikely(atomic_read(&ctx->mmap_changing))) {
+ if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
+ return -EFAULT;
goto out;
+ }
ret = -EFAULT;
if (copy_from_user(&uffdio_poison, user_uffdio_poison,
@@ -1870,8 +1882,12 @@ static int userfaultfd_move(struct userfaultfd_ctx *ctx,
user_uffdio_move = (struct uffdio_move __user *) arg;
- if (atomic_read(&ctx->mmap_changing))
- return -EAGAIN;
+ ret = -EAGAIN;
+ if (unlikely(atomic_read(&ctx->mmap_changing))) {
+ if (unlikely(put_user(ret, &user_uffdio_move->move)))
+ return -EFAULT;
+ goto out;
+ }
if (copy_from_user(&uffdio_move, user_uffdio_move,
/* don't copy "move" last field */
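
All five ioctls now share one contract: on an mmap_changing race the kernel returns -EAGAIN and also stores -EAGAIN in the per-ioctl result field, so userspace can tell a retryable race from a hard failure. A minimal userspace sketch (the retry policy is an assumption):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <errno.h>

static int copy_with_retry(int uffd, struct uffdio_copy *uc)
{
	for (;;) {
		uc->copy = 0;
		if (ioctl(uffd, UFFDIO_COPY, uc) == 0)
			return 0;
		/* Retryable: the mapping was changing under us. */
		if (errno == EAGAIN || uc->copy == -EAGAIN)
			continue;
		return -1;	/* hard failure; inspect errno/uc->copy */
	}
}
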
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 58a635a6d5bd..66409bc3a4e0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -108,13 +108,13 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
#define TEXT_MAIN .text
#endif
#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
-#define DATA_MAIN .data
+#define DATA_MAIN .data .data.rel .data.rel.local
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
index 24ad120b8827..c33cba111171 100644
--- a/include/drm/ttm/ttm_backup.h
+++ b/include/drm/ttm/ttm_backup.h
@@ -9,14 +9,12 @@
#include <linux/mm_types.h>
#include <linux/shmem_fs.h>
-struct ttm_backup;
-
/**
* ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
* @handle: The handle to convert.
*
* Converts an opaque handle received from the
- * struct ttm_backoup_ops::backup_page() function to an (invalid)
+ * ttm_backup_backup_page() function to an (invalid)
* struct page pointer suitable for a struct page array.
*
* Return: An (invalid) struct page pointer.
@@ -45,8 +43,8 @@ static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
*
* Return: The handle that was previously used in
* ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
- * for use as argument in the struct ttm_backup_ops drop() or
- * copy_backed_up_page() functions.
+ * for use as argument in the struct ttm_backup_drop() or
+ * ttm_backup_copy_page() functions.
*/
static inline unsigned long
ttm_backup_page_ptr_to_handle(const struct page *page)
@@ -55,20 +53,20 @@ ttm_backup_page_ptr_to_handle(const struct page *page)
return (unsigned long)page >> 1;
}
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle);
+void ttm_backup_drop(struct file *backup, pgoff_t handle);
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
pgoff_t handle, bool intr);
s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
bool writeback, pgoff_t idx, gfp_t page_gfp,
gfp_t alloc_gfp);
-void ttm_backup_fini(struct ttm_backup *backup);
+void ttm_backup_fini(struct file *backup);
u64 ttm_backup_bytes_avail(void);
-struct ttm_backup *ttm_backup_shmem_create(loff_t size);
+struct file *ttm_backup_shmem_create(loff_t size);
#endif
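
With the opaque struct ttm_backup gone, the backup object is just a shmem-backed struct file. A hedged lifecycle sketch using the signatures above (the ERR_PTR convention and the flag choices are assumptions, not taken from a driver):

static int example_backup_cycle(struct page *page)
{
	struct file *backup = ttm_backup_shmem_create(SZ_2M);
	s64 handle;

	if (IS_ERR(backup))	/* assumed error convention */
		return PTR_ERR(backup);

	handle = ttm_backup_backup_page(backup, page, true, 0,
					GFP_KERNEL, GFP_KERNEL);
	if (handle >= 0)
		ttm_backup_copy_page(backup, page, handle, true);

	ttm_backup_fini(backup);
	return handle < 0 ? handle : 0;
}
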
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 13cf47f3322f..406437ad674b 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -118,7 +118,7 @@ struct ttm_tt {
* ttm_tt_create() callback is responsible for assigning
* this field.
*/
- struct ttm_backup *backup;
+ struct file *backup;
/**
* @caching: The current caching state of the pages, see enum
* ttm_caching.
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 400fee6427a5..7a5b391dcc01 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -776,8 +776,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq);
@@ -840,12 +840,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_al(policy, target_freq,
@@ -855,6 +855,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
efficiencies);
}
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -908,12 +916,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ah(policy, target_freq,
@@ -923,6 +931,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
efficiencies);
}
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -993,12 +1009,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ac(policy, target_freq,
@@ -1008,7 +1024,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
efficiencies);
}
-static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
+ unsigned int min, unsigned int max,
+ int idx)
{
unsigned int freq;
@@ -1017,11 +1043,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
freq = policy->freq_table[idx].frequency;
- return freq == clamp_val(freq, policy->min, policy->max);
+ return freq == clamp_val(freq, min, max);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
+ unsigned int min,
+ unsigned int max,
unsigned int relation)
{
bool efficiencies = policy->efficiencies_available &&
@@ -1032,29 +1060,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
relation &= ~CPUFREQ_RELATION_E;
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
- return cpufreq_table_index_unsorted(policy, target_freq,
- relation);
+ return cpufreq_table_index_unsorted(policy, target_freq, min,
+ max, relation);
retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- idx = cpufreq_table_find_index_l(policy, target_freq,
- efficiencies);
+ idx = find_index_l(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_H:
- idx = cpufreq_table_find_index_h(policy, target_freq,
- efficiencies);
+ idx = find_index_h(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_C:
- idx = cpufreq_table_find_index_c(policy, target_freq,
- efficiencies);
+ idx = find_index_c(policy, target_freq, min, max, efficiencies);
break;
default:
WARN_ON_ONCE(1);
return 0;
}
- /* Limit frequency index to honor policy->min/max */
- if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+ /* Limit frequency index to honor min and max */
+ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
efficiencies = false;
goto retry;
}
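
The reason min/max are threaded through explicitly is that a caller can resolve an index against one consistent snapshot of the limits, instead of re-reading policy->min/max, which a concurrent policy update may change between reads. A hypothetical caller sketch:

static int example_pick_index(struct cpufreq_policy *policy,
			      unsigned int target_freq)
{
	unsigned int min = READ_ONCE(policy->min);
	unsigned int max = READ_ONCE(policy->max);

	/* A racing limits update may briefly leave min > max; normalize. */
	if (min > max)
		min = max;

	return cpufreq_frequency_table_target(policy, target_freq,
					      min, max, CPUFREQ_RELATION_L);
}
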
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6cd8d1d28b8b..fc27b53c58c2 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -907,21 +907,6 @@ extern void fsnotify_wait_marks_destroyed(void);
/* Clear all of the marks of a group attached to a given object type */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
unsigned int obj_type);
-/* run all the marks in a group, and clear all of the vfsmount marks */
-static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
-}
-/* run all the marks in a group, and clear all of the inode marks */
-static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
-}
-/* run all the marks in a group, and clear all of the sn marks */
-static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB);
-}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 675959fb97ba..d6ffe01962c2 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1002,6 +1002,12 @@ struct vmbus_channel {
/* The max size of a packet on this channel */
u32 max_pkt_size;
+
+ /* function to mmap ring buffer memory to the channel's sysfs ring attribute */
+ int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);
+
+ /* boolean to control visibility of sysfs for ring buffer */
+ bool ring_sysfs_visible;
};
#define lock_requestor(channel, flags) \
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 508d466de1cc..457b4fba88bd 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1526,7 +1526,7 @@ struct ieee80211_mgmt {
struct {
u8 action_code;
u8 dialog_token;
- u8 status_code;
+ __le16 status_code;
u8 variable[];
} __packed ttlm_res;
struct {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ccce8a751e2a..3a8d35d41fda 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -440,10 +440,10 @@ static inline int __iommu_copy_struct_from_user(
void *dst_data, const struct iommu_user_data *src_data,
unsigned int data_type, size_t data_len, size_t min_len)
{
- if (src_data->type != data_type)
- return -EINVAL;
if (WARN_ON(!dst_data || !src_data))
return -EINVAL;
+ if (src_data->type != data_type)
+ return -EINVAL;
if (src_data->len < min_len || data_len < src_data->len)
return -EINVAL;
return copy_struct_from_user(dst_data, data_len, src_data->uptr,
@@ -456,8 +456,8 @@ static inline int __iommu_copy_struct_from_user(
* include/uapi/linux/iommufd.h
* @user_data: Pointer to a struct iommu_user_data for user space data info
* @data_type: The data type of the @kdst. Must match with @user_data->type
- * @min_last: The last memember of the data structure @kdst points in the
- * initial version.
+ * @min_last: The last member of the data structure @kdst points in the initial
+ * version.
* Return 0 for success, otherwise -error.
*/
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
diff --git a/include/linux/module.h b/include/linux/module.h
index d94b196d5a34..b3329110d668 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -162,6 +162,8 @@ extern void cleanup_module(void);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
+struct module_kobject *lookup_or_create_module_kobject(const char *name);
+
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2d11d013cabe..7ea022750e4e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4972,6 +4972,7 @@ static inline void __dev_mc_unsync(struct net_device *dev,
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
+int netif_set_promiscuity(struct net_device *dev, int inc);
int dev_set_promiscuity(struct net_device *dev, int inc);
int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
int dev_set_allmulti(struct net_device *dev, int inc);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e39d4d563b19..785048a3b3e6 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -51,7 +51,7 @@ struct tk_read_base {
* @offs_real: Offset clock monotonic -> clock realtime
* @offs_boot: Offset clock monotonic -> clock boottime
* @offs_tai: Offset clock monotonic -> clock tai
- * @tai_offset: The current UTC to TAI offset in seconds
+ * @coarse_nsec: The nanoseconds part for coarse time getters
* @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
* @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
* @clock_was_set_seq: The sequence number of clock was set events
@@ -76,6 +76,7 @@ struct tk_read_base {
* ntp shifted nano seconds.
* @ntp_err_mult: Multiplication factor for scaled math conversion
* @skip_second_overflow: Flag used to avoid updating NTP twice with same second
+ * @tai_offset: The current UTC to TAI offset in seconds
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
* we need to add to xtime (or xtime corrected for sub jiffy times)
@@ -100,7 +101,7 @@ struct tk_read_base {
* which results in the following cacheline layout:
*
* 0: seqcount, tkr_mono
- * 1: xtime_sec ... tai_offset
+ * 1: xtime_sec ... coarse_nsec
* 2: tkr_raw, raw_sec
* 3,4: Internal variables
*
@@ -121,7 +122,7 @@ struct timekeeper {
ktime_t offs_real;
ktime_t offs_boot;
ktime_t offs_tai;
- s32 tai_offset;
+ u32 coarse_nsec;
/* Cacheline 2: */
struct tk_read_base tkr_raw;
@@ -144,6 +145,7 @@ struct timekeeper {
u32 ntp_error_shift;
u32 ntp_err_mult;
u32 skip_second_overflow;
+ s32 tai_offset;
};
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 31e9ffd936e3..5ca8d4dd149d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -61,6 +61,7 @@ struct vm_struct {
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
+ unsigned long requested_size;
};
struct vmap_area {
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index a8586c3058c7..797992019f9e 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1931,6 +1931,8 @@ struct hci_cp_le_pa_create_sync {
__u8 sync_cte_type;
} __packed;
+#define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045
+
#define HCI_OP_LE_PA_TERM_SYNC 0x2046
struct hci_cp_le_pa_term_sync {
__le16 handle;
@@ -2830,7 +2832,7 @@ struct hci_evt_le_create_big_complete {
__le16 bis_handle[];
} __packed;
-#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
+#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
struct hci_evt_le_big_sync_estabilished {
__u8 status;
__u8 handle;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 5115da34f881..522d837a23fa 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1113,10 +1113,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
return NULL;
}
-static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
- __u8 sid,
- bdaddr_t *dst,
- __u8 dst_type)
+static inline struct hci_conn *
+hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1124,8 +1122,10 @@ static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK || bacmp(&c->dst, dst) ||
- c->dst_type != dst_type || c->sid != sid)
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
continue;
rcu_read_unlock();
@@ -1524,8 +1524,6 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
bool hci_iso_setup_path(struct hci_conn *conn);
int hci_le_create_cis_pending(struct hci_dev *hdev);
-int hci_pa_create_sync_pending(struct hci_dev *hdev);
-int hci_le_big_create_sync_pending(struct hci_dev *hdev);
int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -1566,9 +1564,9 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 data_len, __u8 *data);
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
- struct bt_iso_qos *qos,
- __u16 sync_handle, __u8 num_bis, __u8 bis[]);
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos, __u16 sync_handle,
+ __u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 7e2cf0cca939..72558c826aa1 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -185,3 +185,6 @@ int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
struct hci_conn_params *params);
+
+int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 825141d675e5..d01c82983b4d 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -103,6 +103,12 @@ struct netdev_stat_ops {
struct netdev_queue_stats_tx *tx);
};
+void netdev_stat_queue_sum(struct net_device *netdev,
+ int rx_start, int rx_end,
+ struct netdev_queue_stats_rx *rx_sum,
+ int tx_start, int tx_end,
+ struct netdev_queue_stats_tx *tx_sum);
+
/**
* struct netdev_queue_mgmt_ops - netdev ops for queue management
*
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index a58ae7589d12..e8bd6ddb7b12 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -71,9 +71,6 @@ struct xdp_sock {
*/
u32 tx_budget_spent;
- /* Protects generic receive. */
- spinlock_t rx_lock;
-
/* Statistics */
u64 rx_dropped;
u64 rx_queue_full;
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 1dcd4d71468a..cac56e6b0869 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -53,6 +53,8 @@ struct xsk_buff_pool {
refcount_t users;
struct xdp_umem *umem;
struct work_struct work;
+ /* Protects generic receive in shared and non-shared umem mode. */
+ spinlock_t rx_lock;
struct list_head free_list;
struct list_head xskb_list;
u32 heads_cnt;
@@ -238,8 +240,8 @@ static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
return orig_addr;
offset = xskb->xdp.data - xskb->xdp.data_hard_start;
- orig_addr -= offset;
offset += pool->headroom;
+ orig_addr -= offset;
return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
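
The two-line swap matters because the base and the encoded offset must cancel exactly when the handle is decoded. With assumed numbers, headroom = 256 and data sitting 64 bytes past data_hard_start, the old order rebased orig_addr by 64 but encoded an offset of 320; the new order folds the headroom in first, so base plus offset round-trips. The fixed ordering, isolated:

static u64 example_encode(u64 orig_addr, u64 data_off, u32 headroom)
{
	u64 offset = data_off + headroom;	/* fold headroom in first */

	orig_addr -= offset;			/* then rebase by the same amount */
	return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
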
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
index 36a4a1e1d8ca..d8bd5d37131a 100644
--- a/include/sound/soc_sdw_utils.h
+++ b/include/sound/soc_sdw_utils.h
@@ -226,6 +226,7 @@ int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
bool playback);
int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *dai);
+int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix);
/* MAXIM codec support */
int asoc_sdw_maxim_init(struct snd_soc_card *card,
diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
index d099ae27f849..682499b871ea 100644
--- a/include/sound/ump_convert.h
+++ b/include/sound/ump_convert.h
@@ -19,7 +19,7 @@ struct ump_cvt_to_ump_bank {
/* context for converting from MIDI1 byte stream to UMP packet */
struct ump_cvt_to_ump {
/* MIDI1 intermediate buffer */
- unsigned char buf[4];
+ unsigned char buf[6]; /* up to 6 bytes for SysEx */
int len;
int cmd_bytes;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 549ab3b41961..3efc00cc1bcd 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1928,7 +1928,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct prelim_ref *oldref,
const struct prelim_ref *newref, u64 tree_size),
- TP_ARGS(fs_info, newref, oldref, tree_size),
+ TP_ARGS(fs_info, oldref, newref, tree_size),
TP_STRUCT__entry_btrfs(
__field( u64, root_id )
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 28705ae67784..fd404729b115 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4968,6 +4968,9 @@ union bpf_attr {
* the netns switch takes place from ingress to ingress without
* going through the CPU's backlog queue.
*
+ * *skb*\ **->mark** and *skb*\ **->tstamp** are not cleared during
+ * the netns switch.
+ *
* The *flags* argument is reserved and must be 0. The helper is
* currently only supported for tc BPF program types at the
* ingress hook and for veth and netkit target device types. The
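
The documented behaviour can be exercised directly from a tc program; a minimal sketch (the peer ifindex is an assumption):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define PEER_IFINDEX 4	/* assumed: veth/netkit peer in the target netns */

SEC("tc")
int redirect_keep_mark(struct __sk_buff *skb)
{
	skb->mark = 0xcafe;	/* survives the netns switch per the doc above */
	return bpf_redirect_peer(PEER_IFINDEX, 0);
}

char LICENSE[] SEC("license") = "GPL";
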
diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h
index fe24c3459ac0..30c8dad6214e 100644
--- a/include/uapi/linux/ethtool_netlink_generated.h
+++ b/include/uapi/linux/ethtool_netlink_generated.h
@@ -31,11 +31,6 @@ enum ethtool_header_flags {
ETHTOOL_FLAG_STATS = 4,
};
-enum {
- ETHTOOL_PHY_UPSTREAM_TYPE_MAC,
- ETHTOOL_PHY_UPSTREAM_TYPE_PHY,
-};
-
enum ethtool_tcp_data_split {
ETHTOOL_TCP_DATA_SPLIT_UNKNOWN,
ETHTOOL_TCP_DATA_SPLIT_DISABLED,
diff --git a/init/Kconfig b/init/Kconfig
index 63f5974b9fa6..4cdd1049283c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -140,6 +140,9 @@ config LD_CAN_USE_KEEP_IN_OVERLAY
config RUSTC_HAS_COERCE_POINTEE
def_bool RUSTC_VERSION >= 108400
+config RUSTC_HAS_UNNECESSARY_TRANSMUTES
+ def_bool RUSTC_VERSION >= 108800
+
config PAHOLE_VERSION
int
default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE))
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index f60d0a9d505e..9414ca6d101c 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -123,11 +123,11 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
seq_printf(m, "SqHead:\t%u\n", sq_head);
seq_printf(m, "SqTail:\t%u\n", sq_tail);
- seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
+ seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head));
seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
seq_printf(m, "CqHead:\t%u\n", cq_head);
seq_printf(m, "CqTail:\t%u\n", cq_tail);
- seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
+ seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail));
seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
for (i = 0; i < sq_entries; i++) {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a2b256e96d5d..541e65a1eebf 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -448,24 +448,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
return req->link;
}
-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
- if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
- return NULL;
- return __io_prep_linked_timeout(req);
-}
-
-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
-{
- io_queue_linked_timeout(__io_prep_linked_timeout(req));
-}
-
-static inline void io_arm_ltimeout(struct io_kiocb *req)
-{
- if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
- __io_arm_ltimeout(req);
-}
-
static void io_prep_async_work(struct io_kiocb *req)
{
const struct io_issue_def *def = &io_issue_defs[req->opcode];
@@ -518,7 +500,6 @@ static void io_prep_async_link(struct io_kiocb *req)
static void io_queue_iowq(struct io_kiocb *req)
{
- struct io_kiocb *link = io_prep_linked_timeout(req);
struct io_uring_task *tctx = req->tctx;
BUG_ON(!tctx);
@@ -543,8 +524,6 @@ static void io_queue_iowq(struct io_kiocb *req)
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
io_wq_enqueue(tctx->io_wq, &req->work);
- if (link)
- io_queue_linked_timeout(link);
}
static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
@@ -869,6 +848,14 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
struct io_ring_ctx *ctx = req->ctx;
bool posted;
+ /*
+ * If multishot has already posted deferred completions, ensure that
+ * those are flushed before posting this one; otherwise CQEs could
+ * get reordered.
+ */
+ if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+ __io_submit_flush_completions(ctx);
+
lockdep_assert(!io_wq_current_is_worker());
lockdep_assert_held(&ctx->uring_lock);
@@ -1724,15 +1711,22 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
return !!req->file;
}
+#define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
+
static inline int __io_issue_sqe(struct io_kiocb *req,
unsigned int issue_flags,
const struct io_issue_def *def)
{
const struct cred *creds = NULL;
+ struct io_kiocb *link = NULL;
int ret;
- if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
- creds = override_creds(req->creds);
+ if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
+ if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
+ creds = override_creds(req->creds);
+ if (req->flags & REQ_F_ARM_LTIMEOUT)
+ link = __io_prep_linked_timeout(req);
+ }
if (!def->audit_skip)
audit_uring_entry(req->opcode);
@@ -1742,8 +1736,12 @@ static inline int __io_issue_sqe(struct io_kiocb *req,
if (!def->audit_skip)
audit_uring_exit(!ret, ret);
- if (creds)
- revert_creds(creds);
+ if (unlikely(creds || link)) {
+ if (creds)
+ revert_creds(creds);
+ if (link)
+ io_queue_linked_timeout(link);
+ }
return ret;
}
@@ -1769,7 +1767,6 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
if (ret == IOU_ISSUE_SKIP_COMPLETE) {
ret = 0;
- io_arm_ltimeout(req);
/* If the op doesn't have a file, we're not polling for it */
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
@@ -1824,8 +1821,6 @@ void io_wq_submit_work(struct io_wq_work *work)
else
req_ref_get(req);
- io_arm_ltimeout(req);
-
/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
fail:
@@ -1941,15 +1936,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
static void io_queue_async(struct io_kiocb *req, int ret)
__must_hold(&req->ctx->uring_lock)
{
- struct io_kiocb *linked_timeout;
-
if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
io_req_defer_failed(req, ret);
return;
}
- linked_timeout = io_prep_linked_timeout(req);
-
switch (io_arm_poll_handler(req, 0)) {
case IO_APOLL_READY:
io_kbuf_recycle(req, 0);
@@ -1962,9 +1953,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
case IO_APOLL_OK:
break;
}
-
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
}
static inline void io_queue_sqe(struct io_kiocb *req)
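
The net effect of the io_uring hunks above: linked-timeout arming moves out of the issue/queue paths and into __io_issue_sqe(), gated together with credential override behind one flag mask so the fast path pays a single predictable branch. The shape, reduced to a generic sketch with hypothetical names:

#define REQ_SLOW_FLAGS	(REQ_F_CREDS_X | REQ_F_LTIMEOUT_X)	/* hypothetical */

static int example_issue(struct example_req *req)
{
	/* One test covers all the rare setup work ... */
	if (unlikely(req->flags & REQ_SLOW_FLAGS)) {
		if (req->flags & REQ_F_CREDS_X)
			example_override_creds(req);
		if (req->flags & REQ_F_LTIMEOUT_X)
			example_prep_timeout(req);
	}
	return example_do_issue(req);	/* ... and the fast path stays lean */
}
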
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index d037cc68e9d3..03c699493b5a 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -20,7 +20,7 @@
#include "sqpoll.h"
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
-#define IORING_TW_CAP_ENTRIES_VALUE 8
+#define IORING_TW_CAP_ENTRIES_VALUE 32
enum {
IO_SQ_THREAD_SHOULD_STOP = 0,
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 5c8d43cdb0a3..c05ba7ca00fa 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -761,7 +761,7 @@ static int msi_domain_translate(struct irq_domain *domain, struct irq_fwspec *fw
static void msi_domain_debug_show(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind)
{
- struct msi_desc *desc = irq_data_get_msi_desc(irqd);
+ struct msi_desc *desc = irqd ? irq_data_get_msi_desc(irqd) : NULL;
if (!desc)
return;
diff --git a/kernel/params.c b/kernel/params.c
index 2509f216c9f3..b92d64161b75 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -760,38 +760,35 @@ void destroy_params(const struct kernel_param *params, unsigned num)
params[i].ops->free(params[i].arg);
}
-static struct module_kobject * __init locate_module_kobject(const char *name)
+struct module_kobject __modinit * lookup_or_create_module_kobject(const char *name)
{
struct module_kobject *mk;
struct kobject *kobj;
int err;
kobj = kset_find_obj(module_kset, name);
- if (kobj) {
- mk = to_module_kobject(kobj);
- } else {
- mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
- BUG_ON(!mk);
-
- mk->mod = THIS_MODULE;
- mk->kobj.kset = module_kset;
- err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
- "%s", name);
-#ifdef CONFIG_MODULES
- if (!err)
- err = sysfs_create_file(&mk->kobj, &module_uevent.attr);
-#endif
- if (err) {
- kobject_put(&mk->kobj);
- pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n",
- name, err);
- return NULL;
- }
+ if (kobj)
+ return to_module_kobject(kobj);
- /* So that we hold reference in both cases. */
- kobject_get(&mk->kobj);
+ mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
+ if (!mk)
+ return NULL;
+
+ mk->mod = THIS_MODULE;
+ mk->kobj.kset = module_kset;
+ err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name);
+ if (IS_ENABLED(CONFIG_MODULES) && !err)
+ err = sysfs_create_file(&mk->kobj, &module_uevent.attr);
+ if (err) {
+ kobject_put(&mk->kobj);
+ pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n",
+ name, err);
+ return NULL;
}
+ /* So that we hold reference in both cases. */
+ kobject_get(&mk->kobj);
+
return mk;
}
@@ -802,7 +799,7 @@ static void __init kernel_add_sysfs_param(const char *name,
struct module_kobject *mk;
int err;
- mk = locate_module_kobject(name);
+ mk = lookup_or_create_module_kobject(name);
if (!mk)
return;
@@ -873,7 +870,7 @@ static void __init version_sysfs_builtin(void)
int err;
for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
- mk = locate_module_kobject(vattr->module_name);
+ mk = lookup_or_create_module_kobject(vattr->module_name);
if (mk) {
err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
WARN_ON_ONCE(err);
@@ -946,7 +943,9 @@ struct kset *module_kset;
static void module_kobj_release(struct kobject *kobj)
{
struct module_kobject *mk = to_module_kobject(kobj);
- complete(mk->kobj_completion);
+
+ if (mk->kobj_completion)
+ complete(mk->kobj_completion);
}
const struct kobj_type module_ktype = {
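
Since lookup_or_create_module_kobject() is now declared in linux/module.h (see the hunk earlier), builtin code can reuse it. A hedged usage sketch, mirroring version_sysfs_builtin() above:

static int example_publish(const char *name, struct attribute *attr)
{
	struct module_kobject *mk = lookup_or_create_module_kobject(name);

	if (!mk)	/* allocation or sysfs registration failed */
		return -ENOMEM;
	return sysfs_create_file(&mk->kobj, attr);	/* /sys/module/<name>/ */
}
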
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 1e67d076f195..a009c91f7b05 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -164,10 +164,34 @@ static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
return ts;
}
+static inline struct timespec64 tk_xtime_coarse(const struct timekeeper *tk)
+{
+ struct timespec64 ts;
+
+ ts.tv_sec = tk->xtime_sec;
+ ts.tv_nsec = tk->coarse_nsec;
+ return ts;
+}
+
+/*
+ * Update the nanoseconds part for the coarse time keepers. They can't rely
+ * on xtime_nsec because xtime_nsec could be adjusted by a small negative
+ * amount when the multiplication factor of the clock is adjusted, which
+ * could cause the coarse clocks to go slightly backwards. See
+ * timekeeping_apply_adjustment(). Thus we keep a separate copy for the coarse
+ * clockids, which is only updated when the clock has been set or time
+ * has been accumulated.
+ */
+static inline void tk_update_coarse_nsecs(struct timekeeper *tk)
+{
+ tk->coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+}
+
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
tk->xtime_sec = ts->tv_sec;
tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
+ tk_update_coarse_nsecs(tk);
}
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
@@ -175,6 +199,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
tk->xtime_sec += ts->tv_sec;
tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
tk_normalize_xtime(tk);
+ tk_update_coarse_nsecs(tk);
}
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
@@ -708,6 +733,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
tk_normalize_xtime(tk);
delta -= incr;
}
+ tk_update_coarse_nsecs(tk);
}
/**
@@ -804,8 +830,8 @@ EXPORT_SYMBOL_GPL(ktime_get_with_offset);
ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
{
struct timekeeper *tk = &tk_core.timekeeper;
- unsigned int seq;
ktime_t base, *offset = offsets[offs];
+ unsigned int seq;
u64 nsecs;
WARN_ON(timekeeping_suspended);
@@ -813,7 +839,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
do {
seq = read_seqcount_begin(&tk_core.seq);
base = ktime_add(tk->tkr_mono.base, *offset);
- nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+ nsecs = tk->coarse_nsec;
} while (read_seqcount_retry(&tk_core.seq, seq));
@@ -2161,7 +2187,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
struct timekeeper *real_tk = &tk_core.timekeeper;
unsigned int clock_set = 0;
int shift = 0, maxshift;
- u64 offset;
+ u64 offset, orig_offset;
guard(raw_spinlock_irqsave)(&tk_core.lock);
@@ -2172,7 +2198,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
tk->tkr_mono.clock->max_raw_delta);
-
+ orig_offset = offset;
/* Check if there's really nothing to do */
if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
return false;
@@ -2205,6 +2231,14 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
*/
clock_set |= accumulate_nsecs_to_secs(tk);
+ /*
+ * To avoid inconsistencies caused by adjtimex TK_ADV_FREQ calls
+ * making small negative adjustments to the base xtime_nsec
+ * value, only update the coarse clocks if we accumulated time.
+ */
+ if (orig_offset != offset)
+ tk_update_coarse_nsecs(tk);
+
timekeeping_update_from_shadow(&tk_core, clock_set);
return !!clock_set;
@@ -2248,7 +2282,7 @@ void ktime_get_coarse_real_ts64(struct timespec64 *ts)
do {
seq = read_seqcount_begin(&tk_core.seq);
- *ts = tk_xtime(tk);
+ *ts = tk_xtime_coarse(tk);
} while (read_seqcount_retry(&tk_core.seq, seq));
}
EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
@@ -2271,7 +2305,7 @@ void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts)
do {
seq = read_seqcount_begin(&tk_core.seq);
- *ts = tk_xtime(tk);
+ *ts = tk_xtime_coarse(tk);
offset = tk_core.timekeeper.offs_real;
} while (read_seqcount_retry(&tk_core.seq, seq));
@@ -2350,12 +2384,12 @@ void ktime_get_coarse_ts64(struct timespec64 *ts)
do {
seq = read_seqcount_begin(&tk_core.seq);
- now = tk_xtime(tk);
+ now = tk_xtime_coarse(tk);
mono = tk->wall_to_monotonic;
} while (read_seqcount_retry(&tk_core.seq, seq));
set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
- now.tv_nsec + mono.tv_nsec);
+ now.tv_nsec + mono.tv_nsec);
}
EXPORT_SYMBOL(ktime_get_coarse_ts64);
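
A numeric illustration (all numbers assumed) of the backward step the separate coarse_nsec copy prevents:

static void example_backward_step(void)
{
	unsigned int shift = 8;
	u64 xtime_nsec = 1000ULL << shift;	/* coarse readers derive 1000 ns */

	/* An adjtimex mult adjustment compensates by subtracting a little: */
	xtime_nsec -= 3ULL << shift;

	/* Re-deriving now yields 997 ns -- time appears to go backwards.
	 * With the patch, coarse_nsec stays at 1000 until accumulation.
	 */
	WARN_ON((xtime_nsec >> shift) != 997);
}
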
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 01c2ab1e8971..32ef27c71b57 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -98,12 +98,12 @@ void update_vsyscall(struct timekeeper *tk)
/* CLOCK_REALTIME_COARSE */
vdso_ts = &vc[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
vdso_ts->sec = tk->xtime_sec;
- vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+ vdso_ts->nsec = tk->coarse_nsec;
/* CLOCK_MONOTONIC_COARSE */
vdso_ts = &vc[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
- nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+ nsec = tk->coarse_nsec;
nsec = nsec + tk->wall_to_monotonic.tv_nsec;
vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 61130bb34d6c..6981830c3128 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3436,7 +3436,7 @@ static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **
/* Copy the subops hash */
*filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash);
- if (!filter_hash)
+ if (!*filter_hash)
return -ENOMEM;
/* Remove any notrace functions from the copy */
remove_hash(*filter_hash, subops_hash->notrace_hash);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8ddf6b17215c..5b8db27fb6ef 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6043,8 +6043,10 @@ unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr)
tscratch = tr->scratch;
 /* if there is no tscratch, module_delta must be NULL. */
module_delta = READ_ONCE(tr->module_delta);
- if (!module_delta || tscratch->entries[0].mod_addr > addr)
+ if (!module_delta || !tscratch->nr_entries ||
+ tscratch->entries[0].mod_addr > addr) {
return addr + tr->text_delta;
+ }
/* Note that entries must be sorted. */
nr_entries = tscratch->nr_entries;
@@ -6821,13 +6823,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
/* Copy the data into the page, so we can start over. */
ret = trace_seq_to_buffer(&iter->seq,
page_address(spd.pages[i]),
- trace_seq_used(&iter->seq));
+ min((size_t)trace_seq_used(&iter->seq),
+ PAGE_SIZE));
if (ret < 0) {
__free_page(spd.pages[i]);
break;
}
spd.partial[i].offset = 0;
- spd.partial[i].len = trace_seq_used(&iter->seq);
+ spd.partial[i].len = ret;
trace_seq_init(&iter->seq);
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index fee40ffbd490..b9ab06c99543 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1042,11 +1042,12 @@ enum print_line_t print_event_fields(struct trace_iterator *iter,
struct trace_event_call *call;
struct list_head *head;
+ lockdep_assert_held_read(&trace_event_sem);
+
/* ftrace defined events have separate call structures */
if (event->type <= __TRACE_LAST_TYPE) {
bool found = false;
- down_read(&trace_event_sem);
list_for_each_entry(call, &ftrace_events, list) {
if (call->event.type == event->type) {
found = true;
@@ -1056,7 +1057,6 @@ enum print_line_t print_event_fields(struct trace_iterator *iter,
if (call->event.type > __TRACE_LAST_TYPE)
break;
}
- up_read(&trace_event_sem);
if (!found) {
trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
goto out;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2a47682d1ab7..47d76d03ce30 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3075,6 +3075,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmd, bool freeze, struct folio *folio)
{
+ bool pmd_migration = is_pmd_migration_entry(*pmd);
+
VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio));
VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
@@ -3085,9 +3087,12 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
* require a folio to check the PMD against. Otherwise, there
* is a risk of replacing the wrong folio.
*/
- if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
- is_pmd_migration_entry(*pmd)) {
- if (folio && folio != pmd_folio(*pmd))
+ if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) {
+ /*
+ * Do not apply pmd_folio() to a migration entry; with the folio
+ * lock held, a migration entry cannot refer to this folio, so it
+ * must be the wrong folio anyway.
+ */
+ if (folio && (pmd_migration || folio != pmd_folio(*pmd)))
return;
__split_huge_pmd_locked(vma, pmd, address, freeze);
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e3e6ac991b9c..6ea1be71aa42 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4034,10 +4034,13 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
list_for_each_entry_safe(folio, next, src_list, lru) {
int i;
+ bool cma;
if (folio_test_hugetlb_vmemmap_optimized(folio))
continue;
+ cma = folio_test_hugetlb_cma(folio);
+
list_del(&folio->lru);
split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
@@ -4053,6 +4056,9 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
new_folio->mapping = NULL;
init_new_hugetlb_folio(dst, new_folio);
+ /* Copy the CMA flag so that it is freed correctly */
+ if (cma)
+ folio_set_hugetlb_cma(new_folio);
list_add(&new_folio->lru, &dst_list);
}
}
diff --git a/mm/internal.h b/mm/internal.h
index e9695baa5922..25a29872c634 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -248,11 +248,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
bool *any_writable, bool *any_young, bool *any_dirty)
{
- unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
- const pte_t *end_ptep = start_ptep + max_nr;
pte_t expected_pte, *ptep;
bool writable, young, dirty;
- int nr;
+ int nr, cur_nr;
if (any_writable)
*any_writable = false;
@@ -265,11 +263,15 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
+ /* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
+ max_nr = min_t(unsigned long, max_nr,
+ folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
+
nr = pte_batch_hint(start_ptep, pte);
expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
ptep = start_ptep + nr;
- while (ptep < end_ptep) {
+ while (nr < max_nr) {
pte = ptep_get(ptep);
if (any_writable)
writable = !!pte_write(pte);
@@ -282,14 +284,6 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
if (!pte_same(pte, expected_pte))
break;
- /*
- * Stop immediately once we reached the end of the folio. In
- * corner cases the next PFN might fall into a different
- * folio.
- */
- if (pte_pfn(pte) >= folio_end_pfn)
- break;
-
if (any_writable)
*any_writable |= writable;
if (any_young)
@@ -297,12 +291,13 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
if (any_dirty)
*any_dirty |= dirty;
- nr = pte_batch_hint(ptep, pte);
- expected_pte = pte_advance_pfn(expected_pte, nr);
- ptep += nr;
+ cur_nr = pte_batch_hint(ptep, pte);
+ expected_pte = pte_advance_pfn(expected_pte, cur_nr);
+ ptep += cur_nr;
+ nr += cur_nr;
}
- return min(ptep - start_ptep, max_nr);
+ return min(nr, max_nr);
}
/**
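
The up-front clamp replaces the per-iteration folio-end check; with assumed numbers it works out as follows:

/*
 * Assumed example: the folio covers PFNs [0x200, 0x210), the first PTE
 * maps PFN 0x20c, and the caller passes max_nr = 16. Then
 *
 *	max_nr = min(16, 0x200 + 0x10 - 0x20c) = min(16, 4) = 4,
 *
 * so "while (nr < max_nr)" can never advance past the folio and the old
 * in-loop "pte_pfn(pte) >= folio_end_pfn" test becomes unnecessary.
 */
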
diff --git a/mm/memblock.c b/mm/memblock.c
index 0a53db4d9f7b..0e9ebb8aa7fe 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -457,7 +457,14 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
min(new_area_start, memblock.current_limit),
new_alloc_size, PAGE_SIZE);
- new_array = addr ? __va(addr) : NULL;
+ if (addr) {
+ /* The memory may not have been accepted, yet. */
+ accept_memory(addr, new_alloc_size);
+
+ new_array = __va(addr);
+ } else {
+ new_array = NULL;
+ }
}
if (!addr) {
pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
@@ -2183,11 +2190,14 @@ static void __init memmap_init_reserved_pages(void)
struct memblock_region *region;
phys_addr_t start, end;
int nid;
+ unsigned long max_reserved;
/*
* set nid on all reserved pages and also treat struct
* pages for the NOMAP regions as PageReserved
*/
+repeat:
+ max_reserved = memblock.reserved.max;
for_each_mem_region(region) {
nid = memblock_get_region_node(region);
start = region->base;
@@ -2196,8 +2206,15 @@ static void __init memmap_init_reserved_pages(void)
if (memblock_is_nomap(region))
reserve_bootmem_region(start, end, nid);
- memblock_set_node(start, end, &memblock.reserved, nid);
+ memblock_set_node(start, region->size, &memblock.reserved, nid);
}
+ /*
+ * A changed 'max' means memblock.reserved has doubled its array,
+ * which may have inserted a new reserved region before the current
+ * 'start'. Repeat the procedure to set its node id.
+ */
+ if (max_reserved != memblock.reserved.max)
+ goto repeat;
/*
* initialize struct pages for reserved regions that don't have
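
The goto-repeat above is the standard answer when a walk can grow the array it is walking. The shape, as a generic sketch with hypothetical names:

static void example_walk_all(struct example_table *t)
{
	unsigned long snapshot, i;

repeat:
	snapshot = t->max;			/* capacity before this pass */
	for (i = 0; i < t->cnt; i++)
		example_visit(&t->entries[i]);	/* may double t->entries */
	if (snapshot != t->max)			/* grew: a new entry may now */
		goto repeat;			/* precede our progress point */
}
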
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 9659689b8ace..327764ca0ee4 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1786,7 +1786,7 @@ static bool arch_has_descending_max_zone_pfns(void)
return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}
-static void set_high_memory(void)
+static void __init set_high_memory(void)
{
phys_addr_t highmem = memblock_end_of_DRAM();
diff --git a/mm/slub.c b/mm/slub.c
index dc9e729e1d26..be8b09e09d30 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2028,8 +2028,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
return 0;
}
-/* Should be called only if mem_alloc_profiling_enabled() */
-static noinline void free_slab_obj_exts(struct slab *slab)
+static inline void free_slab_obj_exts(struct slab *slab)
{
struct slabobj_ext *obj_exts;
@@ -2049,18 +2048,6 @@ static noinline void free_slab_obj_exts(struct slab *slab)
slab->obj_exts = 0;
}
-static inline bool need_slab_obj_ext(void)
-{
- if (mem_alloc_profiling_enabled())
- return true;
-
- /*
- * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
- * inside memcg_slab_post_alloc_hook. No other users for now.
- */
- return false;
-}
-
#else /* CONFIG_SLAB_OBJ_EXT */
static inline void init_slab_obj_exts(struct slab *slab)
@@ -2077,11 +2064,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
{
}
-static inline bool need_slab_obj_ext(void)
-{
- return false;
-}
-
#endif /* CONFIG_SLAB_OBJ_EXT */
#ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -2129,7 +2111,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
static inline void
alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
{
- if (need_slab_obj_ext())
+ if (mem_alloc_profiling_enabled())
__alloc_tagging_slab_alloc_hook(s, object, flags);
}
@@ -2601,8 +2583,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
static __always_inline void unaccount_slab(struct slab *slab, int order,
struct kmem_cache *s)
{
- if (memcg_kmem_online() || need_slab_obj_ext())
- free_slab_obj_exts(slab);
+ /*
+ * The slab object extensions must be freed regardless of whether
+ * mem_alloc_profiling_enabled() is true, because profiling might
+ * have been disabled after slab->obj_exts was allocated.
+ */
+ free_slab_obj_exts(slab);
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
-(PAGE_SIZE << order));
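The rule the unaccount_slab() comment states generalizes: free unconditionally and let the helper no-op when nothing was allocated, rather than gating the free on a runtime flag that may have flipped since allocation. A userspace analogue (not the kernel helper):

#include <stdlib.h>

struct slab_like {
	void *obj_exts;		/* NULL when never allocated */
};

static void free_obj_exts(struct slab_like *s)
{
	free(s->obj_exts);	/* free(NULL) is a no-op, like the helper */
	s->obj_exts = NULL;
}

static void unaccount(struct slab_like *s)
{
	/* deliberately no profiling-enabled check here */
	free_obj_exts(s);
}

int main(void)
{
	struct slab_like s = { 0 };

	s.obj_exts = malloc(16);
	unaccount(&s);		/* frees whatever exists */
	unaccount(&s);		/* idempotent: nothing left to free */
	return 0;
}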
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2eff8b51a945..f214843612dc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1272,13 +1272,22 @@ int folio_alloc_swap(struct folio *folio, gfp_t gfp)
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
- /*
- * Should not even be attempting large allocations when huge
- * page swap is disabled. Warn and fail the allocation.
- */
- if (order && (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER)) {
- VM_WARN_ON_ONCE(1);
- return -EINVAL;
+ if (order) {
+ /*
+ * Reject large allocation when THP_SWAP is disabled,
+ * the caller should split the folio and try again.
+ */
+ if (!IS_ENABLED(CONFIG_THP_SWAP))
+ return -EAGAIN;
+
+ /*
+ * Allocation size should never exceed cluster size
+ * (HPAGE_PMD_SIZE).
+ */
+ if (size > SWAPFILE_CLUSTER) {
+ VM_WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
}
local_lock(&percpu_swap_cluster.lock);
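The split return codes give callers a recovery path: -EAGAIN is recoverable (split the folio and retry at order 0), while -EINVAL stays a hard failure. A hypothetical caller shaped around that contract — split_folio() and folio_alloc_swap() are real kernel interfaces, but this control flow is illustrative, not lifted from the tree:

static int try_swap_folio(struct folio *folio, gfp_t gfp)
{
	int err = folio_alloc_swap(folio, gfp);

	if (err == -EAGAIN) {
		/* THP swap disabled: fall back to order-0 entries */
		err = split_folio(folio);
		if (err)
			return err;
		err = folio_alloc_swap(folio, gfp);
	}
	return err;
}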
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3ed720a787ec..2d7511654831 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
{
vm->flags = flags;
vm->addr = (void *)va->va_start;
- vm->size = va_size(va);
+ vm->size = vm->requested_size = va_size(va);
vm->caller = caller;
va->vm = vm;
}
@@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
area->flags = flags;
area->caller = caller;
+ area->requested_size = requested_size;
va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
if (IS_ERR(va)) {
@@ -4063,6 +4064,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
*/
void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
+ struct vm_struct *vm = NULL;
+ size_t alloced_size = 0;
size_t old_size = 0;
void *n;
@@ -4072,15 +4075,17 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
}
if (p) {
- struct vm_struct *vm;
-
vm = find_vm_area(p);
if (unlikely(!vm)) {
WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
return NULL;
}
- old_size = get_vm_area_size(vm);
+ alloced_size = get_vm_area_size(vm);
+ old_size = vm->requested_size;
+ if (WARN(alloced_size < old_size,
+ "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+ return NULL;
}
/*
@@ -4088,14 +4093,26 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
* would be a good heuristic for when to shrink the vm_area?
*/
if (size <= old_size) {
- /* Zero out spare memory. */
- if (want_init_on_alloc(flags))
+ /* Zero out "freed" memory. */
+ if (want_init_on_free())
memset((void *)p + size, 0, old_size - size);
+ vm->requested_size = size;
kasan_poison_vmalloc(p + size, old_size - size);
- kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
return (void *)p;
}
+ /*
+ * We already have the bytes available in the allocation; use them.
+ */
+ if (size <= alloced_size) {
+ kasan_unpoison_vmalloc(p + old_size, size - old_size,
+ KASAN_VMALLOC_PROT_NORMAL);
+ /* Zero out "alloced" memory. */
+ if (want_init_on_alloc(flags))
+ memset((void *)p + old_size, 0, size - old_size);
+ vm->requested_size = size;
+ }
+
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
n = __vmalloc_noprof(size, flags);
if (!n)
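The requested-vs-allocated split above lets vrealloc() shrink and regrow inside the existing mapping without copying. A userspace model of that bookkeeping (invented struct; memset() stands in for the KASAN poison/unpoison and init-on-alloc/free zeroing):

#include <stdlib.h>
#include <string.h>

struct varea {
	char *p;
	size_t allocated;	/* backing size, fixed here */
	size_t requested;	/* what the caller last asked for */
};

static void *varea_realloc(struct varea *v, size_t size)
{
	if (size <= v->requested) {		/* shrink in place */
		memset(v->p + size, 0, v->requested - size);
		v->requested = size;
		return v->p;
	}
	if (size <= v->allocated) {		/* regrow into slack */
		memset(v->p + v->requested, 0, size - v->requested);
		v->requested = size;
		return v->p;
	}
	return NULL;	/* real code allocates, copies, and frees */
}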
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7e1b53857648..6533e281ada3 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -2064,95 +2064,6 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
return hci_le_create_big(conn, &conn->iso_qos);
}
-static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
-{
- bt_dev_dbg(hdev, "");
-
- if (err)
- bt_dev_err(hdev, "Unable to create PA: %d", err);
-}
-
-static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
-{
- if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
- return false;
-
- return true;
-}
-
-static int create_pa_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_cp_le_pa_create_sync cp = {0};
- struct hci_conn *conn;
- int err = 0;
-
- hci_dev_lock(hdev);
-
- rcu_read_lock();
-
- /* The spec allows only one pending LE Periodic Advertising Create
- * Sync command at a time. If the command is pending now, don't do
- * anything. We check for pending connections after each PA Sync
- * Established event.
- *
- * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
- * page 2493:
- *
- * If the Host issues this command when another HCI_LE_Periodic_
- * Advertising_Create_Sync command is pending, the Controller shall
- * return the error code Command Disallowed (0x0C).
- */
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
- goto unlock;
- }
-
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (hci_conn_check_create_pa_sync(conn)) {
- struct bt_iso_qos *qos = &conn->iso_qos;
-
- cp.options = qos->bcast.options;
- cp.sid = conn->sid;
- cp.addr_type = conn->dst_type;
- bacpy(&cp.addr, &conn->dst);
- cp.skip = cpu_to_le16(qos->bcast.skip);
- cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
- cp.sync_cte_type = qos->bcast.sync_cte_type;
-
- break;
- }
- }
-
-unlock:
- rcu_read_unlock();
-
- hci_dev_unlock(hdev);
-
- if (bacmp(&cp.addr, BDADDR_ANY)) {
- hci_dev_set_flag(hdev, HCI_PA_SYNC);
- set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
-
- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
- if (!err)
- err = hci_update_passive_scan_sync(hdev);
-
- if (err) {
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
- }
- }
-
- return err;
-}
-
-int hci_pa_create_sync_pending(struct hci_dev *hdev)
-{
- /* Queue start pa_create_sync and scan */
- return hci_cmd_sync_queue(hdev, create_pa_sync,
- NULL, create_pa_complete);
-}
-
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid,
struct bt_iso_qos *qos)
@@ -2167,97 +2078,18 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
conn->dst_type = dst_type;
conn->sid = sid;
conn->state = BT_LISTEN;
+ conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
hci_conn_hold(conn);
- hci_pa_create_sync_pending(hdev);
+ hci_connect_pa_sync(hdev, conn);
return conn;
}
-static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
-{
- if (!conn->num_bis)
- return false;
-
- return true;
-}
-
-static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
-{
- bt_dev_dbg(hdev, "");
-
- if (err)
- bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
-}
-
-static int big_create_sync(struct hci_dev *hdev, void *data)
-{
- DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
- struct hci_conn *conn;
-
- rcu_read_lock();
-
- pdu->num_bis = 0;
-
- /* The spec allows only one pending LE BIG Create Sync command at
- * a time. If the command is pending now, don't do anything. We
- * check for pending connections after each BIG Sync Established
- * event.
- *
- * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
- * page 2586:
- *
- * If the Host sends this command when the Controller is in the
- * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
- * Established event has not been generated, the Controller shall
- * return the error code Command Disallowed (0x0C).
- */
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
- goto unlock;
- }
-
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (hci_conn_check_create_big_sync(conn)) {
- struct bt_iso_qos *qos = &conn->iso_qos;
-
- set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
-
- pdu->handle = qos->bcast.big;
- pdu->sync_handle = cpu_to_le16(conn->sync_handle);
- pdu->encryption = qos->bcast.encryption;
- memcpy(pdu->bcode, qos->bcast.bcode,
- sizeof(pdu->bcode));
- pdu->mse = qos->bcast.mse;
- pdu->timeout = cpu_to_le16(qos->bcast.timeout);
- pdu->num_bis = conn->num_bis;
- memcpy(pdu->bis, conn->bis, conn->num_bis);
-
- break;
- }
- }
-
-unlock:
- rcu_read_unlock();
-
- if (!pdu->num_bis)
- return 0;
-
- return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
- struct_size(pdu, bis, pdu->num_bis), pdu);
-}
-
-int hci_le_big_create_sync_pending(struct hci_dev *hdev)
-{
- /* Queue big_create_sync */
- return hci_cmd_sync_queue_once(hdev, big_create_sync,
- NULL, big_create_sync_complete);
-}
-
-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
- struct bt_iso_qos *qos,
- __u16 sync_handle, __u8 num_bis, __u8 bis[])
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos, __u16 sync_handle,
+ __u8 num_bis, __u8 bis[])
{
int err;
@@ -2274,9 +2106,10 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
hcon->num_bis = num_bis;
memcpy(hcon->bis, bis, num_bis);
+ hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
}
- return hci_le_big_create_sync_pending(hdev);
+ return hci_connect_big_sync(hdev, hcon);
}
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
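Both conn_timeout assignments above multiply by 10 because the HCI sync_timeout/timeout fields are expressed in units of 10 ms per the Bluetooth Core spec. A standalone check of the conversion — the rounding model only approximates the kernel's msecs_to_jiffies():

#include <stdio.h>

#define HZ 250	/* example CONFIG_HZ */

static unsigned long msecs_to_jiffies_model(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;	/* rounds up */
}

int main(void)
{
	unsigned int field = 100;	/* HCI timeout field value */
	unsigned int ms = field * 10;	/* spec units are 10 ms */

	printf("field=%u -> %u ms -> %lu jiffies (HZ=%d)\n",
	       field, ms, msecs_to_jiffies_model(ms), HZ);
	return 0;
}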
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 5f808f0b0e9a..6d6061111ac5 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -6378,8 +6378,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
- ev->bdaddr_type);
+ conn = hci_conn_hash_lookup_create_pa_sync(hdev);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection for dst %pMR sid 0x%2.2x",
@@ -6418,9 +6417,6 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
}
unlock:
- /* Handle any other pending PA sync command */
- hci_pa_create_sync_pending(hdev);
-
hci_dev_unlock(hdev);
}
@@ -6932,7 +6928,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
+ if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
flex_array_size(ev, bis, ev->num_bis)))
return;
@@ -7003,9 +6999,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
}
unlock:
- /* Handle any other pending BIG sync command */
- hci_le_big_create_sync_pending(hdev);
-
hci_dev_unlock(hdev);
}
@@ -7127,8 +7120,8 @@ static const struct hci_le_ev {
hci_le_create_big_complete_evt,
sizeof(struct hci_evt_le_create_big_complete),
HCI_MAX_EVENT_SIZE),
- /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
- HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
+ /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */
+ HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
hci_le_big_sync_established_evt,
sizeof(struct hci_evt_le_big_sync_estabilished),
HCI_MAX_EVENT_SIZE),
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 609b035e5c90..e56b1cbedab9 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -2693,16 +2693,16 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
/* Force address filtering if PA Sync is in progress */
if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
- struct hci_cp_le_pa_create_sync *sent;
+ struct hci_conn *conn;
- sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
- if (sent) {
+ conn = hci_conn_hash_lookup_create_pa_sync(hdev);
+ if (conn) {
struct conn_params pa;
memset(&pa, 0, sizeof(pa));
- bacpy(&pa.addr, &sent->addr);
- pa.addr_type = sent->addr_type;
+ bacpy(&pa.addr, &conn->dst);
+ pa.addr_type = conn->dst_type;
/* Clear first since there could be addresses left
* behind.
@@ -6895,3 +6895,143 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
+
+static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+{
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (!err)
+ return;
+
+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+
+ if (err == -ECANCELED)
+ return;
+
+ hci_dev_lock(hdev);
+
+ hci_update_passive_scan_sync(hdev);
+
+ hci_dev_unlock(hdev);
+}
+
+static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_cp_le_pa_create_sync cp;
+ struct hci_conn *conn = data;
+ struct bt_iso_qos *qos = &conn->iso_qos;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
+ return -EBUSY;
+
+ /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
+ * program the address into the allow list and PA advertisements can
+ * be received.
+ */
+ set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+
+ hci_update_passive_scan_sync(hdev);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.options = qos->bcast.options;
+ cp.sid = conn->sid;
+ cp.addr_type = conn->dst_type;
+ bacpy(&cp.addr, &conn->dst);
+ cp.skip = cpu_to_le16(qos->bcast.skip);
+ cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
+ cp.sync_cte_type = qos->bcast.sync_cte_type;
+
+ /* The spec allows only one pending LE Periodic Advertising Create
+ * Sync command at a time, so we wait synchronously for the PA Sync
+ * Established event, since cmd_work can only schedule one command at a time.
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2493:
+ *
+ * If the Host issues this command when another HCI_LE_Periodic_
+ * Advertising_Create_Sync command is pending, the Controller shall
+ * return the error code Command Disallowed (0x0C).
+ */
+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC,
+ sizeof(cp), &cp,
+ HCI_EV_LE_PA_SYNC_ESTABLISHED,
+ conn->conn_timeout, NULL);
+ if (err == -ETIMEDOUT)
+ __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
+ return err;
+}
+
+int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn,
+ create_pa_complete);
+}
+
+static void create_big_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct hci_conn *conn = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (err == -ECANCELED)
+ return;
+
+ if (hci_conn_valid(hdev, conn))
+ clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
+}
+
+static int hci_le_big_create_sync(struct hci_dev *hdev, void *data)
+{
+ DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11);
+ struct hci_conn *conn = data;
+ struct bt_iso_qos *qos = &conn->iso_qos;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
+
+ memset(cp, 0, sizeof(*cp));
+ cp->handle = qos->bcast.big;
+ cp->sync_handle = cpu_to_le16(conn->sync_handle);
+ cp->encryption = qos->bcast.encryption;
+ memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode));
+ cp->mse = qos->bcast.mse;
+ cp->timeout = cpu_to_le16(qos->bcast.timeout);
+ cp->num_bis = conn->num_bis;
+ memcpy(cp->bis, conn->bis, conn->num_bis);
+
+ /* The spec allows only one pending LE BIG Create Sync command at
+ * a time, so we wait synchronously for the BIG Sync Established event since
+ * cmd_work can only schedule one command at a time.
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2586:
+ *
+ * If the Host sends this command when the Controller is in the
+ * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
+ * Established event has not been generated, the Controller shall
+ * return the error code Command Disallowed (0x0C).
+ */
+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
+ struct_size(cp, bis, cp->num_bis), cp,
+ HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
+ conn->conn_timeout, NULL);
+ if (err == -ETIMEDOUT)
+ hci_le_big_terminate_sync(hdev, cp->handle);
+
+ return err;
+}
+
+int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn,
+ create_big_complete);
+}
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 3501a991f1c6..2819cda616bc 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1462,14 +1462,13 @@ static void iso_conn_big_sync(struct sock *sk)
lock_sock(sk);
if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
- err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
- &iso_pi(sk)->qos,
- iso_pi(sk)->sync_handle,
- iso_pi(sk)->bc_num_bis,
- iso_pi(sk)->bc_bis);
+ err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
+ &iso_pi(sk)->qos,
+ iso_pi(sk)->sync_handle,
+ iso_pi(sk)->bc_num_bis,
+ iso_pi(sk)->bc_bis);
if (err)
- bt_dev_err(hdev, "hci_le_big_create_sync: %d",
- err);
+ bt_dev_err(hdev, "hci_big_create_sync: %d", err);
}
release_sock(sk);
@@ -1922,7 +1921,7 @@ static void iso_conn_ready(struct iso_conn *conn)
hcon);
} else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
ev = hci_recv_event_data(hcon->hdev,
- HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
+ HCI_EVT_LE_BIG_SYNC_ESTABLISHED);
/* Get reference to PA sync parent socket, if it exists */
parent = iso_get_sock(&hcon->src, &hcon->dst,
@@ -2113,12 +2112,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
- err = hci_le_big_create_sync(hdev,
- hcon,
- &iso_pi(sk)->qos,
- iso_pi(sk)->sync_handle,
- iso_pi(sk)->bc_num_bis,
- iso_pi(sk)->bc_bis);
+ err = hci_conn_big_create_sync(hdev, hcon,
+ &iso_pi(sk)->qos,
+ iso_pi(sk)->sync_handle,
+ iso_pi(sk)->bc_num_bis,
+ iso_pi(sk)->bc_bis);
if (err) {
bt_dev_err(hdev, "hci_le_big_create_sync: %d",
err);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ca7ac43c58d..73472756618a 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7415,6 +7415,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
return -ENOMEM;
/* Init rx_len */
conn->rx_len = len;
+
+ skb_set_delivery_time(conn->rx_skb, skb->tstamp,
+ skb->tstamp_type);
}
/* Copy as much as the rx_skb can hold */
diff --git a/net/can/gw.c b/net/can/gw.c
index ef93293c1fae..55eccb1c7620 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -130,7 +130,7 @@ struct cgw_job {
u32 handled_frames;
u32 dropped_frames;
u32 deleted_frames;
- struct cf_mod mod;
+ struct cf_mod __rcu *cf_mod;
union {
/* CAN frame data source */
struct net_device *dev;
@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
struct cgw_job *gwj = (struct cgw_job *)data;
struct canfd_frame *cf;
struct sk_buff *nskb;
+ struct cf_mod *mod;
int modidx = 0;
/* process strictly Classic CAN or CAN FD frames */
@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
* When there is at least one modification function activated,
* we need to copy the skb as we want to modify skb->data.
*/
- if (gwj->mod.modfunc[0])
+ mod = rcu_dereference(gwj->cf_mod);
+ if (mod->modfunc[0])
nskb = skb_copy(skb, GFP_ATOMIC);
else
nskb = skb_clone(skb, GFP_ATOMIC);
@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
cf = (struct canfd_frame *)nskb->data;
/* perform preprocessed modification functions if there are any */
- while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
- (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
+ while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
+ (*mod->modfunc[modidx++])(cf, mod);
/* Has the CAN frame been modified? */
if (modidx) {
@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
}
/* check for checksum updates */
- if (gwj->mod.csumfunc.crc8)
- (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+ if (mod->csumfunc.crc8)
+ (*mod->csumfunc.crc8)(cf, &mod->csum.crc8);
- if (gwj->mod.csumfunc.xor)
- (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ if (mod->csumfunc.xor)
+ (*mod->csumfunc.xor)(cf, &mod->csum.xor);
}
/* clear the skb timestamp if not configured the other way */
@@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head)
{
struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
+ /* cgw_job::cf_mod is only ever accessed via its cgw_job within the
+ * same RCU read-side section. Once the cgw_job is scheduled for removal,
+ * cf_mod can be freed as well without an additional grace period.
+ */
+ kfree(rcu_access_pointer(gwj->cf_mod));
kmem_cache_free(cgw_cache, gwj);
}
+/* Return cgw_job::cf_mod while the RTNL lock is held */
+static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
+{
+ return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
+}
+
static int cgw_notifier(struct notifier_block *nb,
unsigned long msg, void *ptr)
{
@@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
{
struct rtcanmsg *rtcan;
struct nlmsghdr *nlh;
+ struct cf_mod *mod;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
if (!nlh)
@@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
goto cancel;
}
+ mod = cgw_job_cf_mod(gwj);
if (gwj->flags & CGW_FLAGS_CAN_FD) {
struct cgw_fdframe_mod mb;
- if (gwj->mod.modtype.and) {
- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.and;
+ if (mod->modtype.and) {
+ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
+ mb.modtype = mod->modtype.and;
if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.or) {
- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.or;
+ if (mod->modtype.or) {
+ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
+ mb.modtype = mod->modtype.or;
if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.xor) {
- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.xor;
+ if (mod->modtype.xor) {
+ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
+ mb.modtype = mod->modtype.xor;
if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.set) {
- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.set;
+ if (mod->modtype.set) {
+ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
+ mb.modtype = mod->modtype.set;
if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
}
} else {
struct cgw_frame_mod mb;
- if (gwj->mod.modtype.and) {
- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.and;
+ if (mod->modtype.and) {
+ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
+ mb.modtype = mod->modtype.and;
if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.or) {
- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.or;
+ if (mod->modtype.or) {
+ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
+ mb.modtype = mod->modtype.or;
if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.xor) {
- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.xor;
+ if (mod->modtype.xor) {
+ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
+ mb.modtype = mod->modtype.xor;
if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.set) {
- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.set;
+ if (mod->modtype.set) {
+ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
+ mb.modtype = mod->modtype.set;
if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
}
}
- if (gwj->mod.uid) {
- if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
+ if (mod->uid) {
+ if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
goto cancel;
}
- if (gwj->mod.csumfunc.crc8) {
+ if (mod->csumfunc.crc8) {
if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
- &gwj->mod.csum.crc8) < 0)
+ &mod->csum.crc8) < 0)
goto cancel;
}
- if (gwj->mod.csumfunc.xor) {
+ if (mod->csumfunc.xor) {
if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
- &gwj->mod.csum.xor) < 0)
+ &mod->csum.xor) < 0)
goto cancel;
}
@@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct rtcanmsg *r;
struct cgw_job *gwj;
- struct cf_mod mod;
+ struct cf_mod *mod;
struct can_can_gw ccgw;
u8 limhops = 0;
int err = 0;
@@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+ mod = kmalloc(sizeof(*mod), GFP_KERNEL);
+ if (!mod)
+ return -ENOMEM;
+
+ err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
if (err < 0)
- return err;
+ goto out_free_cf;
- if (mod.uid) {
+ if (mod->uid) {
ASSERT_RTNL();
/* check for updating an existing job with identical uid */
hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
- if (gwj->mod.uid != mod.uid)
+ struct cf_mod *old_cf;
+
+ old_cf = cgw_job_cf_mod(gwj);
+ if (old_cf->uid != mod->uid)
continue;
/* interfaces & filters must be identical */
- if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
- return -EINVAL;
+ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
+ err = -EINVAL;
+ goto out_free_cf;
+ }
- /* update modifications with disabled softirq & quit */
- local_bh_disable();
- memcpy(&gwj->mod, &mod, sizeof(mod));
- local_bh_enable();
+ rcu_assign_pointer(gwj->cf_mod, mod);
+ kfree_rcu_mightsleep(old_cf);
return 0;
}
}
/* ifindex == 0 is not allowed for job creation */
- if (!ccgw.src_idx || !ccgw.dst_idx)
- return -ENODEV;
+ if (!ccgw.src_idx || !ccgw.dst_idx) {
+ err = -ENODEV;
+ goto out_free_cf;
+ }
gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
- if (!gwj)
- return -ENOMEM;
+ if (!gwj) {
+ err = -ENOMEM;
+ goto out_free_cf;
+ }
gwj->handled_frames = 0;
gwj->dropped_frames = 0;
@@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
gwj->limit_hops = limhops;
/* insert already parsed information */
- memcpy(&gwj->mod, &mod, sizeof(mod));
+ RCU_INIT_POINTER(gwj->cf_mod, mod);
memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
err = -ENODEV;
@@ -1152,9 +1178,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!err)
hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
- if (err)
+ if (err) {
kmem_cache_free(cgw_cache, gwj);
-
+out_free_cf:
+ kfree(mod);
+ }
return err;
}
@@ -1214,19 +1242,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
/* remove only the first matching entry */
hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
+ struct cf_mod *cf_mod;
+
if (gwj->flags != r->flags)
continue;
if (gwj->limit_hops != limhops)
continue;
+ cf_mod = cgw_job_cf_mod(gwj);
/* we have a match when uid is enabled and identical */
- if (gwj->mod.uid || mod.uid) {
- if (gwj->mod.uid != mod.uid)
+ if (cf_mod->uid || mod.uid) {
+ if (cf_mod->uid != mod.uid)
continue;
} else {
/* no uid => check for identical modifications */
- if (memcmp(&gwj->mod, &mod, sizeof(mod)))
+ if (memcmp(cf_mod, &mod, sizeof(mod)))
continue;
}
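The cf_mod conversion follows the standard RCU publish pattern: build the replacement fully, swap the pointer, and defer freeing the old copy past a grace period so in-flight readers in can_can_gw_rcv() keep a consistent snapshot. The shape of the update side, with fill_mod() invented for brevity:

struct cf_mod *new_mod, *old_mod;

new_mod = kmalloc(sizeof(*new_mod), GFP_KERNEL);
if (!new_mod)
	return -ENOMEM;
fill_mod(new_mod);				/* invented helper */

old_mod = rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
rcu_assign_pointer(gwj->cf_mod, new_mod);	/* publish */
kfree_rcu_mightsleep(old_mod);			/* free after grace period */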
diff --git a/net/core/dev.c b/net/core/dev.c
index 1be7cb73a602..11da1e272ec2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9193,18 +9193,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
return 0;
}
-/**
- * dev_set_promiscuity - update promiscuity count on a device
- * @dev: device
- * @inc: modifier
- *
- * Add or remove promiscuity from a device. While the count in the device
- * remains above zero the interface remains promiscuous. Once it hits zero
- * the device reverts back to normal filtering operation. A negative inc
- * value is used to drop promiscuity on the device.
- * Return 0 if successful or a negative errno code on error.
- */
-int dev_set_promiscuity(struct net_device *dev, int inc)
+int netif_set_promiscuity(struct net_device *dev, int inc)
{
unsigned int old_flags = dev->flags;
int err;
@@ -9216,7 +9205,6 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
dev_set_rx_mode(dev);
return err;
}
-EXPORT_SYMBOL(dev_set_promiscuity);
int netif_set_allmulti(struct net_device *dev, int inc, bool notify)
{
@@ -11966,9 +11954,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
struct sk_buff *skb = NULL;
/* Shutdown queueing discipline. */
+ netdev_lock_ops(dev);
dev_shutdown(dev);
dev_tcx_uninstall(dev);
- netdev_lock_ops(dev);
dev_xdp_uninstall(dev);
dev_memory_provider_uninstall(dev);
netdev_unlock_ops(dev);
@@ -12161,7 +12149,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
synchronize_net();
/* Shutdown queueing discipline. */
+ netdev_lock_ops(dev);
dev_shutdown(dev);
+ netdev_unlock_ops(dev);
/* Notify protocols, that we are about to destroy
* this device. They should clean all the things.
diff --git a/net/core/dev_api.c b/net/core/dev_api.c
index 90898cd540ce..f9a160ab596f 100644
--- a/net/core/dev_api.c
+++ b/net/core/dev_api.c
@@ -268,6 +268,29 @@ void dev_disable_lro(struct net_device *dev)
EXPORT_SYMBOL(dev_disable_lro);
/**
+ * dev_set_promiscuity() - update promiscuity count on a device
+ * @dev: device
+ * @inc: modifier
+ *
+ * Add or remove promiscuity from a device. While the count in the device
+ * remains above zero the interface remains promiscuous. Once it hits zero
+ * the device reverts back to normal filtering operation. A negative inc
+ * value is used to drop promiscuity on the device.
+ * Return 0 if successful or a negative errno code on error.
+ */
+int dev_set_promiscuity(struct net_device *dev, int inc)
+{
+ int ret;
+
+ netdev_lock_ops(dev);
+ ret = netif_set_promiscuity(dev, inc);
+ netdev_unlock_ops(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(dev_set_promiscuity);
+
+/**
* dev_set_allmulti() - update allmulti count on a device
* @dev: device
* @inc: modifier
diff --git a/net/core/filter.c b/net/core/filter.c
index 79cab4d78dc3..577a4504e26f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2509,6 +2509,7 @@ int skb_do_redirect(struct sk_buff *skb)
goto out_drop;
skb->dev = dev;
dev_sw_netstats_rx_add(dev, skb->len);
+ skb_scrub_packet(skb, false);
return -EAGAIN;
}
return flags & BPF_F_NEIGH ?
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 230743bdbb14..dae9f0d432fb 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -708,25 +708,66 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
return 0;
}
+/**
+ * netdev_stat_queue_sum() - add up queue stats from range of queues
+ * @netdev: net_device
+ * @rx_start: index of the first Rx queue to query
+ * @rx_end: index after the last Rx queue (first *not* to query)
+ * @rx_sum: output Rx stats, must already be initialized
+ * @tx_start: index of the first Tx queue to query
+ * @tx_end: index after the last Tx queue (first *not* to query)
+ * @tx_sum: output Tx stats, must already be initialized
+ *
+ * Add stats from the [start, end) range of queue IDs to the *x_sum structs.
+ * The sum structs must already be initialized. Usually this
+ * helper is invoked from the .get_base_stats callbacks of drivers
+ * to account for stats of disabled queues. In that case the ranges
+ * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues).
+ */
+void netdev_stat_queue_sum(struct net_device *netdev,
+ int rx_start, int rx_end,
+ struct netdev_queue_stats_rx *rx_sum,
+ int tx_start, int tx_end,
+ struct netdev_queue_stats_tx *tx_sum)
+{
+ const struct netdev_stat_ops *ops;
+ struct netdev_queue_stats_rx rx;
+ struct netdev_queue_stats_tx tx;
+ int i;
+
+ ops = netdev->stat_ops;
+
+ for (i = rx_start; i < rx_end; i++) {
+ memset(&rx, 0xff, sizeof(rx));
+ if (ops->get_queue_stats_rx)
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ netdev_nl_stats_add(rx_sum, &rx, sizeof(rx));
+ }
+ for (i = tx_start; i < tx_end; i++) {
+ memset(&tx, 0xff, sizeof(tx));
+ if (ops->get_queue_stats_tx)
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ netdev_nl_stats_add(tx_sum, &tx, sizeof(tx));
+ }
+}
+EXPORT_SYMBOL(netdev_stat_queue_sum);
+
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
const struct genl_info *info)
{
- struct netdev_queue_stats_rx rx_sum, rx;
- struct netdev_queue_stats_tx tx_sum, tx;
- const struct netdev_stat_ops *ops;
+ struct netdev_queue_stats_rx rx_sum;
+ struct netdev_queue_stats_tx tx_sum;
void *hdr;
- int i;
- ops = netdev->stat_ops;
/* Netdev can't guarantee any complete counters */
- if (!ops->get_base_stats)
+ if (!netdev->stat_ops->get_base_stats)
return 0;
memset(&rx_sum, 0xff, sizeof(rx_sum));
memset(&tx_sum, 0xff, sizeof(tx_sum));
- ops->get_base_stats(netdev, &rx_sum, &tx_sum);
+ netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum);
/* The op was there, but nothing reported, don't bother */
if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
@@ -739,18 +780,8 @@ netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
goto nla_put_failure;
- for (i = 0; i < netdev->real_num_rx_queues; i++) {
- memset(&rx, 0xff, sizeof(rx));
- if (ops->get_queue_stats_rx)
- ops->get_queue_stats_rx(netdev, i, &rx);
- netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
- }
- for (i = 0; i < netdev->real_num_tx_queues; i++) {
- memset(&tx, 0xff, sizeof(tx));
- if (ops->get_queue_stats_tx)
- ops->get_queue_stats_tx(netdev, i, &tx);
- netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
- }
+ netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
+ 0, netdev->real_num_tx_queues, &tx_sum);
if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
netdev_nl_stats_write_tx(rsp, &tx_sum))
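netdev_stat_queue_sum() is exported so driver .get_base_stats callbacks can fold in queues that exist but are currently disabled, as the kdoc above suggests. A hypothetical driver callback — everything except the helper itself is invented:

static void foo_get_base_stats(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	/* report zeros rather than the 0xff "unset" markers */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;

	/* fold in stats of queues beyond the active range */
	netdev_stat_queue_sum(dev,
			      dev->real_num_rx_queues, dev->num_rx_queues, rx,
			      dev->real_num_tx_queues, dev->num_tx_queues, tx);
}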
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 934f777f29d3..d293087b426d 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -439,7 +439,7 @@ static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
iif, sdif);
NAPI_GRO_CB(skb)->is_flist = !sk;
if (sk)
- sock_put(sk);
+ sock_gen_put(sk);
}
INDIRECT_CALLABLE_SCOPE
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 2c0725583be3..9a8142ccbabe 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -247,6 +247,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
return segs;
}
+static void __udpv6_gso_segment_csum(struct sk_buff *seg,
+ struct in6_addr *oldip,
+ const struct in6_addr *newip,
+ __be16 *oldport, __be16 newport)
+{
+ struct udphdr *uh = udp_hdr(seg);
+
+ if (ipv6_addr_equal(oldip, newip) && *oldport == newport)
+ return;
+
+ if (uh->check) {
+ inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32,
+ newip->s6_addr32, true);
+
+ inet_proto_csum_replace2(&uh->check, seg, *oldport, newport,
+ false);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+
+ *oldip = *newip;
+ *oldport = newport;
+}
+
+static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs)
+{
+ const struct ipv6hdr *iph;
+ const struct udphdr *uh;
+ struct ipv6hdr *iph2;
+ struct sk_buff *seg;
+ struct udphdr *uh2;
+
+ seg = segs;
+ uh = udp_hdr(seg);
+ iph = ipv6_hdr(seg);
+ uh2 = udp_hdr(seg->next);
+ iph2 = ipv6_hdr(seg->next);
+
+ if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) &&
+ ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
+ ipv6_addr_equal(&iph->daddr, &iph2->daddr))
+ return segs;
+
+ while ((seg = seg->next)) {
+ uh2 = udp_hdr(seg);
+ iph2 = ipv6_hdr(seg);
+
+ __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
+ &uh2->source, uh->source);
+ __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
+ &uh2->dest, uh->dest);
+ }
+
+ return segs;
+}
+
static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6)
@@ -259,7 +315,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
- return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
+ if (is_ipv6)
+ return __udpv6_gso_segment_list_csum(skb);
+ else
+ return __udpv4_gso_segment_list_csum(skb);
}
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9ba83f0c9928..c6b22170dc49 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3214,16 +3214,13 @@ static void add_v4_addrs(struct inet6_dev *idev)
struct in6_addr addr;
struct net_device *dev;
struct net *net = dev_net(idev->dev);
- int scope, plen, offset = 0;
+ int scope, plen;
u32 pflags = 0;
ASSERT_RTNL();
memset(&addr, 0, sizeof(struct in6_addr));
- /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */
- if (idev->dev->addr_len == sizeof(struct in6_addr))
- offset = sizeof(struct in6_addr) - 4;
- memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
+ memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
scope = IPV6_ADDR_COMPATv4;
@@ -3534,7 +3531,13 @@ static void addrconf_gre_config(struct net_device *dev)
return;
}
- if (dev->type == ARPHRD_ETHER) {
+ /* Generate the IPv6 link-local address using addrconf_addr_gen(),
+ * unless the device is an IPv4 GRE device that is not bound to an IP
+ * address and is in EUI64 mode (__ipv6_isatap_ifid() would fail in that
+ * case). Such devices fall back to add_v4_addrs() instead.
+ */
+ if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
+ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
addrconf_addr_gen(idev, true);
return;
}
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index d9b11fe41bf0..a8a04f441e78 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -42,7 +42,7 @@ static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
iif, sdif);
NAPI_GRO_CB(skb)->is_flist = !sk;
if (sk)
- sock_put(sk);
+ sock_gen_put(sk);
#endif /* IS_ENABLED(CONFIG_IPV6) */
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5d1f2d6d09ad..35eaf0812c5b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -7675,6 +7675,7 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res);
int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+ u16 status_code;
skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
if (!skb)
@@ -7697,19 +7698,18 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
WARN_ON(1);
fallthrough;
case NEG_TTLM_RES_REJECT:
- mgmt->u.action.u.ttlm_res.status_code =
- WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
+ status_code = WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
break;
case NEG_TTLM_RES_ACCEPT:
- mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS;
+ status_code = WLAN_STATUS_SUCCESS;
break;
case NEG_TTLM_RES_SUGGEST_PREFERRED:
- mgmt->u.action.u.ttlm_res.status_code =
- WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
+ status_code = WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
break;
}
+ mgmt->u.action.u.ttlm_res.status_code = cpu_to_le16(status_code);
ieee80211_tx_skb(sdata, skb);
}
@@ -7875,7 +7875,7 @@ void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
* This can be better implemented in the future, to handle request
* rejections.
*/
- if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS)
+ if (le16_to_cpu(mgmt->u.action.u.ttlm_res.status_code) != WLAN_STATUS_SUCCESS)
__ieee80211_disconnect(sdata);
}
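Both mlme.c hunks enforce the endianness contract for the wire field: a __le16 is only assigned via cpu_to_le16() and only compared after le16_to_cpu(), an identity on little-endian hosts but a byte swap on big-endian ones. The rule in miniature (handle_reject() is invented):

__le16 wire_status;
u16 status = WLAN_STATUS_SUCCESS;

wire_status = cpu_to_le16(status);		/* store to the frame */

if (le16_to_cpu(wire_status) != WLAN_STATUS_SUCCESS)	/* load */
	handle_reject();			/* invented */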
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index b17b3cc7fb90..a362254b310c 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -1085,7 +1085,13 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp);
- if (status->free_list)
+ /*
+ * This is a bit racy but we can avoid a lot of work
+ * with this test...
+ */
+ if (local->tx_mntrs)
+ ieee80211_tx_monitor(local, skb, retry_count, status);
+ else if (status->free_list)
list_add_tail(&skb->list, status->free_list);
else
dev_kfree_skb(skb);
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index cf3ce72c3de6..5251524b96af 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -64,7 +64,7 @@ struct hbucket {
#define ahash_sizeof_regions(htable_bits) \
(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
#define ahash_region(n, htable_bits) \
- ((n) % ahash_numof_locks(htable_bits))
+ ((n) / jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_start(h, htable_bits) \
((htable_bits) < HTABLE_REGION_BITS ? 0 \
: (h) * jhash_size(HTABLE_REGION_BITS))
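The division matters because ahash_bucket_start() hands region r the contiguous bucket range [r * region_size, (r + 1) * region_size), so the bucket-to-region map must divide rather than take a modulo over the lock count. A quick standalone check of the arithmetic:

#include <stdio.h>

#define REGION_BITS 10
#define RSIZE (1u << REGION_BITS)	/* jhash_size(HTABLE_REGION_BITS) */

int main(void)
{
	unsigned int nlocks = 4;	/* example ahash_numof_locks() */
	unsigned int n = 5;		/* a bucket in the first region */

	printf("old (modulo): %u\n", n % nlocks);	/* 1: wrong region   */
	printf("new (divide): %u\n", n / RSIZE);	/* 0: matches layout */
	return 0;
}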
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 3313bceb6cc9..014f07740369 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -119,13 +119,12 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
return false;
}
-/* Get route to daddr, update *saddr, optionally bind route to saddr */
+/* Get route to daddr, optionally bind route to saddr */
static struct rtable *do_output_route4(struct net *net, __be32 daddr,
- int rt_mode, __be32 *saddr)
+ int rt_mode, __be32 *ret_saddr)
{
struct flowi4 fl4;
struct rtable *rt;
- bool loop = false;
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
@@ -135,23 +134,17 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
retry:
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt)) {
- /* Invalid saddr ? */
- if (PTR_ERR(rt) == -EINVAL && *saddr &&
- rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
- *saddr = 0;
- flowi4_update_output(&fl4, 0, daddr, 0);
- goto retry;
- }
IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
return NULL;
- } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+ }
+ if (rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
ip_rt_put(rt);
- *saddr = fl4.saddr;
flowi4_update_output(&fl4, 0, daddr, fl4.saddr);
- loop = true;
+ rt_mode = 0;
goto retry;
}
- *saddr = fl4.saddr;
+ if (ret_saddr)
+ *ret_saddr = fl4.saddr;
return rt;
}
@@ -344,19 +337,15 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
if (ret_saddr)
*ret_saddr = dest_dst->dst_saddr.ip;
} else {
- __be32 saddr = htonl(INADDR_ANY);
-
noref = 0;
/* For such unconfigured boxes avoid many route lookups
* for performance reasons because we do not remember saddr
*/
rt_mode &= ~IP_VS_RT_MODE_CONNECT;
- rt = do_output_route4(net, daddr, rt_mode, &saddr);
+ rt = do_output_route4(net, daddr, rt_mode, ret_saddr);
if (!rt)
goto err_unreach;
- if (ret_saddr)
- *ret_saddr = saddr;
}
local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 61fea7baae5d..2f22ca59586f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -975,8 +975,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
upcall.cmd = OVS_PACKET_CMD_ACTION;
upcall.mru = OVS_CB(skb)->mru;
- for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
- a = nla_next(a, &rem)) {
+ nla_for_each_nested(a, attr, rem) {
switch (nla_type(a)) {
case OVS_USERSPACE_ATTR_USERDATA:
upcall.userdata = a;
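nla_for_each_nested() hides the nla_data()/nla_len()/nla_next() bookkeeping the old loop open-coded; rem tracks the remaining payload automatically. The idiom in isolation (the handler name is invented):

const struct nlattr *a;
int rem;

nla_for_each_nested(a, attr, rem) {
	switch (nla_type(a)) {
	case OVS_USERSPACE_ATTR_USERDATA:
		handle_userdata(a);	/* invented handler */
		break;
	}
}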
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index e0a81d313aa7..9b6d79bd8737 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -35,6 +35,11 @@ struct drr_sched {
struct Qdisc_class_hash clhash;
};
+static bool cl_is_active(struct drr_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
struct drr_sched *q = qdisc_priv(sch);
@@ -337,7 +342,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
int err = 0;
- bool first;
cl = drr_classify(skb, sch, &err);
if (cl == NULL) {
@@ -347,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -357,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (first) {
+ if (!cl_is_active(cl)) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
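This and the matching ets/qfq hunks below switch activity detection from "was the child queue empty before this enqueue" to "is the class on the active list", since drops and requeues can desynchronize qlen from list membership. A userspace reduction of the pattern, with list membership shrunk to a flag:

#include <stdbool.h>

struct class_like {
	bool on_active_list;	/* stands in for !list_empty(&cl->alist) */
	int qlen;
};

static void enqueue(struct class_like *cl)
{
	cl->qlen++;		/* the child qdisc may drop or requeue too */

	if (!cl->on_active_list) {	/* ask the list, not the old qlen */
		cl->on_active_list = true;
		/* reset deficit, append to active list, ... */
	}
}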
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index c3bdeb14185b..2c069f0181c6 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};
+static bool cl_is_active(struct ets_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
unsigned int *quantum,
struct netlink_ext_ack *extack)
@@ -416,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct ets_sched *q = qdisc_priv(sch);
struct ets_class *cl;
int err = 0;
- bool first;
cl = ets_classify(skb, sch, &err);
if (!cl) {
@@ -426,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -436,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (first && !ets_class_is_strict(q, cl)) {
+ if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6c8ef826cec0..cb8c525ea20e 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1569,7 +1569,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}
- if (first) {
+ if (first && !cl->cl_nactive) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 4b9a639b642e..14bf71f57057 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
*/
static inline void htb_next_rb_node(struct rb_node **n)
{
- *n = rb_next(*n);
+ if (*n)
+ *n = rb_next(*n);
}
/**
@@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
*/
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
- WARN_ON(!cl->prio_activity);
-
+ if (!cl->prio_activity)
+ return;
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
}
@@ -1485,8 +1486,6 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
- if (!cl->prio_activity)
- return;
htb_deactivate(qdisc_priv(sch), cl);
}
@@ -1740,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
if (cl->parent)
cl->parent->children--;
- if (cl->prio_activity)
- htb_deactivate(q, cl);
+ htb_deactivate(q, cl);
if (cl->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&cl->pq_node,
@@ -1949,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* turn parent into inner node */
qdisc_purge_queue(parent->leaf.q);
parent_qdisc = parent->leaf.q;
- if (parent->prio_activity)
- htb_deactivate(q, parent);
+ htb_deactivate(q, parent);
/* remove from evt list because of level change */
if (parent->cmode != HTB_CAN_SEND) {
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 687a932eb9b2..bf1282cb22eb 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -202,6 +202,11 @@ struct qfq_sched {
*/
enum update_reason {enqueue, requeue};
+static bool cl_is_active(struct qfq_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -1215,7 +1220,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct qfq_class *cl;
struct qfq_aggregate *agg;
int err = 0;
- bool first;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@@ -1237,7 +1241,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1253,8 +1256,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
++sch->q.qlen;
agg = cl->agg;
- /* if the queue was not empty, then done here */
- if (!first) {
+ /* if the class is active, then done here */
+ if (cl_is_active(cl)) {
if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
list_first_entry(&agg->active, struct qfq_class, alist)
== cl && cl->deficit < len)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 9865f305275d..ddd3a97f6609 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2681,7 +2681,7 @@ cfg80211_defrag_mle(const struct element *mle, const u8 *ie, size_t ielen,
/* Required length for first defragmentation */
buf_len = mle->datalen - 1;
for_each_element(elem, mle->data + mle->datalen,
- ielen - sizeof(*mle) + mle->datalen) {
+ ie + ielen - mle->data - mle->datalen) {
if (elem->id != WLAN_EID_FRAGMENT)
break;
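The corrected bound is "bytes remaining after the element", derived from pointers: end of buffer (ie + ielen) minus current position (mle->data + mle->datalen); the old expression subtracted only the element header size. A standalone check of the pointer arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned char ie[100];
	size_t ielen = sizeof(ie);
	unsigned char *data = ie + 10;	/* plays mle->data */
	size_t datalen = 20;		/* plays mle->datalen */

	size_t remaining = (size_t)((ie + ielen) - (data + datalen));

	printf("remaining = %zu\n", remaining);	/* 70 */
	return 0;
}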
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5696af45bcf7..4abc81f33d3e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -338,13 +338,14 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
u32 len = xdp_get_buff_len(xdp);
int err;
- spin_lock_bh(&xs->rx_lock);
err = xsk_rcv_check(xs, xdp, len);
if (!err) {
+ spin_lock_bh(&xs->pool->rx_lock);
err = __xsk_rcv(xs, xdp, len);
xsk_flush(xs);
+ spin_unlock_bh(&xs->pool->rx_lock);
}
- spin_unlock_bh(&xs->rx_lock);
+
return err;
}
@@ -1734,7 +1735,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
xs = xdp_sk(sk);
xs->state = XSK_READY;
mutex_init(&xs->mutex);
- spin_lock_init(&xs->rx_lock);
INIT_LIST_HEAD(&xs->map_list);
spin_lock_init(&xs->map_list_lock);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 25a76c5ce0f1..c5181a9044ad 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -89,6 +89,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
pool->addrs = umem->addrs;
pool->tx_metadata_len = umem->tx_metadata_len;
pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
+ spin_lock_init(&pool->rx_lock);
INIT_LIST_HEAD(&pool->free_list);
INIT_LIST_HEAD(&pool->xskb_list);
INIT_LIST_HEAD(&pool->xsk_tx_list);
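Moving rx_lock from the socket to the pool serializes generic receive across all sockets sharing one buffer pool, which is what the pool-wide rx structures require. A pthreads analogue of locking the shared object rather than the per-user one:

#include <pthread.h>

struct pool {
	pthread_mutex_t rx_lock;	/* shared by every socket on it */
};

struct sock_like {
	struct pool *pool;		/* no per-socket rx lock anymore */
};

static void rcv(struct sock_like *xs)
{
	pthread_mutex_lock(&xs->pool->rx_lock);
	/* touch pool-wide rx state */
	pthread_mutex_unlock(&xs->pool->rx_lock);
}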
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index 014af0d1fc70..a08eb5518cac 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -26,6 +26,7 @@
#[allow(dead_code)]
#[allow(clippy::undocumented_unsafe_blocks)]
+#[cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))]
mod bindings_raw {
// Manual definition for blocklisted types.
type __kernel_size_t = usize;
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index ae9d072741ce..87a71fd40c3c 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -2,6 +2,9 @@
//! Implementation of [`Vec`].
+// May not be needed in Rust 1.87.0 (pending beta backport).
+#![allow(clippy::ptr_eq)]
+
use super::{
allocator::{KVmalloc, Kmalloc, Vmalloc},
layout::ArrayLayout,
diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
index a335c3b1ff5e..2054682c5724 100644
--- a/rust/kernel/list.rs
+++ b/rust/kernel/list.rs
@@ -4,6 +4,9 @@
//! A linked list implementation.
+// May not be needed in Rust 1.87.0 (pending beta backport).
+#![allow(clippy::ptr_eq)]
+
use crate::sync::ArcBorrow;
use crate::types::Opaque;
use core::iter::{DoubleEndedIterator, FusedIterator};
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index 878111cb77bc..fb61ce81ea28 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -73,7 +73,7 @@ impl fmt::Display for BStr {
b'\r' => f.write_str("\\r")?,
// Printable characters.
0x20..=0x7e => f.write_char(b as char)?,
- _ => write!(f, "\\x{:02x}", b)?,
+ _ => write!(f, "\\x{b:02x}")?,
}
}
Ok(())
@@ -109,7 +109,7 @@ impl fmt::Debug for BStr {
b'\\' => f.write_str("\\\\")?,
// Printable characters.
0x20..=0x7e => f.write_char(b as char)?,
- _ => write!(f, "\\x{:02x}", b)?,
+ _ => write!(f, "\\x{b:02x}")?,
}
}
f.write_char('"')
@@ -447,7 +447,7 @@ impl fmt::Display for CStr {
// Printable character.
f.write_char(c as char)?;
} else {
- write!(f, "\\x{:02x}", c)?;
+ write!(f, "\\x{c:02x}")?;
}
}
Ok(())
@@ -479,7 +479,7 @@ impl fmt::Debug for CStr {
// Printable characters.
b'\"' => f.write_str("\\\"")?,
0x20..=0x7e => f.write_char(c as char)?,
- _ => write!(f, "\\x{:02x}", c)?,
+ _ => write!(f, "\\x{c:02x}")?,
}
}
f.write_str("\"")
@@ -641,13 +641,13 @@ mod tests {
#[test]
fn test_cstr_display() {
let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
- assert_eq!(format!("{}", hello_world), "hello, world!");
+ assert_eq!(format!("{hello_world}"), "hello, world!");
let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
- assert_eq!(format!("{}", non_printables), "\\x01\\x09\\x0a");
+ assert_eq!(format!("{non_printables}"), "\\x01\\x09\\x0a");
let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
- assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
+ assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu");
let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
- assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
+ assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80");
}
#[test]
@@ -658,47 +658,47 @@ mod tests {
bytes[i as usize] = i.wrapping_add(1);
}
let cstr = CStr::from_bytes_with_nul(&bytes).unwrap();
- assert_eq!(format!("{}", cstr), ALL_ASCII_CHARS);
+ assert_eq!(format!("{cstr}"), ALL_ASCII_CHARS);
}
#[test]
fn test_cstr_debug() {
let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
- assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
+ assert_eq!(format!("{hello_world:?}"), "\"hello, world!\"");
let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
- assert_eq!(format!("{:?}", non_printables), "\"\\x01\\x09\\x0a\"");
+ assert_eq!(format!("{non_printables:?}"), "\"\\x01\\x09\\x0a\"");
let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
- assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
+ assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\"");
let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
- assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
+ assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\"");
}
#[test]
fn test_bstr_display() {
let hello_world = BStr::from_bytes(b"hello, world!");
- assert_eq!(format!("{}", hello_world), "hello, world!");
+ assert_eq!(format!("{hello_world}"), "hello, world!");
let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
- assert_eq!(format!("{}", escapes), "_\\t_\\n_\\r_\\_'_\"_");
+ assert_eq!(format!("{escapes}"), "_\\t_\\n_\\r_\\_'_\"_");
let others = BStr::from_bytes(b"\x01");
- assert_eq!(format!("{}", others), "\\x01");
+ assert_eq!(format!("{others}"), "\\x01");
let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
- assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
+ assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu");
let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
- assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
+ assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80");
}
#[test]
fn test_bstr_debug() {
let hello_world = BStr::from_bytes(b"hello, world!");
- assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
+ assert_eq!(format!("{hello_world:?}"), "\"hello, world!\"");
let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
- assert_eq!(format!("{:?}", escapes), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\"");
+ assert_eq!(format!("{escapes:?}"), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\"");
let others = BStr::from_bytes(b"\x01");
- assert_eq!(format!("{:?}", others), "\"\\x01\"");
+ assert_eq!(format!("{others:?}"), "\"\\x01\"");
let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
- assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
+ assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\"");
let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
- assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
+ assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\"");
}
}
diff --git a/rust/macros/kunit.rs b/rust/macros/kunit.rs
index 4f553ecf40c0..99ccac82edde 100644
--- a/rust/macros/kunit.rs
+++ b/rust/macros/kunit.rs
@@ -15,10 +15,7 @@ pub(crate) fn kunit_tests(attr: TokenStream, ts: TokenStream) -> TokenStream {
}
if attr.len() > 255 {
- panic!(
- "The test suite name `{}` exceeds the maximum length of 255 bytes",
- attr
- )
+ panic!("The test suite name `{attr}` exceeds the maximum length of 255 bytes")
}
let mut tokens: Vec<_> = ts.into_iter().collect();
@@ -102,16 +99,14 @@ pub(crate) fn kunit_tests(attr: TokenStream, ts: TokenStream) -> TokenStream {
let mut kunit_macros = "".to_owned();
let mut test_cases = "".to_owned();
for test in &tests {
- let kunit_wrapper_fn_name = format!("kunit_rust_wrapper_{}", test);
+ let kunit_wrapper_fn_name = format!("kunit_rust_wrapper_{test}");
let kunit_wrapper = format!(
- "unsafe extern \"C\" fn {}(_test: *mut kernel::bindings::kunit) {{ {}(); }}",
- kunit_wrapper_fn_name, test
+ "unsafe extern \"C\" fn {kunit_wrapper_fn_name}(_test: *mut kernel::bindings::kunit) {{ {test}(); }}"
);
writeln!(kunit_macros, "{kunit_wrapper}").unwrap();
writeln!(
test_cases,
- " kernel::kunit::kunit_case(kernel::c_str!(\"{}\"), {}),",
- test, kunit_wrapper_fn_name
+ " kernel::kunit::kunit_case(kernel::c_str!(\"{test}\"), {kunit_wrapper_fn_name}),"
)
.unwrap();
}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index a9418fbc9b44..2f66107847f7 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -48,7 +48,7 @@ impl<'a> ModInfoBuilder<'a> {
)
} else {
// Loadable modules' modinfo strings go as-is.
- format!("{field}={content}\0", field = field, content = content)
+ format!("{field}={content}\0")
};
write!(
@@ -126,10 +126,7 @@ impl ModuleInfo {
};
if seen_keys.contains(&key) {
- panic!(
- "Duplicated key \"{}\". Keys can only be specified once.",
- key
- );
+ panic!("Duplicated key \"{key}\". Keys can only be specified once.");
}
assert_eq!(expect_punct(it), ':');
@@ -143,10 +140,7 @@ impl ModuleInfo {
"license" => info.license = expect_string_ascii(it),
"alias" => info.alias = Some(expect_string_array(it)),
"firmware" => info.firmware = Some(expect_string_array(it)),
- _ => panic!(
- "Unknown key \"{}\". Valid keys are: {:?}.",
- key, EXPECTED_KEYS
- ),
+ _ => panic!("Unknown key \"{key}\". Valid keys are: {EXPECTED_KEYS:?}."),
}
assert_eq!(expect_punct(it), ',');
@@ -158,7 +152,7 @@ impl ModuleInfo {
for key in REQUIRED_KEYS {
if !seen_keys.iter().any(|e| e == key) {
- panic!("Missing required key \"{}\".", key);
+ panic!("Missing required key \"{key}\".");
}
}
@@ -170,10 +164,7 @@ impl ModuleInfo {
}
if seen_keys != ordered_keys {
- panic!(
- "Keys are not ordered as expected. Order them like: {:?}.",
- ordered_keys
- );
+ panic!("Keys are not ordered as expected. Order them like: {ordered_keys:?}.");
}
info
diff --git a/rust/macros/paste.rs b/rust/macros/paste.rs
index 6529a387673f..cce712d19855 100644
--- a/rust/macros/paste.rs
+++ b/rust/macros/paste.rs
@@ -50,7 +50,7 @@ fn concat_helper(tokens: &[TokenTree]) -> Vec<(String, Span)> {
let tokens = group.stream().into_iter().collect::<Vec<TokenTree>>();
segments.append(&mut concat_helper(tokens.as_slice()));
}
- token => panic!("unexpected token in paste segments: {:?}", token),
+ token => panic!("unexpected token in paste segments: {token:?}"),
};
}
diff --git a/rust/pin-init/internal/src/pinned_drop.rs b/rust/pin-init/internal/src/pinned_drop.rs
index c824dd8b436d..c4ca7a70b726 100644
--- a/rust/pin-init/internal/src/pinned_drop.rs
+++ b/rust/pin-init/internal/src/pinned_drop.rs
@@ -28,8 +28,7 @@ pub(crate) fn pinned_drop(_args: TokenStream, input: TokenStream) -> TokenStream
// Found the end of the generics, this should be `PinnedDrop`.
assert!(
matches!(tt, TokenTree::Ident(i) if i.to_string() == "PinnedDrop"),
- "expected 'PinnedDrop', found: '{:?}'",
- tt
+ "expected 'PinnedDrop', found: '{tt:?}'"
);
pinned_drop_idx = Some(i);
break;
diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
index 13495910271f..c98d7a8cde77 100644
--- a/rust/uapi/lib.rs
+++ b/rust/uapi/lib.rs
@@ -24,6 +24,7 @@
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
+#![cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))]
// Manual definition of blocklisted types.
type __kernel_size_t = usize;
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 2d6e59561c9d..d88acdf40855 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -8,6 +8,7 @@
# Default set of warnings, always enabled
KBUILD_CFLAGS += -Wall
+KBUILD_CFLAGS += -Wextra
KBUILD_CFLAGS += -Wundef
KBUILD_CFLAGS += -Werror=implicit-function-declaration
KBUILD_CFLAGS += -Werror=implicit-int
@@ -56,6 +57,13 @@ KBUILD_CFLAGS += -Wno-pointer-sign
# globally built with -Wcast-function-type.
KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type)
+# Currently, disable -Wstringop-overflow for GCC 11, globally.
+KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
+KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
+
+# Currently, disable -Wunterminated-string-initialization, as it is broken
+KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
+
# The allocators already balk at large sizes, so silence the compiler
# warnings for bounds checks involving those possible values. While
# -Wno-alloc-size-larger-than would normally be used here, earlier versions
@@ -82,7 +90,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# Warn if there is an enum types mismatch
KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
-KBUILD_CFLAGS += -Wextra
KBUILD_CFLAGS += -Wunused
#
diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
index b0a6cd5b818c..85d60d986401 100644
--- a/scripts/Makefile.vmlinux
+++ b/scripts/Makefile.vmlinux
@@ -13,7 +13,7 @@ ifdef CONFIG_ARCH_VMLINUX_NEEDS_RELOCS
vmlinux-final := vmlinux.unstripped
quiet_cmd_strip_relocs = RSTRIP $@
- cmd_strip_relocs = $(OBJCOPY) --remove-section='.rel*' $< $@
+ cmd_strip_relocs = $(OBJCOPY) --remove-section='.rel*' --remove-section=!'.rel*.dyn' $< $@
vmlinux: $(vmlinux-final) FORCE
$(call if_changed,strip_relocs)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 877137cb09ac..8a2b09e4a7d5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -441,6 +441,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
fallthrough;
case 0x10ec0215:
+ case 0x10ec0236:
+ case 0x10ec0245:
+ case 0x10ec0256:
+ case 0x10ec0257:
case 0x10ec0285:
case 0x10ec0289:
alc_update_coef_idx(codec, 0x36, 1<<13, 0);
@@ -448,12 +452,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0230:
case 0x10ec0233:
case 0x10ec0235:
- case 0x10ec0236:
- case 0x10ec0245:
case 0x10ec0255:
- case 0x10ec0256:
case 0x19e58326:
- case 0x10ec0257:
case 0x10ec0282:
case 0x10ec0283:
case 0x10ec0286:
@@ -6742,6 +6742,25 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
codec->power_save_node = 0;
}
+/* avoid DAC 0x06 for speaker switch 0x17; it has no volume control */
+static void alc274_fixup_hp_aio_bind_dacs(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
+ /* The speaker is mistakenly routed to node 0x06, whose lack of
+ * Amp-out capability makes its volume unadjustable. Ensure the
+ * speaker and line-out pins are coupled with DAC NID 0x02 instead.
+ */
+ static const hda_nid_t preferred_pairs[] = {
+ 0x16, 0x02, 0x17, 0x02, 0x21, 0x03, 0
+ };
+ struct alc_spec *spec = codec->spec;
+
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ spec->gen.preferred_dacs = preferred_pairs;
+}
+
/* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */
static void alc289_fixup_asus_ga401(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
@@ -6963,6 +6982,41 @@ static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec,
}
}
+/* GPIO1 = amplifier on/off */
+static void alc285_fixup_hp_spectre_x360_df1(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const hda_nid_t conn[] = { 0x02 };
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x14, 0x90170110 }, /* front/high speakers */
+ { 0x17, 0x90170130 }, /* back/bass speakers */
+ { }
+ };
+
+ // enable mute led
+ alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ /* needed for amp of back speakers */
+ spec->gpio_mask |= 0x01;
+ spec->gpio_dir |= 0x01;
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ /* share DAC to have unified volume control */
+ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn);
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ /* need to toggle GPIO to enable the amp of back speakers */
+ alc_update_gpio_data(codec, 0x01, true);
+ msleep(100);
+ alc_update_gpio_data(codec, 0x01, false);
+ break;
+ }
+}
+
static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -7761,6 +7815,7 @@ enum {
ALC280_FIXUP_HP_9480M,
ALC245_FIXUP_HP_X360_AMP,
ALC285_FIXUP_HP_SPECTRE_X360_EB1,
+ ALC285_FIXUP_HP_SPECTRE_X360_DF1,
ALC285_FIXUP_HP_ENVY_X360,
ALC288_FIXUP_DELL_HEADSET_MODE,
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -7970,6 +8025,7 @@ enum {
ALC294_FIXUP_BASS_SPEAKER_15,
ALC283_FIXUP_DELL_HP_RESUME,
ALC294_FIXUP_ASUS_CS35L41_SPI_2,
+ ALC274_FIXUP_HP_AIO_BIND_DACS,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9837,6 +9893,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_hp_spectre_x360_eb1
},
+ [ALC285_FIXUP_HP_SPECTRE_X360_DF1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_spectre_x360_df1
+ },
[ALC285_FIXUP_HP_ENVY_X360] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_hp_envy_x360,
@@ -10340,6 +10400,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC,
},
+ [ALC274_FIXUP_HP_AIO_BIND_DACS] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc274_fixup_hp_aio_bind_dacs,
+ },
};
static const struct hda_quirk alc269_fixup_tbl[] = {
@@ -10564,6 +10628,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ SND_PCI_QUIRK(0x103c, 0x863e, "HP Spectre x360 15-df1xxx", ALC285_FIXUP_HP_SPECTRE_X360_DF1),
SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
@@ -10768,12 +10833,13 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8caf, "HP Elite mt645 G8 Mobile Thin Client", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8cbd, "HP Pavilion Aero Laptop 13-bg0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
- SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
+ SND_PCI_QUIRK(0x103c, 0x8cde, "HP OmniBook Ultra Flip Laptop 14t", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8d18, "HP EliteStudio 8 AIO", ALC274_FIXUP_HP_AIO_BIND_DACS),
SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8d85, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8d86, "HP Elite X360 14 G12", ALC285_FIXUP_HP_GPIO_LED),
@@ -10793,11 +10859,15 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8da1, "HP 16 Clipper OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8da7, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8da8, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8dd4, "HP EliteStudio 8 AIO", ALC274_FIXUP_HP_AIO_BIND_DACS),
SND_PCI_QUIRK(0x103c, 0x8de8, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2),
SND_PCI_QUIRK(0x103c, 0x8de9, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2),
SND_PCI_QUIRK(0x103c, 0x8dec, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ded, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8dee, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8def, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8df0, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8df1, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8dfc, "HP EliteBook 645 G12", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8dfe, "HP EliteBook 665 G12", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8e11, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10843,10 +10913,10 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
- SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
@@ -10900,7 +10970,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
- SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC),
@@ -11494,6 +11564,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
{.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
+ {.id = ALC285_FIXUP_HP_SPECTRE_X360_DF1, .name = "alc285-hp-spectre-x360-df1"},
{.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"},
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
{.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
index a38409dd1d34..70fa54d568ef 100644
--- a/sound/soc/amd/acp/acp-i2s.c
+++ b/sound/soc/amd/acp/acp-i2s.c
@@ -97,7 +97,7 @@ static int acp_i2s_set_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mas
struct acp_stream *stream;
int slot_len, no_of_slots;
- chip = dev_get_platdata(dev);
+ chip = dev_get_drvdata(dev->parent);
switch (slot_width) {
case SLOT_WIDTH_8:
slot_len = 8;
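The ACP platform children now look up the shared chip state through the parent device's drvdata rather than platform data attached to the child. A minimal userspace sketch of that ownership pattern; struct device and dev_get_drvdata() here are toy stand-ins for the kernel API, not the real definitions:

#include <stdio.h>

/* Toy stand-ins for struct device and dev_get_drvdata(). */
struct device {
	struct device *parent;
	void *drvdata;
};

struct chip_info {
	int acp_rev;
};

static void *dev_get_drvdata(const struct device *dev)
{
	return dev->drvdata;
}

int main(void)
{
	struct chip_info chip = { .acp_rev = 0x70 };
	struct device parent = { .parent = NULL, .drvdata = &chip };
	struct device child = { .parent = &parent, .drvdata = NULL };

	/* The child fetches the shared state from its parent, not from
	 * platform data carried by the child itself. */
	struct chip_info *found = dev_get_drvdata(child.parent);

	printf("acp_rev = 0x%x\n", found->acp_rev);
	return 0;
}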
diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c
index b4d68484e06d..ba8db0851daa 100644
--- a/sound/soc/amd/acp/acp-legacy-common.c
+++ b/sound/soc/amd/acp/acp-legacy-common.c
@@ -450,7 +450,7 @@ int acp_machine_select(struct acp_chip_info *chip)
struct snd_soc_acpi_mach *mach;
int size, platform;
- if (chip->flag == FLAG_AMD_LEGACY_ONLY_DMIC) {
+ if (chip->flag == FLAG_AMD_LEGACY_ONLY_DMIC && chip->is_pdm_dev) {
platform = chip->acp_rev;
chip->mach_dev = platform_device_register_data(chip->dev, "acp-pdm-mach",
PLATFORM_DEVID_NONE, &platform,
diff --git a/sound/soc/amd/acp/acp-rembrandt.c b/sound/soc/amd/acp/acp-rembrandt.c
index c30a7b59fbff..aeffd24710e7 100644
--- a/sound/soc/amd/acp/acp-rembrandt.c
+++ b/sound/soc/amd/acp/acp-rembrandt.c
@@ -199,7 +199,7 @@ static void rembrandt_audio_remove(struct platform_device *pdev)
static int rmb_pcm_resume(struct device *dev)
{
- struct acp_chip_info *chip = dev_get_platdata(dev);
+ struct acp_chip_info *chip = dev_get_drvdata(dev->parent);
struct acp_stream *stream;
struct snd_pcm_substream *substream;
snd_pcm_uframes_t buf_in_frames;
diff --git a/sound/soc/amd/acp/acp-renoir.c b/sound/soc/amd/acp/acp-renoir.c
index ebf0106fc737..04f6d70b6a92 100644
--- a/sound/soc/amd/acp/acp-renoir.c
+++ b/sound/soc/amd/acp/acp-renoir.c
@@ -146,7 +146,7 @@ static void renoir_audio_remove(struct platform_device *pdev)
static int rn_pcm_resume(struct device *dev)
{
- struct acp_chip_info *chip = dev_get_platdata(dev);
+ struct acp_chip_info *chip = dev_get_drvdata(dev->parent);
struct acp_stream *stream;
struct snd_pcm_substream *substream;
snd_pcm_uframes_t buf_in_frames;
diff --git a/sound/soc/amd/acp/acp63.c b/sound/soc/amd/acp/acp63.c
index 0ddde14da7d5..10fb416b959d 100644
--- a/sound/soc/amd/acp/acp63.c
+++ b/sound/soc/amd/acp/acp63.c
@@ -250,7 +250,7 @@ static void acp63_audio_remove(struct platform_device *pdev)
static int acp63_pcm_resume(struct device *dev)
{
- struct acp_chip_info *chip = dev_get_platdata(dev);
+ struct acp_chip_info *chip = dev_get_drvdata(dev->parent);
struct acp_stream *stream;
struct snd_pcm_substream *substream;
snd_pcm_uframes_t buf_in_frames;
diff --git a/sound/soc/amd/acp/acp70.c b/sound/soc/amd/acp/acp70.c
index 7f4a25b46576..b95e3949e70b 100644
--- a/sound/soc/amd/acp/acp70.c
+++ b/sound/soc/amd/acp/acp70.c
@@ -182,7 +182,7 @@ static void acp_acp70_audio_remove(struct platform_device *pdev)
static int acp70_pcm_resume(struct device *dev)
{
- struct acp_chip_info *chip = dev_get_platdata(dev);
+ struct acp_chip_info *chip = dev_get_drvdata(dev->parent);
struct acp_stream *stream;
struct snd_pcm_substream *substream;
snd_pcm_uframes_t buf_in_frames;
diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
index 8e57f31ef7f7..7936b3173632 100644
--- a/sound/soc/amd/ps/pci-ps.c
+++ b/sound/soc/amd/ps/pci-ps.c
@@ -193,6 +193,7 @@ static irqreturn_t acp63_irq_handler(int irq, void *dev_id)
struct amd_sdw_manager *amd_manager;
u32 ext_intr_stat, ext_intr_stat1;
u16 irq_flag = 0;
+ u16 wake_irq_flag = 0;
u16 sdw_dma_irq_flag = 0;
adata = dev_id;
@@ -231,7 +232,7 @@ static irqreturn_t acp63_irq_handler(int irq, void *dev_id)
}
if (adata->acp_rev >= ACP70_PCI_REV)
- irq_flag = check_and_handle_acp70_sdw_wake_irq(adata);
+ wake_irq_flag = check_and_handle_acp70_sdw_wake_irq(adata);
if (ext_intr_stat & BIT(PDM_DMA_STAT)) {
ps_pdm_data = dev_get_drvdata(&adata->pdm_dev->dev);
@@ -245,7 +246,7 @@ static irqreturn_t acp63_irq_handler(int irq, void *dev_id)
if (sdw_dma_irq_flag)
return IRQ_WAKE_THREAD;
- if (irq_flag)
+ if (irq_flag | wake_irq_flag)
return IRQ_HANDLED;
else
return IRQ_NONE;
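Giving the wake interrupt its own wake_irq_flag means the later PDM branch can no longer clobber the wake status stored in irq_flag; the handler simply ORs both at the end. A small sketch of the separate-flags pattern, with hypothetical stubs standing in for the two interrupt sources:

#include <stdio.h>

/* Hypothetical stubs for two independent interrupt sources. */
static unsigned short check_stream_irq(void) { return 0; } /* no stream event */
static unsigned short check_wake_irq(void)   { return 1; } /* wake event fired */

int main(void)
{
	unsigned short irq_flag = 0, wake_irq_flag = 0;

	/* A dedicated variable keeps the wake status from being
	 * overwritten by later assignments to irq_flag. */
	wake_irq_flag = check_wake_irq();
	irq_flag = check_stream_irq();

	puts((irq_flag | wake_irq_flag) ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}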
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 40bb7a1d44bc..20f99cbee29b 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -776,10 +776,9 @@ config SND_SOC_CS_AMP_LIB
tristate
config SND_SOC_CS_AMP_LIB_TEST
- tristate "KUnit test for Cirrus Logic cs-amp-lib"
- depends on KUNIT
+ tristate "KUnit test for Cirrus Logic cs-amp-lib" if !KUNIT_ALL_TESTS
+ depends on SND_SOC_CS_AMP_LIB && KUNIT
default KUNIT_ALL_TESTS
- select SND_SOC_CS_AMP_LIB
help
This builds KUnit tests for the Cirrus Logic common
amplifier library.
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index 20e6ab6f0d4a..6165ac16c3a9 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -654,6 +654,10 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv)
reinit_completion(&priv->type_detect);
+ regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL,
+ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK,
+ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK);
+
cs42l43_start_hs_bias(priv, true);
regmap_update_bits(cs42l43->regmap, CS42L43_HS2,
CS42L43_HSDET_MODE_MASK, 0x3 << CS42L43_HSDET_MODE_SHIFT);
@@ -665,6 +669,9 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv)
CS42L43_HSDET_MODE_MASK, 0x2 << CS42L43_HSDET_MODE_SHIFT);
cs42l43_stop_hs_bias(priv);
+ regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL,
+ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, 0);
+
if (!time_left)
return -ETIMEDOUT;
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 3686d468506b..45e000f61ecc 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -544,7 +544,7 @@ static int imx_card_parse_of(struct imx_card_data *data)
if (!card->dai_link)
return -ENOMEM;
- data->link_data = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL);
+ data->link_data = devm_kcalloc(dev, num_links, sizeof(*link_data), GFP_KERNEL);
if (!data->link_data)
return -ENOMEM;
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index a1ccc300e68c..3ae2a212a2e3 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -1174,9 +1174,9 @@ void graph_util_parse_link_direction(struct device_node *np,
bool is_playback_only = of_property_read_bool(np, "playback-only");
bool is_capture_only = of_property_read_bool(np, "capture-only");
- if (is_playback_only)
+ if (playback_only)
*playback_only = is_playback_only;
- if (is_capture_only)
+ if (capture_only)
*capture_only = is_capture_only;
}
EXPORT_SYMBOL_GPL(graph_util_parse_link_direction);
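The fix tests the output pointers rather than the parsed values: a false result is still written back, and callers that pass NULL for a direction they do not care about no longer fault. A standalone sketch of the guarded out-parameter convention (parse_link_direction() is a hypothetical distillation, not the kernel function):

#include <stdbool.h>
#include <stdio.h>

static void parse_link_direction(bool has_playback, bool has_capture,
				 bool *playback_only, bool *capture_only)
{
	/* Guard the destination pointers, not the source values, so a
	 * 'false' result is still written and NULL callers are safe. */
	if (playback_only)
		*playback_only = has_playback;
	if (capture_only)
		*capture_only = has_capture;
}

int main(void)
{
	bool po = true;

	parse_link_direction(false, true, &po, NULL); /* NULL is fine now */
	printf("playback_only=%d\n", po);             /* false was written */
	return 0;
}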
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 6446cda0f857..0f3b8f44e701 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -576,6 +576,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF2 |
BYT_RT5640_MCLK_EN),
},
+ { /* Acer Aspire SW3-013 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-013"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/sound/soc/intel/catpt/dsp.c b/sound/soc/intel/catpt/dsp.c
index 5993819cc58a..008a20a2acbd 100644
--- a/sound/soc/intel/catpt/dsp.c
+++ b/sound/soc/intel/catpt/dsp.c
@@ -156,7 +156,7 @@ static void catpt_dsp_set_srampge(struct catpt_dev *cdev, struct resource *sram,
{
unsigned long old;
u32 off = sram->start;
- u32 b = __ffs(mask);
+ unsigned long b = __ffs(mask);
old = catpt_readl_pci(cdev, VDRTCTL0) & mask;
dev_dbg(cdev->dev, "SRAMPGE [0x%08lx] 0x%08lx -> 0x%08lx",
diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
index 6603d8de501c..c599eb43eeb1 100644
--- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
@@ -431,7 +431,8 @@ static const struct snd_soc_acpi_link_adr ptl_cs42l43_l3[] = {
.mask = BIT(3),
.num_adr = ARRAY_SIZE(cs42l43_3_adr),
.adr_d = cs42l43_3_adr,
- }
+ },
+ {}
};
static const struct snd_soc_acpi_link_adr ptl_rt722_only[] = {
diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c
index 3a0af4ca7ab6..0f7458a43901 100644
--- a/sound/soc/renesas/rz-ssi.c
+++ b/sound/soc/renesas/rz-ssi.c
@@ -1244,7 +1244,7 @@ static int rz_ssi_runtime_resume(struct device *dev)
static const struct dev_pm_ops rz_ssi_pm_ops = {
RUNTIME_PM_OPS(rz_ssi_runtime_suspend, rz_ssi_runtime_resume, NULL)
- SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static struct platform_driver rz_ssi_driver = {
diff --git a/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c b/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c
index 246e5c2e0af5..c7e55f443351 100644
--- a/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c
+++ b/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c
@@ -60,6 +60,10 @@ static int asoc_sdw_bridge_cs35l56_asp_init(struct snd_soc_pcm_runtime *rtd)
/* 4 x 16-bit sample slots and FSYNC=48000, BCLK=3.072 MHz */
for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ ret = asoc_sdw_cs35l56_volume_limit(card, codec_dai->component->name_prefix);
+ if (ret)
+ return ret;
+
ret = snd_soc_dai_set_tdm_slot(codec_dai, tx_mask, rx_mask, 4, 16);
if (ret < 0)
return ret;
diff --git a/sound/soc/sdw_utils/soc_sdw_cs42l43.c b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
index 668c9d28a1c1..b415d45d520d 100644
--- a/sound/soc/sdw_utils/soc_sdw_cs42l43.c
+++ b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
@@ -20,6 +20,8 @@
#include <sound/soc-dapm.h>
#include <sound/soc_sdw_utils.h>
+#define CS42L43_SPK_VOLUME_0DB 128 /* 0dB Max */
+
static const struct snd_soc_dapm_route cs42l43_hs_map[] = {
{ "Headphone", NULL, "cs42l43 AMP3_OUT" },
{ "Headphone", NULL, "cs42l43 AMP4_OUT" },
@@ -117,6 +119,14 @@ int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_so
return -ENOMEM;
}
+ ret = snd_soc_limit_volume(card, "cs42l43 Speaker Digital Volume",
+ CS42L43_SPK_VOLUME_0DB);
+ if (ret)
+ dev_err(card->dev, "cs42l43 speaker volume limit failed: %d\n", ret);
+ else
+ dev_info(card->dev, "Setting CS42L43 Speaker volume limit to %d\n",
+ CS42L43_SPK_VOLUME_0DB);
+
ret = snd_soc_dapm_add_routes(&card->dapm, cs42l43_spk_map,
ARRAY_SIZE(cs42l43_spk_map));
if (ret)
diff --git a/sound/soc/sdw_utils/soc_sdw_cs_amp.c b/sound/soc/sdw_utils/soc_sdw_cs_amp.c
index 4b6181cf2971..35b550bcd4de 100644
--- a/sound/soc/sdw_utils/soc_sdw_cs_amp.c
+++ b/sound/soc/sdw_utils/soc_sdw_cs_amp.c
@@ -16,6 +16,25 @@
#define CODEC_NAME_SIZE 8
#define CS_AMP_CHANNELS_PER_AMP 4
+#define CS35L56_SPK_VOLUME_0DB 400 /* 0dB Max */
+
+int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix)
+{
+ char *volume_ctl_name;
+ int ret;
+
+ volume_ctl_name = kasprintf(GFP_KERNEL, "%s Speaker Volume", name_prefix);
+ if (!volume_ctl_name)
+ return -ENOMEM;
+
+ ret = snd_soc_limit_volume(card, volume_ctl_name, CS35L56_SPK_VOLUME_0DB);
+ if (ret)
+ dev_err(card->dev, "%s limit set failed: %d\n", volume_ctl_name, ret);
+
+ kfree(volume_ctl_name);
+ return ret;
+}
+EXPORT_SYMBOL_NS(asoc_sdw_cs35l56_volume_limit, "SND_SOC_SDW_UTILS");
int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai)
{
@@ -40,6 +59,11 @@ int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai
snprintf(widget_name, sizeof(widget_name), "%s SPK",
codec_dai->component->name_prefix);
+
+ ret = asoc_sdw_cs35l56_volume_limit(card, codec_dai->component->name_prefix);
+ if (ret)
+ return ret;
+
ret = snd_soc_dapm_add_routes(&card->dapm, &route, 1);
if (ret)
return ret;
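asoc_sdw_cs35l56_volume_limit() builds the per-amp control name at runtime and frees it on every return path. A userspace sketch of the same build-use-free shape, with asprintf() standing in for kasprintf() and a stubbed limit_volume(); the names are illustrative only:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Stub for snd_soc_limit_volume(): pretend the control exists. */
static int limit_volume(const char *ctl, int max)
{
	printf("limit '%s' to %d\n", ctl, max);
	return 0;
}

static int volume_limit(const char *prefix, int max)
{
	char *name;
	int ret;

	if (asprintf(&name, "%s Speaker Volume", prefix) < 0)
		return -1;          /* allocation failed */

	ret = limit_volume(name, max);

	free(name);                 /* freed on success and failure alike */
	return ret;
}

int main(void)
{
	return volume_limit("AMP1", 400);
}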
diff --git a/sound/soc/sdw_utils/soc_sdw_rt_dmic.c b/sound/soc/sdw_utils/soc_sdw_rt_dmic.c
index 46d917a99c51..97be110a59b6 100644
--- a/sound/soc/sdw_utils/soc_sdw_rt_dmic.c
+++ b/sound/soc/sdw_utils/soc_sdw_rt_dmic.c
@@ -29,6 +29,8 @@ int asoc_sdw_rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_da
mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "rt715-sdca");
else
mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s", component->name_prefix);
+ if (!mic_name)
+ return -ENOMEM;
card->components = devm_kasprintf(card->dev, GFP_KERNEL,
"%s mic:%s", card->components,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 4308a6cbb2e6..43835197d1fe 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1584,10 +1584,13 @@ int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
/*
* Filter for systems with 'component_chaining' enabled.
* This helps to avoid unnecessary re-configuration of an
- * already active BE on such systems.
+ * already active BE on such systems and ensures the BE DAI
+ * widget is powered ON after the BE DAI hw_params() callback.
*/
if (fe->card->component_chaining &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
continue;
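The widened filter treats BEs that are merely OPEN or in HW_PARAMS, i.e. connected but not yet fully configured, the same as NEW/CLOSE ones, so they still get paths added. A sketch of the same check expressed as an explicit allow-list (state names abbreviated from the SND_SOC_DPCM_STATE_* set):

#include <stdio.h>

enum dpcm_state { NEW, OPEN, HW_PARAMS, PREPARE, START, CLOSE };

static int needs_path_add(enum dpcm_state s)
{
	/* Only states in this allow-list may take a new path;
	 * fully configured/running BEs are skipped. */
	switch (s) {
	case NEW:
	case OPEN:
	case HW_PARAMS:
	case CLOSE:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	for (enum dpcm_state s = NEW; s <= CLOSE; s++)
		printf("state %d -> %s\n", s, needs_path_add(s) ? "add" : "skip");
	return 0;
}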
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index e8c1abf1ae0a..bf5299ba11c3 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -409,11 +409,11 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai,
unsigned int rate)
{
struct platform_device *pdev = sai->pdev;
- unsigned int sai_ck_rate, sai_ck_max_rate, sai_curr_rate, sai_new_rate;
+ unsigned int sai_ck_rate, sai_ck_max_rate, sai_ck_min_rate, sai_curr_rate, sai_new_rate;
int div, ret;
/*
- * Set maximum expected kernel clock frequency
+ * Set minimum and maximum expected kernel clock frequency
* - mclk on or spdif:
* f_sai_ck = MCKDIV * mclk-fs * fs
* Here typical 256 ratio is assumed for mclk-fs
@@ -423,13 +423,16 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai,
* Set constraint MCKDIV * FRL <= 256, to ensure MCKDIV is in available range
* f_sai_ck = sai_ck_max_rate * pow_of_two(FRL) / 256
*/
+ sai_ck_min_rate = rate * 256;
if (!(rate % SAI_RATE_11K))
sai_ck_max_rate = SAI_MAX_SAMPLE_RATE_11K * 256;
else
sai_ck_max_rate = SAI_MAX_SAMPLE_RATE_8K * 256;
- if (!sai->sai_mclk && !STM_SAI_PROTOCOL_IS_SPDIF(sai))
+ if (!sai->sai_mclk && !STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+ sai_ck_min_rate = rate * sai->fs_length;
sai_ck_max_rate /= DIV_ROUND_CLOSEST(256, roundup_pow_of_two(sai->fs_length));
+ }
/*
* Request exclusivity, as the clock is shared by SAI sub-blocks and by
@@ -444,7 +447,10 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai,
* return immediately.
*/
sai_curr_rate = clk_get_rate(sai->sai_ck);
- if (stm32_sai_rate_accurate(sai_ck_max_rate, sai_curr_rate))
+ dev_dbg(&pdev->dev, "kernel clock rate: min [%u], max [%u], current [%u]",
+ sai_ck_min_rate, sai_ck_max_rate, sai_curr_rate);
+ if (stm32_sai_rate_accurate(sai_ck_max_rate, sai_curr_rate) &&
+ sai_curr_rate >= sai_ck_min_rate)
return 0;
/*
@@ -472,7 +478,7 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai,
/* Try a lower frequency */
div++;
sai_ck_rate = sai_ck_max_rate / div;
- } while (sai_ck_rate > rate);
+ } while (sai_ck_rate >= sai_ck_min_rate);
/* No accurate rate found */
dev_err(&pdev->dev, "Failed to find an accurate rate");
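With the new lower bound, the divider loop enumerates candidate kernel-clock rates from the maximum downward and stops once a candidate would fall below the rate-dependent minimum, instead of running all the way down to the sample rate. A standalone sketch of that bounded search; the 256 ratio and 192 kHz ceiling mirror the mclk-fs assumptions above:

#include <stdio.h>

int main(void)
{
	const unsigned int fs = 48000;
	const unsigned int min_rate = fs * 256;      /* lower bound: 12.288 MHz */
	const unsigned int max_rate = 192000 * 256;  /* upper bound: 49.152 MHz */
	unsigned int div, rate;

	/* Walk integer dividers from the top; stop below the minimum.
	 * A real driver would test each candidate for accuracy here. */
	for (div = 1; (rate = max_rate / div) >= min_rate; div++)
		printf("div=%u -> candidate %u Hz\n", div, rate);

	return 0;
}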
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index a29f28eb7d0c..f36ec98da460 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -926,6 +926,8 @@ static int endpoint_set_interface(struct snd_usb_audio *chip,
{
int altset = set ? ep->altsetting : 0;
int err;
+ int retries = 0;
+ const int max_retries = 5;
if (ep->iface_ref->altset == altset)
return 0;
@@ -935,8 +937,13 @@ static int endpoint_set_interface(struct snd_usb_audio *chip,
usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n",
ep->iface, altset, ep->ep_num);
+retry:
err = usb_set_interface(chip->dev, ep->iface, altset);
if (err < 0) {
+ if (err == -EPROTO && ++retries <= max_retries) {
+ msleep(5 * (1 << (retries - 1)));
+ goto retry;
+ }
usb_audio_err_ratelimited(
chip, "%d:%d: usb_set_interface failed (%d)\n",
ep->iface, altset, err);
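The new path retries usb_set_interface() only on -EPROTO, with a doubling delay (5, 10, 20, 40, 80 ms) and a hard cap of five attempts. A minimal userspace sketch of the bounded exponential backoff; try_once() is a hypothetical stand-in for usb_set_interface():

#include <stdio.h>
#include <unistd.h>

#define EPROTO_ERR (-71) /* stand-in for -EPROTO */

/* Stub that fails transiently a few times before succeeding. */
static int try_once(int attempt)
{
	return attempt < 3 ? EPROTO_ERR : 0;
}

int main(void)
{
	const int max_retries = 5;
	int retries = 0, err;

	for (;;) {
		err = try_once(retries);
		if (err != EPROTO_ERR || retries >= max_retries)
			break;
		retries++;
		/* 5, 10, 20, 40, 80 ms: the delay doubles per retry */
		usleep(5000 * (1 << (retries - 1)));
	}
	printf("result %d after %d retries\n", err, retries);
	return 0;
}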
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 9d32b21a5fbb..0ba4641a0eb1 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -260,7 +260,8 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
}
/* Jabra Evolve 65 headset */
- if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) {
+ if (chip->usb_id == USB_ID(0x0b0e, 0x030b) ||
+ chip->usb_id == USB_ID(0x0b0e, 0x030c)) {
/* only 48kHz for playback while keeping 16kHz for capture */
if (fp->nr_rates != 1)
return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index dcdd7e9e1ae9..cfed000f243a 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1885,10 +1885,18 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
}
port_info = find_port_info(umidi, number);
- name_format = port_info ? port_info->name :
- (jack_name != default_jack_name ? "%s %s" : "%s %s %d");
- snprintf(substream->name, sizeof(substream->name),
- name_format, umidi->card->shortname, jack_name, number + 1);
+ if (port_info || jack_name == default_jack_name ||
+ strncmp(umidi->card->shortname, jack_name, strlen(umidi->card->shortname)) != 0) {
+ name_format = port_info ? port_info->name :
+ (jack_name != default_jack_name ? "%s %s" : "%s %s %d");
+ snprintf(substream->name, sizeof(substream->name),
+ name_format, umidi->card->shortname, jack_name, number + 1);
+ } else {
+ /* The manufacturer already included the iProduct name in the
+ * jack name; do not duplicate it.
+ */
+ strscpy(substream->name, jack_name);
+ }
*rsubstream = substream;
}
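The naming change keeps ports from reading "Shortname Shortname MIDI 1" when the vendor already embedded the product name in the jack string. A sketch of the strncmp()-based duplicate-prefix check in isolation (sample names invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *shortname = "UM-ONE";
	const char *jack_name = "UM-ONE MIDI 1"; /* already carries the product name */
	char name[64];

	if (strncmp(shortname, jack_name, strlen(shortname)) == 0)
		snprintf(name, sizeof(name), "%s", jack_name);        /* use as-is */
	else
		snprintf(name, sizeof(name), "%s %s", shortname, jack_name);

	printf("%s\n", name); /* "UM-ONE MIDI 1", not "UM-ONE UM-ONE MIDI 1" */
	return 0;
}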
diff --git a/tools/arch/x86/include/asm/amd-ibs.h b/tools/arch/x86/include/asm/amd/ibs.h
index cb1740bc3da2..300b6e0765b2 100644
--- a/tools/arch/x86/include/asm/amd-ibs.h
+++ b/tools/arch/x86/include/asm/amd/ibs.h
@@ -4,7 +4,7 @@
* 55898 Rev 0.35 - Feb 5, 2021
*/
-#include "msr-index.h"
+#include "../msr-index.h"
/* IBS_OP_DATA2 DataSrc */
#define IBS_DATA_SRC_LOC_CACHE 2
diff --git a/tools/arch/x86/include/asm/inat.h b/tools/arch/x86/include/asm/inat.h
index 253690eb3c26..183aa662b165 100644
--- a/tools/arch/x86/include/asm/inat.h
+++ b/tools/arch/x86/include/asm/inat.h
@@ -82,6 +82,7 @@
#define INAT_NO_REX2 (1 << (INAT_FLAG_OFFS + 8))
#define INAT_REX2_VARIANT (1 << (INAT_FLAG_OFFS + 9))
#define INAT_EVEX_SCALABLE (1 << (INAT_FLAG_OFFS + 10))
+#define INAT_INV64 (1 << (INAT_FLAG_OFFS + 11))
/* Attribute making macros for attribute tables */
#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
@@ -242,4 +243,9 @@ static inline int inat_evex_scalable(insn_attr_t attr)
{
return attr & INAT_EVEX_SCALABLE;
}
+
+static inline int inat_is_invalid64(insn_attr_t attr)
+{
+ return attr & INAT_INV64;
+}
#endif
diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv
index 8d25b0b49f3b..8d925ce9750f 100644
--- a/tools/arch/x86/kcpuid/cpuid.csv
+++ b/tools/arch/x86/kcpuid/cpuid.csv
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-# Generator: x86-cpuid-db v2.3
+# Generator: x86-cpuid-db v2.4
#
# Auto-generated file.
@@ -689,7 +689,7 @@
0x80000001, 0, edx, 19, mp , Out-of-spec AMD Multiprocessing bit
0x80000001, 0, edx, 20, nx , No-execute page protection
0x80000001, 0, edx, 22, mmxext , AMD MMX extensions
-0x80000001, 0, edx, 23, e_mmx , MMX instructions (Transmeta)
+0x80000001, 0, edx, 23, e_mmx , MMX instructions
0x80000001, 0, edx, 24, e_fxsr , FXSAVE and FXRSTOR instructions
0x80000001, 0, edx, 25, fxsr_opt , FXSAVE and FXRSTOR optimizations
0x80000001, 0, edx, 26, pdpe1gb , 1-GB large page support
diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
index e91d4c4e1c16..bce69c6bfa69 100644
--- a/tools/arch/x86/lib/insn.c
+++ b/tools/arch/x86/lib/insn.c
@@ -324,6 +324,11 @@ int insn_get_opcode(struct insn *insn)
}
insn->attr = inat_get_opcode_attribute(op);
+ if (insn->x86_64 && inat_is_invalid64(insn->attr)) {
+ /* This instruction is invalid, like UD2. Stop decoding. */
+ insn->attr &= INAT_INV64;
+ }
+
while (inat_is_escape(insn->attr)) {
/* Get escaped opcode */
op = get_next(insn_byte_t, insn);
@@ -337,6 +342,7 @@ int insn_get_opcode(struct insn *insn)
insn->attr = 0;
return -EINVAL;
}
+
end:
opcode->got = 1;
return 0;
@@ -658,7 +664,6 @@ int insn_get_immediate(struct insn *insn)
}
if (!inat_has_immediate(insn->attr))
- /* no immediates */
goto done;
switch (inat_immediate_size(insn->attr)) {
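`attr &= INAT_INV64` both records the invalid-in-64-bit status and clears every other attribute bit, so the escape-decoding loop right below it terminates immediately. A tiny sketch of the keep-one-flag masking; the flag values are illustrative, not the real INAT encoding:

#include <stdio.h>

#define FLAG_ESCAPE (1u << 0)
#define FLAG_MODRM  (1u << 1)
#define FLAG_INV64  (1u << 11)

int main(void)
{
	unsigned int attr = FLAG_ESCAPE | FLAG_MODRM | FLAG_INV64;

	/* Keep only the invalid-in-64-bit marker; clearing FLAG_ESCAPE
	 * makes a while (attr & FLAG_ESCAPE) decode loop terminate. */
	attr &= FLAG_INV64;

	printf("escape=%d inv64=%d\n",
	       !!(attr & FLAG_ESCAPE), !!(attr & FLAG_INV64));
	return 0;
}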
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index f5dd84eb55dc..262f7ca1fb95 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -35,7 +35,7 @@
# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
#
-# REX2 Prefix
+# REX2 Prefix Superscripts
# - (!REX2): REX2 is not allowed
# - (REX2): REX2 variant e.g. JMPABS
@@ -147,7 +147,7 @@ AVXcode:
# 0x60 - 0x6f
60: PUSHA/PUSHAD (i64)
61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix),(o64)
63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
64: SEG=FS (Prefix)
65: SEG=GS (Prefix)
@@ -253,8 +253,8 @@ c0: Grp2 Eb,Ib (1A)
c1: Grp2 Ev,Ib (1A)
c2: RETN Iw (f64)
c3: RETN
-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix),(o64)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix),(o64)
c6: Grp11A Eb,Ib (1A)
c7: Grp11B Ev,Iz (1A)
c8: ENTER Iw,Ib
@@ -286,10 +286,10 @@ df: ESC
# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64) (!REX2)
-e1: LOOPE/LOOPZ Jb (f64) (!REX2)
-e2: LOOP Jb (f64) (!REX2)
-e3: JrCXZ Jb (f64) (!REX2)
+e0: LOOPNE/LOOPNZ Jb (f64),(!REX2)
+e1: LOOPE/LOOPZ Jb (f64),(!REX2)
+e2: LOOP Jb (f64),(!REX2)
+e3: JrCXZ Jb (f64),(!REX2)
e4: IN AL,Ib (!REX2)
e5: IN eAX,Ib (!REX2)
e6: OUT Ib,AL (!REX2)
@@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2)
# in "near" jumps and calls is 16-bit. For CALL,
# push of return address is 16-bit wide, RSP is decremented by 2
# but is not truncated to 16 bits, unlike RIP.
-e8: CALL Jz (f64) (!REX2)
-e9: JMP-near Jz (f64) (!REX2)
-ea: JMP-far Ap (i64) (!REX2)
-eb: JMP-short Jb (f64) (!REX2)
+e8: CALL Jz (f64),(!REX2)
+e9: JMP-near Jz (f64),(!REX2)
+ea: JMP-far Ap (i64),(!REX2)
+eb: JMP-short Jb (f64),(!REX2)
ec: IN AL,DX (!REX2)
ed: IN eAX,DX (!REX2)
ee: OUT DX,AL (!REX2)
@@ -478,22 +478,22 @@ AVXcode: 1
7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
# 0x0f 0x80-0x8f
# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64) (!REX2)
-81: JNO Jz (f64) (!REX2)
-82: JB/JC/JNAE Jz (f64) (!REX2)
-83: JAE/JNB/JNC Jz (f64) (!REX2)
-84: JE/JZ Jz (f64) (!REX2)
-85: JNE/JNZ Jz (f64) (!REX2)
-86: JBE/JNA Jz (f64) (!REX2)
-87: JA/JNBE Jz (f64) (!REX2)
-88: JS Jz (f64) (!REX2)
-89: JNS Jz (f64) (!REX2)
-8a: JP/JPE Jz (f64) (!REX2)
-8b: JNP/JPO Jz (f64) (!REX2)
-8c: JL/JNGE Jz (f64) (!REX2)
-8d: JNL/JGE Jz (f64) (!REX2)
-8e: JLE/JNG Jz (f64) (!REX2)
-8f: JNLE/JG Jz (f64) (!REX2)
+80: JO Jz (f64),(!REX2)
+81: JNO Jz (f64),(!REX2)
+82: JB/JC/JNAE Jz (f64),(!REX2)
+83: JAE/JNB/JNC Jz (f64),(!REX2)
+84: JE/JZ Jz (f64),(!REX2)
+85: JNE/JNZ Jz (f64),(!REX2)
+86: JBE/JNA Jz (f64),(!REX2)
+87: JA/JNBE Jz (f64),(!REX2)
+88: JS Jz (f64),(!REX2)
+89: JNS Jz (f64),(!REX2)
+8a: JP/JPE Jz (f64),(!REX2)
+8b: JNP/JPO Jz (f64),(!REX2)
+8c: JL/JNGE Jz (f64),(!REX2)
+8d: JNL/JGE Jz (f64),(!REX2)
+8e: JLE/JNG Jz (f64),(!REX2)
+8f: JNLE/JG Jz (f64),(!REX2)
# 0x0f 0x90-0x9f
90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk
index 5770c8097f32..2c19d7fc8a85 100644
--- a/tools/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk
@@ -64,6 +64,8 @@ BEGIN {
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
+ invalid64_expr = "\\(i64\\)"
+ only64_expr = "\\(o64\\)"
rex_expr = "^((REX(\\.[XRWB]+)+)|(REX$))"
rex2_expr = "\\(REX2\\)"
no_rex2_expr = "\\(!REX2\\)"
@@ -319,6 +321,11 @@ function convert_operands(count,opnd, i,j,imm,mod)
if (match(ext, force64_expr))
flags = add_flags(flags, "INAT_FORCE64")
+ # check for opcodes invalid in 64-bit mode (unless marked only64)
+ if (match(ext, invalid64_expr) &&
+ !match($0, only64_expr))
+ flags = add_flags(flags, "INAT_INV64")
+
# check REX2 not allowed
if (match(ext, no_rex2_expr))
flags = add_flags(flags, "INAT_NO_REX2")
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 04ba035d67e9..b9ce3aab15fe 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -24,6 +24,7 @@
#include <sys/poll.h>
#include <sys/utsname.h>
+#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -677,6 +678,88 @@ static void kvp_process_ipconfig_file(char *cmd,
pclose(file);
}
+static bool kvp_verify_ip_address(const void *address_string)
+{
+ char verify_buf[sizeof(struct in6_addr)];
+
+ if (inet_pton(AF_INET, address_string, verify_buf) == 1)
+ return true;
+ if (inet_pton(AF_INET6, address_string, verify_buf) == 1)
+ return true;
+ return false;
+}
+
+static void kvp_extract_routes(const char *line, void **output, size_t *remaining)
+{
+ static const char needle[] = "via ";
+ const char *match, *haystack = line;
+
+ while ((match = strstr(haystack, needle))) {
+ const char *address, *next_char;
+
+ /* Address starts after needle. */
+ address = match + strlen(needle);
+
+ /* The address ends at a space, tab, backslash, or end of line. */
+ next_char = strpbrk(address, " \t\\");
+ if (!next_char)
+ next_char = address + strlen(address) + 1;
+
+ /* Enough room for address and semicolon. */
+ if (*remaining >= (next_char - address) + 1) {
+ memcpy(*output, address, next_char - address);
+ /* Terminate string for verification. */
+ memcpy(*output + (next_char - address), "", 1);
+ if (kvp_verify_ip_address(*output)) {
+ /* Advance output buffer. */
+ *output += next_char - address;
+ *remaining -= next_char - address;
+
+ /* Each address needs a trailing semicolon. */
+ memcpy(*output, ";", 1);
+ *output += 1;
+ *remaining -= 1;
+ }
+ }
+ haystack = next_char;
+ }
+}
+
+static void kvp_get_gateway(void *buffer, size_t buffer_len)
+{
+ static const char needle[] = "default ";
+ FILE *f;
+ void *output = buffer;
+ char *line = NULL;
+ size_t alloc_size = 0, remaining = buffer_len - 1;
+ ssize_t num_chars;
+
+ /* Show route information in a single line, for each address family */
+ f = popen("ip --oneline -4 route show;ip --oneline -6 route show", "r");
+ if (!f) {
+ /* Convert the buffer into a C string. */
+ memcpy(output, "", 1);
+ return;
+ }
+ while ((num_chars = getline(&line, &alloc_size, f)) > 0) {
+ /* Skip short lines. */
+ if (num_chars <= strlen(needle))
+ continue;
+ /* Skip lines without default route. */
+ if (memcmp(line, needle, strlen(needle)))
+ continue;
+ /* Remove trailing newline to simplify further parsing. */
+ if (line[num_chars - 1] == '\n')
+ line[num_chars - 1] = '\0';
+ /* Search routes after match. */
+ kvp_extract_routes(line + strlen(needle), &output, &remaining);
+ }
+ /* Convert the buffer into a C string. */
+ memcpy(output, "", 1);
+ free(line);
+ pclose(f);
+}
+
static void kvp_get_ipconfig_info(char *if_name,
struct hv_kvp_ipaddr_value *buffer)
{
@@ -685,30 +768,7 @@ static void kvp_get_ipconfig_info(char *if_name,
char *p;
FILE *file;
- /*
- * Get the address of default gateway (ipv4).
- */
- sprintf(cmd, "%s %s", "ip route show dev", if_name);
- strcat(cmd, " | awk '/default/ {print $3 }'");
-
- /*
- * Execute the command to gather gateway info.
- */
- kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way,
- (MAX_GATEWAY_SIZE * 2), INET_ADDRSTRLEN, 0);
-
- /*
- * Get the address of default gateway (ipv6).
- */
- sprintf(cmd, "%s %s", "ip -f inet6 route show dev", if_name);
- strcat(cmd, " | awk '/default/ {print $3 }'");
-
- /*
- * Execute the command to gather gateway info (ipv6).
- */
- kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way,
- (MAX_GATEWAY_SIZE * 2), INET6_ADDRSTRLEN, 1);
-
+ kvp_get_gateway(buffer->gate_way, sizeof(buffer->gate_way));
/*
* Gather the DNS state.
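Instead of trusting awk output, each candidate gateway is now validated with inet_pton() for both address families before it is appended. A self-contained sketch of the check used by kvp_verify_ip_address():

#include <stdio.h>
#include <stdbool.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static bool is_ip_address(const char *s)
{
	char buf[sizeof(struct in6_addr)];

	/* inet_pton() returns 1 only for a well-formed address. */
	return inet_pton(AF_INET, s, buf) == 1 ||
	       inet_pton(AF_INET6, s, buf) == 1;
}

int main(void)
{
	const char *samples[] = { "192.168.0.1", "fe80::1", "not-an-address" };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%s -> %s\n", samples[i],
		       is_ip_address(samples[i]) ? "ok" : "rejected");
	return 0;
}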
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 28705ae67784..fd404729b115 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4968,6 +4968,9 @@ union bpf_attr {
* the netns switch takes place from ingress to ingress without
* going through the CPU's backlog queue.
*
+ * *skb*\ **->mark** and *skb*\ **->tstamp** are not cleared during
+ * the netns switch.
+ *
* The *flags* argument is reserved and must be 0. The helper is
* currently only supported for tc BPF program types at the
* ingress hook and for veth and netkit target device types. The
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index ffcfd777c451..7fbb50b74c00 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -42,6 +42,7 @@ libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
TEST_ARGS := $(if $(V),-v)
INCLUDES = \
+-I$(OUTPUT)arch/$(SRCARCH)/include/generated/uapi \
-I$(srctree)/tools/lib/perf/include \
-I$(srctree)/tools/lib/ \
-I$(srctree)/tools/include \
@@ -99,7 +100,16 @@ $(LIBAPI)-clean:
$(call QUIET_CLEAN, libapi)
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
-$(LIBPERF_IN): FORCE
+uapi-asm := $(OUTPUT)arch/$(SRCARCH)/include/generated/uapi/asm
+ifeq ($(SRCARCH),arm64)
+ syscall-y := $(uapi-asm)/unistd_64.h
+endif
+uapi-asm-generic:
+ $(if $(syscall-y),\
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-headers obj=$(uapi-asm) \
+ generic=include/uapi/asm-generic $(syscall-y),)
+
+$(LIBPERF_IN): uapi-asm-generic FORCE
$(Q)$(MAKE) $(build)=libperf
$(LIBPERF_A): $(LIBPERF_IN)
@@ -120,7 +130,7 @@ all: fixdep
clean: $(LIBAPI)-clean
$(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
*.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \
- $(TESTS_STATIC) $(TESTS_SHARED)
+ $(TESTS_STATIC) $(TESTS_SHARED) $(syscall-y)
TESTS_IN = tests-in.o
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index ce32cb35007d..c4da34048ef8 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -364,7 +364,7 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
"Invalid attribute (binary %s)", policy->name);
return -1;
case YNL_PT_NUL_STR:
- if ((!policy->len || len <= policy->len) && !data[len - 1])
+ if (len && (!policy->len || len <= policy->len) && !data[len - 1])
break;
yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
"Invalid attribute (string %s)", policy->name);
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 3ce7b54003c2..331b9a744410 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -842,12 +842,14 @@ int arch_decode_hint_reg(u8 sp_reg, int *base)
bool arch_is_retpoline(struct symbol *sym)
{
- return !strncmp(sym->name, "__x86_indirect_", 15);
+ return !strncmp(sym->name, "__x86_indirect_", 15) ||
+ !strncmp(sym->name, "__pi___x86_indirect_", 20);
}
bool arch_is_rethunk(struct symbol *sym)
{
- return !strcmp(sym->name, "__x86_return_thunk");
+ return !strcmp(sym->name, "__x86_return_thunk") ||
+ !strcmp(sym->name, "__pi___x86_return_thunk");
}
bool arch_is_embedded_insn(struct symbol *sym)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 3a411064fa34..b21b12ec88d9 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -227,6 +227,7 @@ static bool is_rust_noreturn(const struct symbol *func)
str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
+ str_ends_with(func->name, "_7___rustc17rust_begin_unwind") ||
strstr(func->name, "_4core9panicking13assert_failed") ||
strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
(strstr(func->name, "_4core5slice5index24slice_") &&
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index eea95c6c0c71..b7769a22fe1a 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -29,6 +29,7 @@ include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,SRCARCH)
CFLAGS += -I$(OUTPUT)arch/$(SRCARCH)/include/generated
+CFLAGS += -I$(OUTPUT)libperf/arch/$(SRCARCH)/include/generated/uapi
# Additional ARCH settings for ppc
ifeq ($(SRCARCH),powerpc)
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index 7bb885b0c32d..422e186cf3cf 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -20,6 +20,7 @@ CONFIG_VFAT_FS=y
CONFIG_PCI=y
CONFIG_USB4=y
+CONFIG_I2C=y
CONFIG_NET=y
CONFIG_MCTP=y
@@ -53,3 +54,4 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_TOPOLOGY_BUILD=y
+CONFIG_SND_SOC_CS35L56_I2C=y
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index 67503089e6a0..01e836fba488 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -2434,6 +2434,107 @@ static int memblock_overlaps_region_checks(void)
return 0;
}
+#ifdef CONFIG_NUMA
+static int memblock_set_node_check(void)
+{
+ unsigned long i, max_reserved;
+ struct memblock_region *rgn;
+ void *orig_region;
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ dummy_physical_memory_init();
+ memblock_add(dummy_physical_memory_base(), MEM_SIZE);
+ orig_region = memblock.reserved.regions;
+
+ /* Split the range equally between nodes 0 and 1 */
+ memblock_set_node(memblock_start_of_DRAM(),
+ memblock_phys_mem_size() / 2, &memblock.memory, 0);
+ memblock_set_node(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2,
+ memblock_phys_mem_size() / 2, &memblock.memory, 1);
+
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ rgn = &memblock.memory.regions[0];
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+ ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2);
+ ASSERT_EQ(memblock_get_region_node(rgn), 0);
+ rgn = &memblock.memory.regions[1];
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM() + memblock_phys_mem_size() / 2);
+ ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2);
+ ASSERT_EQ(memblock_get_region_node(rgn), 1);
+
+ /* Reserve 126 regions, with the last one crossing the node boundary */
+ for (i = 0; i < 125; i++)
+ memblock_reserve(memblock_start_of_DRAM() + SZ_16 * i, SZ_8);
+
+ memblock_reserve(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2 - SZ_8,
+ SZ_16);
+
+ /*
+ * Commit 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()")
+ * does the following to set the nid on each memblock.reserved region.
+ * However, it may miss some regions if memblock_set_node() doubles
+ * the array.
+ *
+ * By re-checking 'max', we make sure every region's nid is set properly.
+ */
+repeat:
+ max_reserved = memblock.reserved.max;
+ for_each_mem_region(rgn) {
+ int nid = memblock_get_region_node(rgn);
+
+ memblock_set_node(rgn->base, rgn->size, &memblock.reserved, nid);
+ }
+ if (max_reserved != memblock.reserved.max)
+ goto repeat;
+
+ /* Confirm each region has valid node set */
+ for_each_reserved_mem_region(rgn) {
+ ASSERT_TRUE(numa_valid_node(memblock_get_region_node(rgn)));
+ if (rgn == (memblock.reserved.regions + memblock.reserved.cnt - 1))
+ ASSERT_EQ(1, memblock_get_region_node(rgn));
+ else
+ ASSERT_EQ(0, memblock_get_region_node(rgn));
+ }
+
+ dummy_physical_memory_cleanup();
+
+ /*
+ * The current reserved.regions occupies a range of memory that was
+ * allocated by dummy_physical_memory_init(). Once that memory is freed,
+ * it must not be used. Restore the original region so the remaining
+ * tests run normally, unaffected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_set_node_checks(void)
+{
+ prefix_reset();
+ prefix_push("memblock_set_node");
+ test_print("Running memblock_set_node tests...\n");
+
+ memblock_set_node_check();
+
+ prefix_pop();
+
+ return 0;
+}
+#else
+static int memblock_set_node_checks(void)
+{
+ return 0;
+}
+#endif
+
int memblock_basic_checks(void)
{
memblock_initialization_check();
@@ -2444,6 +2545,7 @@ int memblock_basic_checks(void)
memblock_bottom_up_checks();
memblock_trim_memory_checks();
memblock_overlaps_region_checks();
+ memblock_set_node_checks();
return 0;
}
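The repeat/goto in the test reproduces the hazard being verified: setting a node id can force memblock to double its region array mid-walk, so the walk restarts whenever the recorded `max` changed. A generic userspace sketch of the restart-on-resize pattern (all names hypothetical):

#include <stdio.h>
#include <stdbool.h>

static int max_capacity = 4; /* grows when the array is "doubled" */
static int count = 3;
static bool grown;

/* Stub: the first processed item forces one reallocation. */
static void process_item(int i)
{
	(void)i;
	if (!grown) {
		grown = true;
		max_capacity *= 2;
		count += 1;  /* the resize also exposed a new item */
	}
}

int main(void)
{
	int passes = 0, max_seen;

repeat:
	passes++;
	max_seen = max_capacity;
	for (int i = 0; i < count; i++)
		process_item(i);
	/* If the capacity changed under us, some items were walked
	 * against a stale view: start the whole pass over. */
	if (max_seen != max_capacity)
		goto repeat;

	printf("stable after %d passes, count=%d\n", passes, count);
	return 0;
}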
diff --git a/tools/testing/selftests/drivers/net/dsa/tc_taprio.sh b/tools/testing/selftests/drivers/net/dsa/tc_taprio.sh
new file mode 120000
index 000000000000..d16a65e7595d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/tc_taprio.sh
@@ -0,0 +1 @@
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/ocelot/psfp.sh b/tools/testing/selftests/drivers/net/ocelot/psfp.sh
index bed748dde4b0..8972f42dfe03 100755
--- a/tools/testing/selftests/drivers/net/ocelot/psfp.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/psfp.sh
@@ -266,18 +266,14 @@ run_test()
"${base_time}" \
"${CYCLE_TIME_NS}" \
"${SHIFT_TIME_NS}" \
+ "${GATE_DURATION_NS}" \
"${NUM_PKTS}" \
"${STREAM_VID}" \
"${STREAM_PRIO}" \
"" \
"${isochron_dat}"
- # Count all received packets by looking at the non-zero RX timestamps
- received=$(isochron report \
- --input-file "${isochron_dat}" \
- --printf-format "%u\n" --printf-args "R" | \
- grep -w -v '0' | wc -l)
-
+ received=$(isochron_report_num_received "${isochron_dat}")
if [ "${received}" = "${expected}" ]; then
RET=0
else
diff --git a/tools/testing/selftests/drivers/net/ping.py b/tools/testing/selftests/drivers/net/ping.py
index 4b6822866066..af8df2313a3b 100755
--- a/tools/testing/selftests/drivers/net/ping.py
+++ b/tools/testing/selftests/drivers/net/ping.py
@@ -9,11 +9,11 @@ from lib.py import EthtoolFamily, NetDrvEpEnv
from lib.py import bkg, cmd, wait_port_listen, rand_port
from lib.py import defer, ethtool, ip
-remote_ifname=""
no_sleep=False
def _test_v4(cfg) -> None:
- cfg.require_ipver("4")
+ if not cfg.addr_v["4"]:
+ return
cmd("ping -c 1 -W0.5 " + cfg.remote_addr_v["4"])
cmd("ping -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote)
@@ -21,7 +21,8 @@ def _test_v4(cfg) -> None:
cmd("ping -s 65000 -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote)
def _test_v6(cfg) -> None:
- cfg.require_ipver("6")
+ if not cfg.addr_v["6"]:
+ return
cmd("ping -c 1 -W5 " + cfg.remote_addr_v["6"])
cmd("ping -c 1 -W5 " + cfg.addr_v["6"], host=cfg.remote)
@@ -57,7 +58,7 @@ def _set_offload_checksum(cfg, netnl, on) -> None:
def _set_xdp_generic_sb_on(cfg) -> None:
prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
- cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote)
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote)
cmd(f"ip link set dev {cfg.ifname} mtu 1500 xdpgeneric obj {prog} sec xdp", shell=True)
defer(cmd, f"ip link set dev {cfg.ifname} xdpgeneric off")
@@ -66,8 +67,8 @@ def _set_xdp_generic_sb_on(cfg) -> None:
def _set_xdp_generic_mb_on(cfg) -> None:
prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
- cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote)
- defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote)
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu 9000", shell=True, host=cfg.remote)
+ defer(ip, f"link set dev {cfg.remote_ifname} mtu 1500", host=cfg.remote)
ip("link set dev %s mtu 9000 xdpgeneric obj %s sec xdp.frags" % (cfg.ifname, prog))
defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdpgeneric off")
@@ -76,7 +77,7 @@ def _set_xdp_generic_mb_on(cfg) -> None:
def _set_xdp_native_sb_on(cfg) -> None:
prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
- cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote)
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote)
cmd(f"ip -j link set dev {cfg.ifname} mtu 1500 xdp obj {prog} sec xdp", shell=True)
defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off")
xdp_info = ip("-d link show %s" % (cfg.ifname), json=True)[0]
@@ -93,8 +94,8 @@ def _set_xdp_native_sb_on(cfg) -> None:
def _set_xdp_native_mb_on(cfg) -> None:
prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
- cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote)
- defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote)
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu 9000", shell=True, host=cfg.remote)
+ defer(ip, f"link set dev {cfg.remote_ifname} mtu 1500", host=cfg.remote)
try:
cmd(f"ip link set dev {cfg.ifname} mtu 9000 xdp obj {prog} sec xdp.frags", shell=True)
defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off")
@@ -112,18 +113,15 @@ def _set_xdp_offload_on(cfg) -> None:
except Exception as e:
raise KsftSkipEx('device does not support offloaded XDP')
defer(ip, f"link set dev {cfg.ifname} xdpoffload off")
- cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote)
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote)
if no_sleep != True:
time.sleep(10)
def get_interface_info(cfg) -> None:
- global remote_ifname
global no_sleep
- remote_info = cmd(f"ip -4 -o addr show to {cfg.remote_addr_v['4']} | awk '{{print $2}}'", shell=True, host=cfg.remote).stdout
- remote_ifname = remote_info.rstrip('\n')
- if remote_ifname == "":
+ if cfg.remote_ifname == "":
raise KsftFailEx('Can not get remote interface')
local_info = ip("-d link show %s" % (cfg.ifname), json=True)[0]
if 'parentbus' in local_info and local_info['parentbus'] == "netdevsim":
@@ -136,15 +134,25 @@ def set_interface_init(cfg) -> None:
cmd(f"ip link set dev {cfg.ifname} xdp off ", shell=True)
cmd(f"ip link set dev {cfg.ifname} xdpgeneric off ", shell=True)
cmd(f"ip link set dev {cfg.ifname} xdpoffload off", shell=True)
- cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote)
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote)
+
+def test_default_v4(cfg, netnl) -> None:
+ cfg.require_ipver("4")
-def test_default(cfg, netnl) -> None:
_set_offload_checksum(cfg, netnl, "off")
_test_v4(cfg)
- _test_v6(cfg)
_test_tcp(cfg)
_set_offload_checksum(cfg, netnl, "on")
_test_v4(cfg)
+ _test_tcp(cfg)
+
+def test_default_v6(cfg, netnl) -> None:
+ cfg.require_ipver("6")
+
+ _set_offload_checksum(cfg, netnl, "off")
+ _test_v6(cfg)
+ _test_tcp(cfg)
+ _set_offload_checksum(cfg, netnl, "on")
_test_v6(cfg)
_test_tcp(cfg)
@@ -202,7 +210,8 @@ def main() -> None:
with NetDrvEpEnv(__file__) as cfg:
get_interface_info(cfg)
set_interface_init(cfg)
- ksft_run([test_default,
+ ksft_run([test_default_v4,
+ test_default_v6,
test_xdp_generic_sb,
test_xdp_generic_mb,
test_xdp_native_sb,
diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
index 4a2d5c454fd1..59a71f22fb11 100644
--- a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
+++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
@@ -48,8 +48,16 @@ static uint64_t get_mnt_id(struct __test_metadata *const _metadata,
static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX";
+static const int mark_cmds[] = {
+ FAN_MARK_ADD,
+ FAN_MARK_REMOVE,
+ FAN_MARK_FLUSH
+};
+
+#define NUM_FAN_FDS ARRAY_SIZE(mark_cmds)
+
FIXTURE(fanotify) {
- int fan_fd;
+ int fan_fd[NUM_FAN_FDS];
char buf[256];
unsigned int rem;
void *next;
@@ -61,7 +69,7 @@ FIXTURE(fanotify) {
FIXTURE_SETUP(fanotify)
{
- int ret;
+ int i, ret;
ASSERT_EQ(unshare(CLONE_NEWNS), 0);
@@ -89,20 +97,34 @@ FIXTURE_SETUP(fanotify)
self->root_id = get_mnt_id(_metadata, "/");
ASSERT_NE(self->root_id, 0);
- self->fan_fd = fanotify_init(FAN_REPORT_MNT, 0);
- ASSERT_GE(self->fan_fd, 0);
-
- ret = fanotify_mark(self->fan_fd, FAN_MARK_ADD | FAN_MARK_MNTNS,
- FAN_MNT_ATTACH | FAN_MNT_DETACH, self->ns_fd, NULL);
- ASSERT_EQ(ret, 0);
+ for (i = 0; i < NUM_FAN_FDS; i++) {
+ self->fan_fd[i] = fanotify_init(FAN_REPORT_MNT | FAN_NONBLOCK,
+ 0);
+ ASSERT_GE(self->fan_fd[i], 0);
+ ret = fanotify_mark(self->fan_fd[i], FAN_MARK_ADD |
+ FAN_MARK_MNTNS,
+ FAN_MNT_ATTACH | FAN_MNT_DETACH,
+ self->ns_fd, NULL);
+ ASSERT_EQ(ret, 0);
+ // On fd[0] we do an extra ADD that changes nothing.
+ // On fd[1]/fd[2] we REMOVE/FLUSH, which removes the mark.
+ ret = fanotify_mark(self->fan_fd[i], mark_cmds[i] |
+ FAN_MARK_MNTNS,
+ FAN_MNT_ATTACH | FAN_MNT_DETACH,
+ self->ns_fd, NULL);
+ ASSERT_EQ(ret, 0);
+ }
self->rem = 0;
}
FIXTURE_TEARDOWN(fanotify)
{
+ int i;
+
ASSERT_EQ(self->rem, 0);
- close(self->fan_fd);
+ for (i = 0; i < NUM_FAN_FDS; i++)
+ close(self->fan_fd[i]);
ASSERT_EQ(fchdir(self->orig_root), 0);
@@ -123,8 +145,21 @@ static uint64_t expect_notify(struct __test_metadata *const _metadata,
unsigned int thislen;
if (!self->rem) {
- ssize_t len = read(self->fan_fd, self->buf, sizeof(self->buf));
- ASSERT_GT(len, 0);
+ ssize_t len;
+ int i;
+
+ for (i = NUM_FAN_FDS - 1; i >= 0; i--) {
+ len = read(self->fan_fd[i], self->buf,
+ sizeof(self->buf));
+ if (i > 0) {
+ // Groups 1,2 should get EAGAIN
+ ASSERT_EQ(len, -1);
+ ASSERT_EQ(errno, EAGAIN);
+ } else {
+ // Group 0 should get events
+ ASSERT_GT(len, 0);
+ }
+ }
self->rem = len;
self->next = (void *) self->buf;
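The fixture above relies on FAN_NONBLOCK making read() on an event-less fanotify group fail with EAGAIN instead of blocking. A minimal sketch of that behaviour, separate from the patch and using only the long-standing fanotify API (needs CAP_SYS_ADMIN to run):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/fanotify.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[256];
            ssize_t len;
            /* Non-blocking notification group with no marks added. */
            int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_NONBLOCK, O_RDONLY);

            if (fd < 0) {
                    perror("fanotify_init");
                    return 1;
            }
            /* No marks, so no events: the read must not block. */
            len = read(fd, buf, sizeof(buf));
            if (len < 0 && errno == EAGAIN)
                    printf("empty group returned EAGAIN, as expected\n");
            close(fd);
            return 0;
    }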
diff --git a/tools/testing/selftests/kexec/Makefile b/tools/testing/selftests/kexec/Makefile
index 67fe7a46cb62..e3000ccb9a5d 100644
--- a/tools/testing/selftests/kexec/Makefile
+++ b/tools/testing/selftests/kexec/Makefile
@@ -8,6 +8,13 @@ ifeq ($(ARCH_PROCESSED),$(filter $(ARCH_PROCESSED),x86 ppc64le))
TEST_PROGS := test_kexec_load.sh test_kexec_file_load.sh
TEST_FILES := kexec_common_lib.sh
+include ../../../scripts/Makefile.arch
+
+ifeq ($(IS_64_BIT)$(ARCH_PROCESSED),1x86)
+TEST_PROGS += test_kexec_jump.sh
+test_kexec_jump.sh: $(OUTPUT)/test_kexec_jump
+endif
+
include ../lib.mk
endif
diff --git a/tools/testing/selftests/kexec/test_kexec_jump.c b/tools/testing/selftests/kexec/test_kexec_jump.c
new file mode 100644
index 000000000000..fbce287866f5
--- /dev/null
+++ b/tools/testing/selftests/kexec/test_kexec_jump.c
@@ -0,0 +1,72 @@
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/kexec.h>
+#include <linux/reboot.h>
+#include <sys/reboot.h>
+#include <sys/syscall.h>
+
+asm(
+ " .code64\n"
+ " .data\n"
+ "purgatory_start:\n"
+
+ // Trigger kexec debug exception handling
+ " int3\n"
+
+ // Set load address for next time
+ " leaq purgatory_start_b(%rip), %r11\n"
+ " movq %r11, 8(%rsp)\n"
+
+ // Back to Linux
+ " ret\n"
+
+ // Same again
+ "purgatory_start_b:\n"
+
+ // Trigger kexec debug exception handling
+ " int3\n"
+
+ // Set load address for next time
+ " leaq purgatory_start(%rip), %r11\n"
+ " movq %r11, 8(%rsp)\n"
+
+ // Back to Linux
+ " ret\n"
+
+ "purgatory_end:\n"
+ ".previous"
+);
+extern char purgatory_start[], purgatory_end[];
+
+int main (void)
+{
+ struct kexec_segment segment = {};
+ int ret;
+
+ segment.buf = purgatory_start;
+ segment.bufsz = purgatory_end - purgatory_start;
+ segment.mem = (void *)0x400000;
+ segment.memsz = 0x1000;
+ ret = syscall(__NR_kexec_load, 0x400000, 1, &segment, KEXEC_PRESERVE_CONTEXT);
+ if (ret) {
+ perror("kexec_load");
+ exit(1);
+ }
+
+ ret = syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_KEXEC);
+ if (ret) {
+ perror("kexec reboot");
+ exit(1);
+ }
+
+ ret = syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_KEXEC);
+ if (ret) {
+ perror("kexec reboot");
+ exit(1);
+ }
+ printf("Success\n");
+ return 0;
+}
+
diff --git a/tools/testing/selftests/kexec/test_kexec_jump.sh b/tools/testing/selftests/kexec/test_kexec_jump.sh
new file mode 100755
index 000000000000..6ae977054ba2
--- /dev/null
+++ b/tools/testing/selftests/kexec/test_kexec_jump.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Prevent loading a kernel image via the kexec_load syscall when
+# signatures are required. (Dependent on CONFIG_IMA_ARCH_POLICY.)
+
+TEST="$0"
+. ./kexec_common_lib.sh
+
+# kexec requires root privileges
+require_root_privileges
+
+# get the kernel config
+get_kconfig
+
+kconfig_enabled "CONFIG_KEXEC_JUMP=y" "kexec_jump is enabled"
+if [ $? -eq 0 ]; then
+ log_skip "kexec_jump is not enabled"
+fi
+
+kconfig_enabled "CONFIG_IMA_APPRAISE=y" "IMA enabled"
+ima_appraise=$?
+
+kconfig_enabled "CONFIG_IMA_ARCH_POLICY=y" \
+ "IMA architecture specific policy enabled"
+arch_policy=$?
+
+get_secureboot_mode
+secureboot=$?
+
+if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ]; then
+ log_skip "Secure boot and CONFIG_IMA_ARCH_POLICY are enabled"
+fi
+
+./test_kexec_jump
+if [ $? -eq 0 ]; then
+ log_pass "kexec_jump succeeded"
+else
+ # The more likely failure mode if anything went wrong is that the
+ # kernel just crashes. But if we get back here, sure, whine anyway.
+ log_fail "kexec_jump failed"
+fi
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index 322b9d3b0125..57708de2075d 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -129,10 +129,10 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 1),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 1),
REG_FTR_END,
};
diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
index 2c3a0eb6b22d..9bc4591c7b16 100644
--- a/tools/testing/selftests/mm/compaction_test.c
+++ b/tools/testing/selftests/mm/compaction_test.c
@@ -90,6 +90,8 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
int compaction_index = 0;
char nr_hugepages[20] = {0};
char init_nr_hugepages[24] = {0};
+ char target_nr_hugepages[24] = {0};
+ int slen;
snprintf(init_nr_hugepages, sizeof(init_nr_hugepages),
"%lu", initial_nr_hugepages);
@@ -106,11 +108,18 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
goto out;
}
- /* Request a large number of huge pages. The Kernel will allocate
- as much as it can */
- if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
- ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
- strerror(errno));
+ /*
+ * Request huge pages for about half of the free memory. The kernel
+ * will allocate as much as it can, and we expect it to get at least 1/3
+ */
+ nr_hugepages_ul = mem_free / hugepage_size / 2;
+ snprintf(target_nr_hugepages, sizeof(target_nr_hugepages),
+ "%lu", nr_hugepages_ul);
+
+ slen = strlen(target_nr_hugepages);
+ if (write(fd, target_nr_hugepages, slen) != slen) {
+ ksft_print_msg("Failed to write %lu to /proc/sys/vm/nr_hugepages: %s\n",
+ nr_hugepages_ul, strerror(errno));
goto close_fd;
}
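For reference, the arithmetic behind the new target: half of the free memory, expressed as a number of huge pages. A standalone sketch with made-up numbers (sizes in KiB, as the test reads them from /proc/meminfo; not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long mem_free = 8UL * 1024 * 1024;  /* 8 GiB free */
            unsigned long hugepage_size = 2048;          /* 2 MiB pages */
            unsigned long target = mem_free / hugepage_size / 2;

            /* The test expects at least a third of this to materialize. */
            printf("request %lu huge pages, expect >= %lu\n",
                   target, target / 3);
            return 0;
    }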
diff --git a/tools/testing/selftests/mm/guard-regions.c b/tools/testing/selftests/mm/guard-regions.c
index b3d0e2771096..eba43ead13ae 100644
--- a/tools/testing/selftests/mm/guard-regions.c
+++ b/tools/testing/selftests/mm/guard-regions.c
@@ -271,12 +271,16 @@ FIXTURE_SETUP(guard_regions)
self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
setup_sighandler();
- if (variant->backing == ANON_BACKED)
+ switch (variant->backing) {
+ case ANON_BACKED:
return;
-
- self->fd = open_file(
- variant->backing == SHMEM_BACKED ? "/tmp/" : "",
- self->path);
+ case LOCAL_FILE_BACKED:
+ self->fd = open_file("", self->path);
+ break;
+ case SHMEM_BACKED:
+ self->fd = memfd_create(self->path, 0);
+ break;
+ }
/* We truncate file to at least 100 pages, tests can modify as needed. */
ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
@@ -1696,7 +1700,7 @@ TEST_F(guard_regions, readonly_file)
char *ptr;
int i;
- if (variant->backing == ANON_BACKED)
+ if (variant->backing != LOCAL_FILE_BACKED)
SKIP(return, "Read-only test specific to file-backed");
/* Map shared so we can populate with pattern, populate it, unmap. */
diff --git a/tools/testing/selftests/mm/pkey-powerpc.h b/tools/testing/selftests/mm/pkey-powerpc.h
index 1bad310d282a..17bf2d1b0192 100644
--- a/tools/testing/selftests/mm/pkey-powerpc.h
+++ b/tools/testing/selftests/mm/pkey-powerpc.h
@@ -3,6 +3,8 @@
#ifndef _PKEYS_POWERPC_H
#define _PKEYS_POWERPC_H
+#include <sys/stat.h>
+
#ifndef SYS_pkey_alloc
# define SYS_pkey_alloc 384
# define SYS_pkey_free 385
@@ -102,8 +104,18 @@ static inline void expect_fault_on_read_execonly_key(void *p1, int pkey)
return;
}
+#define REPEAT_8(s) s s s s s s s s
+#define REPEAT_64(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) \
+ REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s)
+#define REPEAT_512(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) \
+ REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s)
+#define REPEAT_4096(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) \
+ REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s)
+#define REPEAT_16384(s) REPEAT_4096(s) REPEAT_4096(s) \
+ REPEAT_4096(s) REPEAT_4096(s)
+
/* 4-byte instructions * 16384 = 64K page */
-#define __page_o_noops() asm(".rept 16384 ; nop; .endr")
+#define __page_o_noops() asm(REPEAT_16384("nop\n"))
static inline void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey)
{
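The REPEAT_*() helpers above lean on C's adjacent string-literal concatenation: nesting them multiplies the copies, so REPEAT_16384("nop\n") expands to a single literal of 16384 four-byte instructions, i.e. one 64K page, without the inline-assembler .rept directive. A small sketch of the mechanism (mine, not part of the patch):

    #include <assert.h>

    #define REPEAT_8(s) s s s s s s s s

    /* Eight concatenated copies of a 4-char string, plus the NUL. */
    static_assert(sizeof(REPEAT_8("nop\n")) == 8 * 4 + 1,
                  "adjacent literals concatenate into one string");

    int main(void)
    {
            return 0;
    }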
diff --git a/tools/testing/selftests/mm/pkey_util.c b/tools/testing/selftests/mm/pkey_util.c
index ca4ad0d44ab2..255b332f7a08 100644
--- a/tools/testing/selftests/mm/pkey_util.c
+++ b/tools/testing/selftests/mm/pkey_util.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#define __SANE_USERSPACE_TYPES__
#include <sys/syscall.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 124078b56fa4..70a38f485d4d 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -31,6 +31,7 @@ TEST_PROGS += veth.sh
TEST_PROGS += ioam6.sh
TEST_PROGS += gro.sh
TEST_PROGS += gre_gso.sh
+TEST_PROGS += gre_ipv6_lladdr.sh
TEST_PROGS += cmsg_so_mark.sh
TEST_PROGS += cmsg_so_priority.sh
TEST_PROGS += test_so_rcv.sh
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index 90f8a244ea90..e59fba366a0a 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn other_tpid"
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn other_tpid 8021p drop_untagged"
NUM_NETIFS=4
CHECK_TC="yes"
source lib.sh
@@ -194,6 +194,100 @@ other_tpid()
tc qdisc del dev $h2 clsact
}
+8021p_do()
+{
+ local should_fail=$1; shift
+ local mac=de:ad:be:ef:13:37
+
+ tc filter add dev $h2 ingress protocol all pref 1 handle 101 \
+ flower dst_mac $mac action drop
+
+ $MZ -q $h1 -c 1 -b $mac -a own "81:00 00:00 08:00 aa-aa-aa-aa-aa-aa-aa-aa-aa"
+ sleep 1
+
+ tc -j -s filter show dev $h2 ingress \
+ | jq -e ".[] | select(.options.handle == 101) \
+ | select(.options.actions[0].stats.packets == 1)" &> /dev/null
+ check_err_fail $should_fail $? "802.1p-tagged reception"
+
+ tc filter del dev $h2 ingress pref 1
+}
+
+8021p()
+{
+ RET=0
+
+ tc qdisc add dev $h2 clsact
+ ip link set $h2 promisc on
+
+ # Test that with the default_pvid, 1, packets tagged with VID 0 are
+ # accepted.
+ 8021p_do 0
+
+ # Test that packets tagged with VID 0 are still accepted after changing
+ # the default_pvid.
+ ip link set br0 type bridge vlan_default_pvid 10
+ 8021p_do 0
+
+ log_test "Reception of 802.1p-tagged traffic"
+
+ ip link set $h2 promisc off
+ tc qdisc del dev $h2 clsact
+}
+
+send_untagged_and_8021p()
+{
+ ping_do $h1 192.0.2.2
+ check_fail $?
+
+ 8021p_do 1
+}
+
+drop_untagged()
+{
+ RET=0
+
+ tc qdisc add dev $h2 clsact
+ ip link set $h2 promisc on
+
+ # Test that with no PVID, untagged and 802.1p-tagged traffic is
+ # dropped.
+ ip link set br0 type bridge vlan_default_pvid 1
+
+ # First we reconfigure the default_pvid, 1, as a non-PVID VLAN.
+ bridge vlan add dev $swp1 vid 1 untagged
+ send_untagged_and_8021p
+ bridge vlan add dev $swp1 vid 1 pvid untagged
+
+ # Next we try to delete VID 1 altogether
+ bridge vlan del dev $swp1 vid 1
+ send_untagged_and_8021p
+ bridge vlan add dev $swp1 vid 1 pvid untagged
+
+ # Set up the bridge without a default_pvid, then check that the 8021q
+ # module, when the bridge port goes down and then up again, does not
+ # accidentally re-enable untagged packet reception.
+ ip link set br0 type bridge vlan_default_pvid 0
+ ip link set $swp1 down
+ ip link set $swp1 up
+ setup_wait
+ send_untagged_and_8021p
+
+ # Remove swp1 as a bridge port and let it rejoin the bridge while it
+ # has no default_pvid.
+ ip link set $swp1 nomaster
+ ip link set $swp1 master br0
+ send_untagged_and_8021p
+
+ # Restore settings
+ ip link set br0 type bridge vlan_default_pvid 1
+
+ log_test "Dropping of untagged and 802.1p-tagged traffic with no PVID"
+
+ ip link set $h2 promisc off
+ tc qdisc del dev $h2 clsact
+}
+
trap cleanup EXIT
setup_prepare
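In 8021p_do() above, the raw payload "81:00 00:00 08:00" is a priority tag: the TPID 0x8100 followed by a TCI of 0x0000, meaning PCP 0 and VID 0 (no VLAN, priority information only), then the inner IPv4 ethertype. A quick sketch of how the TCI fields unpack (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t tci = 0x0000;          /* the two bytes after 0x8100 */
            unsigned pcp = tci >> 13;       /* priority code point */
            unsigned dei = (tci >> 12) & 1; /* drop eligible indicator */
            unsigned vid = tci & 0x0fff;    /* VLAN ID; 0 = priority tag */

            printf("pcp=%u dei=%u vid=%u\n", pcp, dei, vid);
            return 0;
    }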
diff --git a/tools/testing/selftests/net/forwarding/tc_taprio.sh b/tools/testing/selftests/net/forwarding/tc_taprio.sh
new file mode 100755
index 000000000000..8992aeabfe0b
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/tc_taprio.sh
@@ -0,0 +1,421 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS=" \
+ test_clock_jump_backward \
+ test_taprio_after_ptp \
+ test_max_sdu \
+ test_clock_jump_backward_forward \
+"
+NUM_NETIFS=4
+source tc_common.sh
+source lib.sh
+source tsn_lib.sh
+
+require_command python3
+
+# The test assumes the usual topology from the README, where h1 is connected to
+# swp1, h2 to swp2, and swp1 and swp2 are together in a bridge.
+# Additional assumption: h1 and h2 use the same PHC, and so do swp1 and swp2.
+# By synchronizing h1 to swp1 via PTP, h2 is also implicitly synchronized to
+# swp1 (and both to CLOCK_REALTIME).
+h1=${NETIFS[p1]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p3]}
+h2=${NETIFS[p4]}
+
+UDS_ADDRESS_H1="/var/run/ptp4l_h1"
+UDS_ADDRESS_SWP1="/var/run/ptp4l_swp1"
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+# Tunables
+NUM_PKTS=100
+STREAM_VID=10
+STREAM_PRIO_1=6
+STREAM_PRIO_2=5
+STREAM_PRIO_3=4
+# PTP uses TC 0
+ALL_GATES=$((1 << 0 | 1 << STREAM_PRIO_1 | 1 << STREAM_PRIO_2))
+# Use a conservative cycle of 10 ms to allow the test to still pass when
+# the kernel has some extra overhead, like lockdep etc.
+CYCLE_TIME_NS=10000000
+# Create two Gate Control List entries, one OPEN and one CLOSE, of equal
+# durations
+GATE_DURATION_NS=$((CYCLE_TIME_NS / 2))
+# Give 2/3 of the cycle time to user space and 1/3 to the kernel
+FUDGE_FACTOR=$((CYCLE_TIME_NS / 3))
+# Shift the isochron base time by half the gate time, so that packets are
+# always received by swp1 close to the middle of the time slot, to minimize
+# inaccuracies due to network sync
+SHIFT_TIME_NS=$((GATE_DURATION_NS / 2))
+
+path_delay=
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+switch_create()
+{
+ local h2_mac_addr=$(mac_get $h2)
+
+ ip link set $swp1 up
+ ip link set $swp2 up
+
+ ip link add br0 type bridge vlan_filtering 1
+ ip link set $swp1 master br0
+ ip link set $swp2 master br0
+ ip link set br0 up
+
+ bridge vlan add dev $swp2 vid $STREAM_VID
+ bridge vlan add dev $swp1 vid $STREAM_VID
+ bridge fdb add dev $swp2 \
+ $h2_mac_addr vlan $STREAM_VID static master
+}
+
+switch_destroy()
+{
+ ip link del br0
+}
+
+ptp_setup()
+{
+ # Set up swp1 as a master PHC for h1, synchronized to the local
+ # CLOCK_REALTIME.
+ phc2sys_start $UDS_ADDRESS_SWP1
+ ptp4l_start $h1 true $UDS_ADDRESS_H1
+ ptp4l_start $swp1 false $UDS_ADDRESS_SWP1
+}
+
+ptp_cleanup()
+{
+ ptp4l_stop $swp1
+ ptp4l_stop $h1
+ phc2sys_stop
+}
+
+txtime_setup()
+{
+ local if_name=$1
+
+ tc qdisc add dev $if_name clsact
+ # Classify PTP on TC 7 and isochron on TC 6
+ tc filter add dev $if_name egress protocol 0x88f7 \
+ flower action skbedit priority 7
+ tc filter add dev $if_name egress protocol 802.1Q \
+ flower vlan_ethtype 0xdead action skbedit priority 6
+ tc qdisc add dev $if_name handle 100: parent root mqprio num_tc 8 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ map 0 1 2 3 4 5 6 7 \
+ hw 1
+ # Set up TC 5, 6, 7 for SO_TXTIME. tc-mqprio queues count from 1.
+ tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_1 + 1)) etf \
+ clockid CLOCK_TAI offload delta $FUDGE_FACTOR
+ tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_2 + 1)) etf \
+ clockid CLOCK_TAI offload delta $FUDGE_FACTOR
+ tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_3 + 1)) etf \
+ clockid CLOCK_TAI offload delta $FUDGE_FACTOR
+}
+
+txtime_cleanup()
+{
+ local if_name=$1
+
+ tc qdisc del dev $if_name clsact
+ tc qdisc del dev $if_name root
+}
+
+taprio_replace()
+{
+ local if_name="$1"; shift
+ local extra_args="$1"; shift
+
+ # STREAM_PRIO_1 always has an open gate.
+ # STREAM_PRIO_2 has a gate open for GATE_DURATION_NS (half the cycle time)
+ # STREAM_PRIO_3 always has a closed gate.
+ tc qdisc replace dev $if_name root stab overhead 24 taprio num_tc 8 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ map 0 1 2 3 4 5 6 7 \
+ sched-entry S $(printf "%x" $ALL_GATES) $GATE_DURATION_NS \
+ sched-entry S $(printf "%x" $((ALL_GATES & ~(1 << STREAM_PRIO_2)))) $GATE_DURATION_NS \
+ base-time 0 flags 0x2 $extra_args
+ taprio_wait_for_admin $if_name
+}
+
+taprio_cleanup()
+{
+ local if_name=$1
+
+ tc qdisc del dev $if_name root
+}
+
+probe_path_delay()
+{
+ local isochron_dat="$(mktemp)"
+ local received
+
+ log_info "Probing path delay"
+
+ isochron_do "$h1" "$h2" "$UDS_ADDRESS_H1" "" 0 \
+ "$CYCLE_TIME_NS" "" "" "$NUM_PKTS" \
+ "$STREAM_VID" "$STREAM_PRIO_1" "" "$isochron_dat"
+
+ received=$(isochron_report_num_received "$isochron_dat")
+ if [ "$received" != "$NUM_PKTS" ]; then
+ echo "Cannot establish basic data path between $h1 and $h2"
+ exit $ksft_fail
+ fi
+
+ printf "pdelay = {}\n" > isochron_data.py
+ isochron report --input-file "$isochron_dat" \
+ --printf-format "pdelay[%u] = %d - %d\n" \
+ --printf-args "qRT" \
+ >> isochron_data.py
+ cat <<-'EOF' > isochron_postprocess.py
+ #!/usr/bin/env python3
+
+ from isochron_data import pdelay
+ import numpy as np
+
+ w = np.array(list(pdelay.values()))
+ print("{}".format(np.max(w)))
+ EOF
+ path_delay=$(python3 ./isochron_postprocess.py)
+
+ log_info "Path delay from $h1 to $h2 estimated at $path_delay ns"
+
+ if [ "$path_delay" -gt "$GATE_DURATION_NS" ]; then
+ echo "Path delay larger than gate duration, aborting"
+ exit $ksft_fail
+ fi
+
+ rm -f ./isochron_data.py 2> /dev/null
+ rm -f ./isochron_postprocess.py 2> /dev/null
+ rm -f "$isochron_dat" 2> /dev/null
+}
+
+setup_prepare()
+{
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+
+ txtime_setup $h1
+
+ # Temporarily set up PTP just to probe the end-to-end path delay.
+ ptp_setup
+ probe_path_delay
+ ptp_cleanup
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ isochron_recv_stop
+ txtime_cleanup $h1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+run_test()
+{
+ local base_time=$1; shift
+ local stream_prio=$1; shift
+ local expected_delay=$1; shift
+ local should_fail=$1; shift
+ local test_name=$1; shift
+ local isochron_dat="$(mktemp)"
+ local received
+ local median_delay
+
+ RET=0
+
+ # Set the shift time equal to the cycle time, which effectively
+ # cancels the default advance time. Packets won't be sent early in
+ # software, which ensures that they won't prematurely enter through
+ # the open gate in __test_out_of_band(). Also, the gate is open for
+ # long enough that this won't cause a problem in __test_in_band().
+ isochron_do "$h1" "$h2" "$UDS_ADDRESS_H1" "" "$base_time" \
+ "$CYCLE_TIME_NS" "$SHIFT_TIME_NS" "$GATE_DURATION_NS" \
+ "$NUM_PKTS" "$STREAM_VID" "$stream_prio" "" "$isochron_dat"
+
+ received=$(isochron_report_num_received "$isochron_dat")
+ [ "$received" = "$NUM_PKTS" ]
+ check_err_fail $should_fail $? "Reception of $NUM_PKTS packets"
+
+ if [ $should_fail = 0 ] && [ "$received" = "$NUM_PKTS" ]; then
+ printf "pdelay = {}\n" > isochron_data.py
+ isochron report --input-file "$isochron_dat" \
+ --printf-format "pdelay[%u] = %d - %d\n" \
+ --printf-args "qRT" \
+ >> isochron_data.py
+ cat <<-'EOF' > isochron_postprocess.py
+ #!/usr/bin/env python3
+
+ from isochron_data import pdelay
+ import numpy as np
+
+ w = np.array(list(pdelay.values()))
+ print("{}".format(int(np.median(w))))
+ EOF
+ median_delay=$(python3 ./isochron_postprocess.py)
+
+ # If the condition below is true, packets were delayed by a closed gate
+ [ "$median_delay" -gt $((path_delay + expected_delay)) ]
+ check_fail $? "Median delay $median_delay is greater than expected delay $expected_delay plus path delay $path_delay"
+
+ # If the condition below is true, packets were sent expecting them to
+ # hit a closed gate in the switch, but were not delayed
+ [ "$expected_delay" -gt 0 ] && [ "$median_delay" -lt "$expected_delay" ]
+ check_fail $? "Median delay $median_delay is less than expected delay $expected_delay"
+ fi
+
+ log_test "$test_name"
+
+ rm -f ./isochron_data.py 2> /dev/null
+ rm -f ./isochron_postprocess.py 2> /dev/null
+ rm -f "$isochron_dat" 2> /dev/null
+}
+
+__test_always_open()
+{
+ run_test 0.000000000 $STREAM_PRIO_1 0 0 "Gate always open"
+}
+
+__test_always_closed()
+{
+ run_test 0.000000000 $STREAM_PRIO_3 0 1 "Gate always closed"
+}
+
+__test_in_band()
+{
+ # Send packets in-band with the OPEN gate entry
+ run_test 0.000000000 $STREAM_PRIO_2 0 0 "In band with gate"
+}
+
+__test_out_of_band()
+{
+ # Send packets in-band with the CLOSE gate entry
+ run_test 0.005000000 $STREAM_PRIO_2 \
+ $((GATE_DURATION_NS - SHIFT_TIME_NS)) 0 \
+ "Out of band with gate"
+}
+
+run_subtests()
+{
+ __test_always_open
+ __test_always_closed
+ __test_in_band
+ __test_out_of_band
+}
+
+test_taprio_after_ptp()
+{
+ log_info "Setting up taprio after PTP"
+ ptp_setup
+ taprio_replace $swp2
+ run_subtests
+ taprio_cleanup $swp2
+ ptp_cleanup
+}
+
+__test_under_max_sdu()
+{
+ # Limit max-sdu for STREAM_PRIO_1
+ taprio_replace "$swp2" "max-sdu 0 0 0 0 0 0 100 0"
+ run_test 0.000000000 $STREAM_PRIO_1 0 0 "Under maximum SDU"
+}
+
+__test_over_max_sdu()
+{
+ # Limit max-sdu for STREAM_PRIO_1
+ taprio_replace "$swp2" "max-sdu 0 0 0 0 0 0 20 0"
+ run_test 0.000000000 $STREAM_PRIO_1 0 1 "Over maximum SDU"
+}
+
+test_max_sdu()
+{
+ ptp_setup
+ __test_under_max_sdu
+ __test_over_max_sdu
+ taprio_cleanup $swp2
+ ptp_cleanup
+}
+
+# Perform a clock jump in the past without synchronization running, so that the
+# time base remains where it was set by phc_ctl.
+test_clock_jump_backward()
+{
+ # This is a more complex schedule specifically crafted in a way that
+ # has been problematic on NXP LS1028A. Not much to test with it other
+ # than the fact that it passes traffic.
+ tc qdisc replace dev $swp2 root stab overhead 24 taprio num_tc 8 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 map 0 1 2 3 4 5 6 7 \
+ base-time 0 sched-entry S 20 300000 sched-entry S 10 200000 \
+ sched-entry S 20 300000 sched-entry S 48 200000 \
+ sched-entry S 20 300000 sched-entry S 83 200000 \
+ sched-entry S 40 300000 sched-entry S 00 200000 flags 2
+
+ log_info "Forcing a backward clock jump"
+ phc_ctl $swp1 set 0
+
+ ping_test $h1 192.0.2.2
+ taprio_cleanup $swp2
+}
+
+# Test that taprio tolerates clock jumps.
+# Since ptp4l and phc2sys are running, it is expected for the time to
+# eventually recover (through yet another clock jump). Isochron waits
+# until that is the case.
+test_clock_jump_backward_forward()
+{
+ log_info "Forcing a backward and a forward clock jump"
+ taprio_replace $swp2
+ phc_ctl $swp1 set 0
+ ptp_setup
+ ping_test $h1 192.0.2.2
+ run_subtests
+ ptp_cleanup
+ taprio_cleanup $swp2
+}
+
+tc_offload_check
+if [[ $? -ne 0 ]]; then
+ log_test_skip "Could not test offloaded functionality"
+ exit $EXIT_STATUS
+fi
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
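The tunables at the top of this script are all derived from the 10 ms cycle. A throwaway sketch of the derivation (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            long cycle = 10000000;      /* CYCLE_TIME_NS: 10 ms */
            long gate  = cycle / 2;     /* GATE_DURATION_NS: one GCL entry */
            long fudge = cycle / 3;     /* FUDGE_FACTOR: kernel headroom */
            long shift = gate / 2;      /* SHIFT_TIME_NS: mid-slot arrival */

            printf("gate=%ld fudge=%ld shift=%ld\n", gate, fudge, shift);
            return 0;
    }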
diff --git a/tools/testing/selftests/net/forwarding/tsn_lib.sh b/tools/testing/selftests/net/forwarding/tsn_lib.sh
index b91bcd8008a9..08c044ff6689 100644
--- a/tools/testing/selftests/net/forwarding/tsn_lib.sh
+++ b/tools/testing/selftests/net/forwarding/tsn_lib.sh
@@ -2,6 +2,8 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright 2021-2022 NXP
+tc_testing_scripts_dir=$(dirname $0)/../../tc-testing/scripts
+
REQUIRE_ISOCHRON=${REQUIRE_ISOCHRON:=yes}
REQUIRE_LINUXPTP=${REQUIRE_LINUXPTP:=yes}
@@ -18,6 +20,7 @@ fi
if [[ "$REQUIRE_LINUXPTP" = "yes" ]]; then
require_command phc2sys
require_command ptp4l
+ require_command phc_ctl
fi
phc2sys_start()
@@ -182,6 +185,7 @@ isochron_do()
local base_time=$1; shift
local cycle_time=$1; shift
local shift_time=$1; shift
+ local window_size=$1; shift
local num_pkts=$1; shift
local vid=$1; shift
local priority=$1; shift
@@ -212,6 +216,10 @@ isochron_do()
extra_args="${extra_args} --shift-time=${shift_time}"
fi
+ if ! [ -z "${window_size}" ]; then
+ extra_args="${extra_args} --window-size=${window_size}"
+ fi
+
if [ "${use_l2}" = "true" ]; then
extra_args="${extra_args} --l2 --etype=0xdead ${vid}"
receiver_extra_args="--l2 --etype=0xdead"
@@ -247,3 +255,21 @@ isochron_do()
cpufreq_restore ${ISOCHRON_CPU}
}
+
+isochron_report_num_received()
+{
+ local isochron_dat=$1; shift
+
+ # Count all received packets by looking at the non-zero RX timestamps
+ isochron report \
+ --input-file "${isochron_dat}" \
+ --printf-format "%u\n" --printf-args "R" | \
+ grep -w -v '0' | wc -l
+}
+
+taprio_wait_for_admin()
+{
+ local if_name="$1"; shift
+
+ "$tc_testing_scripts_dir/taprio_wait_for_admin.sh" "$(which tc)" "$if_name"
+}
diff --git a/tools/testing/selftests/net/gre_ipv6_lladdr.sh b/tools/testing/selftests/net/gre_ipv6_lladdr.sh
new file mode 100755
index 000000000000..5b34f6e1f831
--- /dev/null
+++ b/tools/testing/selftests/net/gre_ipv6_lladdr.sh
@@ -0,0 +1,177 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./lib.sh
+
+PAUSE_ON_FAIL="no"
+
+# The EXIT trap handler: clean up all namespaces and propagate the exit status.
+#
+exit_cleanup_all()
+{
+ cleanup_all_ns
+
+ exit "${EXIT_STATUS}"
+}
+
+# Add fake IPv4 and IPv6 networks on the loopback device, to be used as
+# underlay by future GRE devices.
+#
+setup_basenet()
+{
+ ip -netns "${NS0}" link set dev lo up
+ ip -netns "${NS0}" address add dev lo 192.0.2.10/24
+ ip -netns "${NS0}" address add dev lo 2001:db8::10/64 nodad
+}
+
+# Check if network device has an IPv6 link-local address assigned.
+#
+# Parameters:
+#
+# * $1: The network device to test
+# * $2: An extra regular expression that should be matched (to verify the
+# presence of extra attributes)
+# * $3: The expected return code from grep (to allow checking the absence of
+# a link-local address)
+# * $4: The user visible name for the scenario being tested
+#
+check_ipv6_ll_addr()
+{
+ local DEV="$1"
+ local EXTRA_MATCH="$2"
+ local XRET="$3"
+ local MSG="$4"
+
+ RET=0
+ set +e
+ ip -netns "${NS0}" -6 address show dev "${DEV}" scope link | grep "fe80::" | grep -q "${EXTRA_MATCH}"
+ check_err_fail "${XRET}" $? ""
+ log_test "${MSG}"
+ set -e
+}
+
+# Create a GRE device and verify that it gets an IPv6 link-local address as
+# expected.
+#
+# Parameters:
+#
+# * $1: The device type (gre, ip6gre, gretap or ip6gretap)
+# * $2: The local underlay IP address (can be an IPv4, an IPv6 or "any")
+# * $3: The remote underlay IP address (can be an IPv4, an IPv6 or "any")
+# * $4: The IPv6 interface identifier generation mode to use for the GRE
+# device (eui64, none, stable-privacy or random).
+#
+test_gre_device()
+{
+ local GRE_TYPE="$1"
+ local LOCAL_IP="$2"
+ local REMOTE_IP="$3"
+ local MODE="$4"
+ local ADDR_GEN_MODE
+ local MATCH_REGEXP
+ local MSG
+
+ ip link add netns "${NS0}" name gretest type "${GRE_TYPE}" local "${LOCAL_IP}" remote "${REMOTE_IP}"
+
+ case "${MODE}" in
+ "eui64")
+ ADDR_GEN_MODE=0
+ MATCH_REGEXP=""
+ MSG="${GRE_TYPE}, mode: 0 (EUI64), ${LOCAL_IP} -> ${REMOTE_IP}"
+ XRET=0
+ ;;
+ "none")
+ ADDR_GEN_MODE=1
+ MATCH_REGEXP=""
+ MSG="${GRE_TYPE}, mode: 1 (none), ${LOCAL_IP} -> ${REMOTE_IP}"
+ XRET=1 # No link-local address should be generated
+ ;;
+ "stable-privacy")
+ ADDR_GEN_MODE=2
+ MATCH_REGEXP="stable-privacy"
+ MSG="${GRE_TYPE}, mode: 2 (stable privacy), ${LOCAL_IP} -> ${REMOTE_IP}"
+ XRET=0
+ # Initialise stable_secret (required for stable-privacy mode)
+ ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.stable_secret="2001:db8::abcd"
+ ;;
+ "random")
+ ADDR_GEN_MODE=3
+ MATCH_REGEXP="stable-privacy"
+ MSG="${GRE_TYPE}, mode: 3 (random), ${LOCAL_IP} -> ${REMOTE_IP}"
+ XRET=0
+ ;;
+ esac
+
+ # Check that IPv6 link-local address is generated when device goes up
+ ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}"
+ ip -netns "${NS0}" link set dev gretest up
+ check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "config: ${MSG}"
+
+ # Now disable link-local address generation
+ ip -netns "${NS0}" link set dev gretest down
+ ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode=1
+ ip -netns "${NS0}" link set dev gretest up
+
+ # Check that link-local address generation works when re-enabled while
+ # the device is already up
+ ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}"
+ check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "update: ${MSG}"
+
+ ip -netns "${NS0}" link del dev gretest
+}
+
+test_gre4()
+{
+ local GRE_TYPE
+ local MODE
+
+ for GRE_TYPE in "gre" "gretap"; do
+ printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n"
+
+ for MODE in "eui64" "none" "stable-privacy" "random"; do
+ test_gre_device "${GRE_TYPE}" 192.0.2.10 192.0.2.11 "${MODE}"
+ test_gre_device "${GRE_TYPE}" any 192.0.2.11 "${MODE}"
+ test_gre_device "${GRE_TYPE}" 192.0.2.10 any "${MODE}"
+ done
+ done
+}
+
+test_gre6()
+{
+ local GRE_TYPE
+ local MODE
+
+ for GRE_TYPE in "ip6gre" "ip6gretap"; do
+ printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n"
+
+ for MODE in "eui64" "none" "stable-privacy" "random"; do
+ test_gre_device "${GRE_TYPE}" 2001:db8::10 2001:db8::11 "${MODE}"
+ test_gre_device "${GRE_TYPE}" any 2001:db8::11 "${MODE}"
+ test_gre_device "${GRE_TYPE}" 2001:db8::10 any "${MODE}"
+ done
+ done
+}
+
+usage()
+{
+ echo "Usage: $0 [-p]"
+ exit 1
+}
+
+while getopts :p o
+do
+ case $o in
+ p) PAUSE_ON_FAIL="yes";;
+ *) usage;;
+ esac
+done
+
+setup_ns NS0
+
+set -e
+trap exit_cleanup_all EXIT
+
+setup_basenet
+
+test_gre4
+test_gre6
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
index e26bbc169783..a951c0d33cd2 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
@@ -352,5 +352,226 @@
"$TC qdisc del dev $DUMMY handle 1:0 root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
+ },
+ {
+ "id": "90ec",
+ "name": "Test DRR's enqueue reentrant behaviour with netem",
+ "category": [
+ "qdisc",
+ "drr"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1:0 root drr",
+ "$TC class replace dev $DUMMY parent 1:0 classid 1:1 drr",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%",
+ "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1"
+ ],
+ "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0",
+ "matchJSON": [
+ {
+ "kind": "drr",
+ "handle": "1:",
+ "bytes": 196,
+ "packets": 2
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "1f1f",
+ "name": "Test ETS's enqueue reentrant behaviour with netem",
+ "category": [
+ "qdisc",
+ "ets"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1:0 root ets bands 2",
+ "$TC class replace dev $DUMMY parent 1:0 classid 1:1 ets quantum 1500",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%",
+ "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1"
+ ],
+ "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j -s class show dev $DUMMY",
+ "matchJSON": [
+ {
+ "class": "ets",
+ "handle": "1:1",
+ "stats": {
+ "bytes": 196,
+ "packets": 2
+ }
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "5e6d",
+ "name": "Test QFQ's enqueue reentrant behaviour with netem",
+ "category": [
+ "qdisc",
+ "qfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1:0 root qfq",
+ "$TC class replace dev $DUMMY parent 1:0 classid 1:1 qfq weight 100 maxpkt 1500",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%",
+ "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1"
+ ],
+ "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0",
+ "matchJSON": [
+ {
+ "kind": "qfq",
+ "handle": "1:",
+ "bytes": 196,
+ "packets": 2
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "bf1d",
+ "name": "Test HFSC's enqueue reentrant behaviour with netem",
+ "category": [
+ "qdisc",
+ "hfsc"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1:0 root hfsc",
+ "$TC class add dev $DUMMY parent 1:0 classid 1:1 hfsc ls m2 10Mbit",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%",
+ "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip dst 10.10.10.1/32 flowid 1:1",
+ "$TC class add dev $DUMMY parent 1:0 classid 1:2 hfsc ls m2 10Mbit",
+ "$TC qdisc add dev $DUMMY parent 1:2 handle 3:0 netem duplicate 100%",
+ "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 2 u32 match ip dst 10.10.10.2/32 flowid 1:2",
+ "ping -c 1 10.10.10.1 -I$DUMMY > /dev/null || true",
+ "$TC filter del dev $DUMMY parent 1:0 protocol ip prio 1",
+ "$TC class del dev $DUMMY classid 1:1"
+ ],
+ "cmdUnderTest": "ping -c 1 10.10.10.2 -I$DUMMY > /dev/null || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0",
+ "matchJSON": [
+ {
+ "kind": "hfsc",
+ "handle": "1:",
+ "bytes": 392,
+ "packets": 4
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "7c3b",
+ "name": "Test nested DRR's enqueue reentrant behaviour with netem",
+ "category": [
+ "qdisc",
+ "drr"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1:0 root drr",
+ "$TC class add dev $DUMMY parent 1:0 classid 1:1 drr",
+ "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1",
+ "$TC qdisc add dev $DUMMY handle 2:0 parent 1:1 drr",
+ "$TC class add dev $DUMMY classid 2:1 parent 2:0 drr",
+ "$TC filter add dev $DUMMY parent 2:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 2:1",
+ "$TC qdisc add dev $DUMMY parent 2:1 handle 3:0 netem duplicate 100%"
+ ],
+ "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0",
+ "matchJSON": [
+ {
+ "kind": "drr",
+ "handle": "1:",
+ "bytes": 196,
+ "packets": 2
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
+ },
+ {
+ "id": "62c4",
+ "name": "Test HTB with FQ_CODEL - basic functionality",
+ "category": [
+ "qdisc",
+ "htb",
+ "fq_codel"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin",
+ "scapyPlugin"
+ ]
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 root handle 1: htb default 11",
+ "$TC class add dev $DEV1 parent 1: classid 1:1 htb rate 10kbit",
+ "$TC class add dev $DEV1 parent 1:1 classid 1:11 htb rate 10kbit prio 0 quantum 1486",
+ "$TC qdisc add dev $DEV1 parent 1:11 fq_codel quantum 300 noecn",
+ "sleep 0.5"
+ ],
+ "scapy": {
+ "iface": "$DEV0",
+ "count": 5,
+ "packet": "Ether()/IP(dst='10.10.10.1', src='10.10.10.10')/TCP(sport=12345, dport=80)"
+ },
+ "cmdUnderTest": "$TC -s qdisc show dev $DEV1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DEV1 | grep -A 5 'qdisc fq_codel'",
+ "matchPattern": "Sent [0-9]+ bytes [0-9]+ pkt",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 handle 1: root"
+ ]
}
]
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index ec4624a283bc..f34ac0bac696 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -9,6 +9,7 @@ TEST_PROGS += test_generic_03.sh
TEST_PROGS += test_generic_04.sh
TEST_PROGS += test_generic_05.sh
TEST_PROGS += test_generic_06.sh
+TEST_PROGS += test_generic_07.sh
TEST_PROGS += test_null_01.sh
TEST_PROGS += test_null_02.sh
diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index e57a1486bb48..842b40736a9b 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -536,12 +536,17 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
if (!(io->flags & UBLKSRV_IO_FREE))
return 0;
- /* we issue because we need either fetching or committing */
+ /*
+ * We issue the command because we need either fetching, committing
+ * or getting data.
+ */
if (!(io->flags &
- (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP)))
+ (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_NEED_GET_DATA)))
return 0;
- if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP)
+ if (io->flags & UBLKSRV_NEED_GET_DATA)
+ cmd_op = UBLK_U_IO_NEED_GET_DATA;
+ else if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP)
cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ;
else if (io->flags & UBLKSRV_NEED_FETCH_RQ)
cmd_op = UBLK_U_IO_FETCH_REQ;
@@ -658,6 +663,9 @@ static void ublk_handle_cqe(struct io_uring *r,
assert(tag < q->q_depth);
if (q->tgt_ops->queue_io)
q->tgt_ops->queue_io(q, tag);
+ } else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) {
+ io->flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE;
+ ublk_queue_io_cmd(q, io, tag);
} else {
/*
* COMMIT_REQ will be completed immediately since no fetching
@@ -1237,7 +1245,7 @@ static void __cmd_create_help(char *exe, bool recovery)
printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n",
exe, recovery ? "recover" : "add");
- printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g 0|1]\n");
+ printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g]\n");
printf("\t[-e 0|1 ] [-i 0|1]\n");
printf("\t[target options] [backfile1] [backfile2] ...\n");
printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n");
@@ -1313,7 +1321,7 @@ int main(int argc, char *argv[])
opterr = 0;
optind = 2;
- while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:az",
+ while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:gaz",
longopts, &option_idx)) != -1) {
switch (opt) {
case 'a':
@@ -1351,9 +1359,7 @@ int main(int argc, char *argv[])
ctx.flags |= UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE;
break;
case 'g':
- value = strtol(optarg, NULL, 10);
- if (value)
- ctx.flags |= UBLK_F_NEED_GET_DATA;
+ ctx.flags |= UBLK_F_NEED_GET_DATA;
break;
case 0:
if (!strcmp(longopts[option_idx].name, "debug_mask"))
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
index 918db5cd633f..44ee1e4ac55b 100644
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -115,6 +115,7 @@ struct ublk_io {
#define UBLKSRV_NEED_FETCH_RQ (1UL << 0)
#define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1)
#define UBLKSRV_IO_FREE (1UL << 2)
+#define UBLKSRV_NEED_GET_DATA (1UL << 3)
unsigned short flags;
unsigned short refs; /* used by target code only */
diff --git a/tools/testing/selftests/ublk/test_generic_07.sh b/tools/testing/selftests/ublk/test_generic_07.sh
new file mode 100755
index 000000000000..cba86451fa5e
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_07.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_07"
+ERR_CODE=0
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "generic" "test UBLK_F_NEED_GET_DATA"
+
+_create_backfile 0 256M
+dev_id=$(_add_ublk_dev -t loop -q 2 -g "${UBLK_BACKFILES[0]}")
+_check_add_dev $TID $?
+
+# run fio over the ublk disk
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
+ERR_CODE=$?
+if [ "$ERR_CODE" -eq 0 ]; then
+ _mkfs_mount_test /dev/ublkb"${dev_id}"
+ ERR_CODE=$?
+fi
+
+_cleanup_test "generic"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_05.sh b/tools/testing/selftests/ublk/test_stress_05.sh
index a7071b10224d..88601b48f1cd 100755
--- a/tools/testing/selftests/ublk/test_stress_05.sh
+++ b/tools/testing/selftests/ublk/test_stress_05.sh
@@ -47,15 +47,15 @@ _create_backfile 0 256M
_create_backfile 1 256M
for reissue in $(seq 0 1); do
- ublk_io_and_remove 8G -t null -q 4 -g 1 -r 1 -i "$reissue" &
- ublk_io_and_remove 256M -t loop -q 4 -g 1 -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" &
+ ublk_io_and_remove 8G -t null -q 4 -g -r 1 -i "$reissue" &
+ ublk_io_and_remove 256M -t loop -q 4 -g -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" &
wait
done
if _have_feature "ZERO_COPY"; then
for reissue in $(seq 0 1); do
- ublk_io_and_remove 8G -t null -q 4 -g 1 -z -r 1 -i "$reissue" &
- ublk_io_and_remove 256M -t loop -q 4 -g 1 -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" &
+ ublk_io_and_remove 8G -t null -q 4 -g -z -r 1 -i "$reissue" &
+ ublk_io_and_remove 256M -t loop -q 4 -g -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" &
wait
done
fi