-rw-r--r--  .mailmap  5
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-ufs  32
-rw-r--r--  Documentation/admin-guide/hw-vuln/index.rst  1
-rw-r--r--  Documentation/admin-guide/hw-vuln/rsb.rst  268
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  5
-rw-r--r--  Documentation/admin-guide/xfs.rst  50
-rw-r--r--  Documentation/arch/x86/cpuinfo.rst  69
-rw-r--r--  Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml  18
-rw-r--r--  Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml  9
-rw-r--r--  Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml  2
-rw-r--r--  Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml  11
-rw-r--r--  Documentation/devicetree/bindings/timer/renesas,tpu.yaml  56
-rw-r--r--  Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/microchip,usb5744.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/xlnx,usb2.yaml  1
-rw-r--r--  Documentation/filesystems/ext4/super.rst  20
-rw-r--r--  Documentation/netlink/specs/ovs_vport.yaml  4
-rw-r--r--  Documentation/netlink/specs/rt-link.yaml  20
-rw-r--r--  Documentation/netlink/specs/rt-neigh.yaml  14
-rw-r--r--  Documentation/userspace-api/mseal.rst  2
-rw-r--r--  Documentation/wmi/devices/msi-wmi-platform.rst  4
-rw-r--r--  MAINTAINERS  18
-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm64/include/asm/rqspinlock.h  2
-rw-r--r--  arch/s390/Kconfig  19
-rw-r--r--  arch/s390/Makefile  2
-rw-r--r--  arch/s390/include/asm/march.h  4
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c  11
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_events.c  167
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c  3
-rw-r--r--  arch/s390/kernel/processor.c  4
-rw-r--r--  arch/s390/tools/gen_facilities.c  3
-rw-r--r--  arch/x86/entry/entry.S  9
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  12
-rw-r--r--  arch/x86/include/asm/smap.h  12
-rw-r--r--  arch/x86/kernel/acpi/boot.c  11
-rw-r--r--  arch/x86/kernel/cpu/amd.c  1
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  101
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c  48
-rw-r--r--  arch/x86/kernel/e820.c  17
-rw-r--r--  arch/x86/kernel/early_printk.c  10
-rw-r--r--  arch/x86/mm/tlb.c  6
-rw-r--r--  arch/x86/power/hibernate_asm_64.S  4
-rw-r--r--  arch/x86/xen/multicalls.c  26
-rw-r--r--  arch/x86/xen/smp_pv.c  1
-rw-r--r--  arch/x86/xen/xen-asm.S  4
-rw-r--r--  arch/x86/xen/xen-ops.h  3
-rw-r--r--  crypto/ahash.c  76
-rw-r--r--  crypto/scompress.c  10
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c  4
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c  3
-rw-r--r--  drivers/accel/ivpu/ivpu_ms.c  24
-rw-r--r--  drivers/acpi/button.c  2
-rw-r--r--  drivers/acpi/ec.c  28
-rw-r--r--  drivers/acpi/pptt.c  4
-rw-r--r--  drivers/ata/libata-sata.c  15
-rw-r--r--  drivers/ata/pata_pxa.c  6
-rw-r--r--  drivers/ata/sata_sx4.c  13
-rw-r--r--  drivers/block/null_blk/main.c  2
-rw-r--r--  drivers/bluetooth/btnxpuart.c  21
-rw-r--r--  drivers/bluetooth/btqca.c  2
-rw-r--r--  drivers/bluetooth/btrtl.c  2
-rw-r--r--  drivers/bluetooth/hci_vhci.c  10
-rw-r--r--  drivers/crypto/caam/qi.c  6
-rw-r--r--  drivers/crypto/tegra/tegra-se-aes.c  5
-rw-r--r--  drivers/dma-buf/udmabuf.c  2
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c  30
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin.c  2
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c  2
-rw-r--r--  drivers/fpga/tests/fpga-bridge-test.c  1
-rw-r--r--  drivers/fpga/tests/fpga-mgr-test.c  1
-rw-r--r--  drivers/fpga/tests/fpga-region-test.c  1
-rw-r--r--  drivers/fwctl/main.c  2
-rw-r--r--  drivers/fwctl/pds/main.c  33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c  21
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c  9
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h  1
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c  19
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c  36
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h  1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c  14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c  49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c  19
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c  11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.h  1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c  1
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c  7
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c  18
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c  4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw.c  27
-rw-r--r--  drivers/gpu/drm/imagination/pvr_job.c  7
-rw-r--r--  drivers/gpu/drm/imagination/pvr_queue.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  3
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c  23
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c  6
-rw-r--r--  drivers/gpu/drm/sti/Makefile  2
-rw-r--r--  drivers/gpu/drm/tests/drm_client_modeset_test.c  9
-rw-r--r--  drivers/gpu/drm/tests/drm_cmdline_parser_test.c  10
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c  22
-rw-r--r--  drivers/gpu/drm/tests/drm_modes_test.c  26
-rw-r--r--  drivers/gpu/drm/tests/drm_probe_helper_test.c  8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c  11
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c  20
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c  1
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_gpu_commands.h  1
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h  1
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c  12
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.c  1
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c  12
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c  108
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c  6
-rw-r--r--  drivers/gpu/drm/xe/xe_ring_ops.c  13
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c  7
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules  2
-rw-r--r--  drivers/infiniband/core/cma.c  4
-rw-r--r--  drivers/infiniband/core/ucaps.c  2
-rw-r--r--  drivers/infiniband/core/umem_odp.c  6
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c  10
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c  2
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c  2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c  14
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h  6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c  4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c  4
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c  32
-rw-r--r--  drivers/iommu/dma-iommu.c  4
-rw-r--r--  drivers/iommu/exynos-iommu.c  4
-rw-r--r--  drivers/iommu/intel/iommu.c  1
-rw-r--r--  drivers/iommu/intel/irq_remapping.c  29
-rw-r--r--  drivers/iommu/iommu.c  6
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c  27
-rw-r--r--  drivers/iommu/mtk_iommu.c  26
-rw-r--r--  drivers/irqchip/irq-bcm2712-mip.c  1
-rw-r--r--  drivers/irqchip/irq-sg2042-msi.c  1
-rw-r--r--  drivers/net/bonding/bond_main.c  13
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-core.c  7
-rw-r--r--  drivers/net/dsa/b53/b53_common.c  10
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c  13
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c  3
-rw-r--r--  drivers/net/ethernet/amd/pds_core/debugfs.c  5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h  8
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c  11
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c  7
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c  24
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c  8
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c  11
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h  3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h  1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h  6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c  1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c  113
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/rep.c  2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  49
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h  1
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c  15
-rw-r--r--  drivers/net/ethernet/ti/icssg/icss_iep.c  121
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_common.c  9
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_main.c  3
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c  3
-rw-r--r--  drivers/net/wireless/atmel/at76c50x-usb.c  2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c  4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/d3.c  8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/debugfs.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/iface.h  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mac80211.c  7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c  8
-rw-r--r--  drivers/net/wireless/ti/wl1251/tx.c  4
-rw-r--r--  drivers/nvme/host/core.c  9
-rw-r--r--  drivers/nvme/host/multipath.c  2
-rw-r--r--  drivers/nvme/host/tcp.c  2
-rw-r--r--  drivers/nvme/target/fc.c  60
-rw-r--r--  drivers/nvme/target/fcloop.c  74
-rw-r--r--  drivers/pci/quirks.c  12
-rw-r--r--  drivers/platform/mellanox/mlxbf-bootctl.c  4
-rw-r--r--  drivers/platform/x86/amd/pmf/auto-mode.c  4
-rw-r--r--  drivers/platform/x86/amd/pmf/cnqf.c  8
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c  14
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h  1
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c  12
-rw-r--r--  drivers/platform/x86/amd/pmf/tee-if.c  6
-rw-r--r--  drivers/platform/x86/asus-laptop.c  9
-rw-r--r--  drivers/platform/x86/dell/alienware-wmi-wmax.c  48
-rw-r--r--  drivers/platform/x86/msi-wmi-platform.c  99
-rw-r--r--  drivers/platform/x86/x86-android-tablets/dmi.c  14
-rw-r--r--  drivers/platform/x86/x86-android-tablets/other.c  124
-rw-r--r--  drivers/platform/x86/x86-android-tablets/x86-android-tablets.h  3
-rw-r--r--  drivers/ptp/ptp_ocp.c  1
-rw-r--r--  drivers/pwm/core.c  13
-rw-r--r--  drivers/pwm/pwm-axi-pwmgen.c  10
-rw-r--r--  drivers/pwm/pwm-fsl-ftm.c  6
-rw-r--r--  drivers/pwm/pwm-mediatek.c  8
-rw-r--r--  drivers/pwm/pwm-rcar.c  24
-rw-r--r--  drivers/pwm/pwm-stm32.c  12
-rw-r--r--  drivers/ras/amd/atl/internal.h  3
-rw-r--r--  drivers/ras/amd/atl/umc.c  19
-rw-r--r--  drivers/ras/amd/fmpm.c  9
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c  16
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c  20
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c  9
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c  14
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h  4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c  9
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c  5
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c  1
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c  7
-rw-r--r--  drivers/scsi/scsi_transport_srp.c  2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c  13
-rw-r--r--  drivers/spi/spi-fsl-qspi.c  8
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c  54
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h  1
-rw-r--r--  drivers/ufs/core/ufshcd.c  92
-rw-r--r--  drivers/ufs/host/ufs-exynos.c  85
-rw-r--r--  drivers/ufs/host/ufs-exynos.h  6
-rw-r--r--  fs/afs/dynroot.c  4
-rw-r--r--  fs/bcachefs/Kconfig  5
-rw-r--r--  fs/bcachefs/bcachefs.h  4
-rw-r--r--  fs/bcachefs/btree_journal_iter.c  5
-rw-r--r--  fs/bcachefs/btree_node_scan.c  6
-rw-r--r--  fs/bcachefs/btree_write_buffer.c  8
-rw-r--r--  fs/bcachefs/checksum.c  247
-rw-r--r--  fs/bcachefs/checksum.h  3
-rw-r--r--  fs/bcachefs/data_update.c  2
-rw-r--r--  fs/bcachefs/dirent.c  4
-rw-r--r--  fs/bcachefs/fs-io-buffered.c  17
-rw-r--r--  fs/bcachefs/io_read.c  3
-rw-r--r--  fs/bcachefs/journal_io.c  2
-rw-r--r--  fs/bcachefs/recovery.c  6
-rw-r--r--  fs/bcachefs/super.c  10
-rw-r--r--  fs/btrfs/disk-io.c  2
-rw-r--r--  fs/btrfs/ioctl.c  2
-rw-r--r--  fs/btrfs/super.c  3
-rw-r--r--  fs/devpts/inode.c  4
-rw-r--r--  fs/erofs/erofs_fs.h  8
-rw-r--r--  fs/erofs/fileio.c  2
-rw-r--r--  fs/erofs/zdata.c  1
-rw-r--r--  fs/erofs/zmap.c  5
-rw-r--r--  fs/ext4/block_validity.c  5
-rw-r--r--  fs/ext4/inode.c  75
-rw-r--r--  fs/ext4/mballoc.c  18
-rw-r--r--  fs/ext4/namei.c  2
-rw-r--r--  fs/fuse/virtio_fs.c  3
-rw-r--r--  fs/hfs/bnode.c  6
-rw-r--r--  fs/hfsplus/bnode.c  6
-rw-r--r--  fs/iomap/buffered-io.c  2
-rw-r--r--  fs/isofs/export.c  2
-rw-r--r--  fs/namei.c  8
-rw-r--r--  fs/namespace.c  34
-rw-r--r--  fs/netfs/main.c  4
-rw-r--r--  fs/overlayfs/overlayfs.h  2
-rw-r--r--  fs/overlayfs/super.c  5
-rw-r--r--  fs/smb/client/cifsencrypt.c  16
-rw-r--r--  fs/smb/client/cifsglob.h  6
-rw-r--r--  fs/smb/client/cifspdu.h  2
-rw-r--r--  fs/smb/client/connect.c  2
-rw-r--r--  fs/smb/client/inode.c  25
-rw-r--r--  fs/smb/client/reparse.c  63
-rw-r--r--  fs/smb/client/reparse.h  5
-rw-r--r--  fs/smb/client/sess.c  60
-rw-r--r--  fs/smb/client/smb1ops.c  53
-rw-r--r--  fs/smb/client/smb2ops.c  14
-rw-r--r--  fs/smb/client/smb2pdu.c  11
-rw-r--r--  fs/smb/common/smb2pdu.h  6
-rw-r--r--  fs/smb/server/smb_common.h  2
-rw-r--r--  fs/xfs/xfs_buf.c  1
-rw-r--r--  fs/xfs/xfs_buf_mem.c  2
-rw-r--r--  fs/xfs/xfs_dquot.c  3
-rw-r--r--  fs/xfs/xfs_fsmap.c  51
-rw-r--r--  fs/xfs/xfs_inode_item.c  6
-rw-r--r--  fs/xfs/xfs_log.c  2
-rw-r--r--  fs/xfs/xfs_mount.h  1
-rw-r--r--  fs/xfs/xfs_sysfs.c  32
-rw-r--r--  fs/xfs/xfs_trans_ail.c  5
-rw-r--r--  fs/xfs/xfs_trans_priv.h  28
-rw-r--r--  fs/xfs/xfs_zone_alloc.c  7
-rw-r--r--  fs/xfs/xfs_zone_gc.c  16
-rw-r--r--  include/crypto/hash.h  6
-rw-r--r--  include/crypto/internal/hash.h  2
-rw-r--r--  include/drm/drm_kunit_helpers.h  3
-rw-r--r--  include/drm/intel/pciids.h  1
-rw-r--r--  include/linux/dcache.h  106
-rw-r--r--  include/linux/firmware/cirrus/cs_dsp_test_utils.h  1
-rw-r--r--  include/linux/hrtimer.h  2
-rw-r--r--  include/linux/irqchip/irq-davinci-aintc.h  27
-rw-r--r--  include/linux/local_lock.h  58
-rw-r--r--  include/linux/local_lock_internal.h  207
-rw-r--r--  include/linux/perf_event.h  1
-rw-r--r--  include/linux/pgtable.h  9
-rw-r--r--  include/linux/platform_data/x86/intel_pmc_ipc.h  4
-rw-r--r--  include/net/fib_rules.h  2
-rw-r--r--  include/net/flow.h  1
-rw-r--r--  include/net/l3mdev.h  27
-rw-r--r--  include/rdma/ib_verbs.h  7
-rw-r--r--  include/uapi/cxl/features.h  21
-rw-r--r--  include/ufs/ufs.h  5
-rw-r--r--  include/ufs/ufshcd.h  7
-rw-r--r--  include/vdso/unaligned.h  12
-rw-r--r--  kernel/bpf/queue_stack_maps.c  35
-rw-r--r--  kernel/bpf/ringbuf.c  17
-rw-r--r--  kernel/bpf/rqspinlock.c  2
-rw-r--r--  kernel/events/core.c  70
-rw-r--r--  kernel/events/uprobes.c  15
-rw-r--r--  kernel/time/hrtimer.c  2
-rw-r--r--  kernel/time/tick-common.c  22
-rw-r--r--  kernel/trace/ftrace.c  314
-rw-r--r--  kernel/trace/rv/rv.c  7
-rw-r--r--  kernel/trace/trace.c  7
-rw-r--r--  kernel/trace/trace_events_synth.c  1
-rw-r--r--  kernel/trace/trace_functions_graph.c  11
-rw-r--r--  lib/alloc_tag.c  15
-rw-r--r--  lib/asn1_decoder.c  1
-rw-r--r--  lib/iov_iter.c  2
-rw-r--r--  lib/tests/slub_kunit.c  1
-rw-r--r--  lib/ucs2_string.c  1
-rw-r--r--  lib/zlib_inflate/inflate_syms.c  1
-rw-r--r--  mm/cma.c  19
-rw-r--r--  mm/compaction.c  6
-rw-r--r--  mm/filemap.c  1
-rw-r--r--  mm/hugetlb.c  23
-rw-r--r--  mm/kasan/kasan_test_c.c  1
-rw-r--r--  mm/memcontrol.c  39
-rw-r--r--  mm/memory.c  6
-rw-r--r--  mm/page_alloc.c  128
-rw-r--r--  mm/slub.c  10
-rw-r--r--  mm/userfaultfd.c  13
-rw-r--r--  mm/vma.c  51
-rw-r--r--  mm/vma.h  9
-rw-r--r--  net/batman-adv/hard-interface.c  1
-rw-r--r--  net/bluetooth/hci_conn.c  8
-rw-r--r--  net/bluetooth/hci_event.c  5
-rw-r--r--  net/bluetooth/l2cap_core.c  21
-rw-r--r--  net/bridge/br_vlan.c  4
-rw-r--r--  net/can/j1939/socket.c  1
-rw-r--r--  net/core/dev.c  19
-rw-r--r--  net/core/fib_rules.c  48
-rw-r--r--  net/core/filter.c  80
-rw-r--r--  net/core/rtnetlink.c  5
-rw-r--r--  net/dsa/dsa.c  59
-rw-r--r--  net/dsa/tag_8021q.c  2
-rw-r--r--  net/ethtool/cmis_cdb.c  2
-rw-r--r--  net/ipv6/route.c  1
-rw-r--r--  net/l3mdev/l3mdev.c  4
-rw-r--r--  net/mac80211/iface.c  3
-rw-r--r--  net/mctp/af_mctp.c  3
-rw-r--r--  net/netfilter/nf_flow_table_core.c  10
-rw-r--r--  net/openvswitch/flow_netlink.c  3
-rw-r--r--  net/smc/af_smc.c  5
-rw-r--r--  samples/livepatch/livepatch-callbacks-busymod.c  1
-rw-r--r--  samples/livepatch/livepatch-callbacks-demo.c  1
-rw-r--r--  samples/livepatch/livepatch-callbacks-mod.c  1
-rw-r--r--  samples/livepatch/livepatch-sample.c  1
-rw-r--r--  samples/livepatch/livepatch-shadow-fix1.c  1
-rw-r--r--  samples/livepatch/livepatch-shadow-fix2.c  1
-rw-r--r--  sound/isa/azt2320.c  4
-rw-r--r--  sound/pci/hda/Kconfig  4
-rw-r--r--  sound/pci/hda/patch_realtek.c  23
-rw-r--r--  sound/pci/hda/tas2781_hda_spi.c  3
-rw-r--r--  sound/soc/codecs/aw88081.c  10
-rw-r--r--  sound/soc/codecs/cs42l43-jack.c  3
-rw-r--r--  sound/soc/codecs/hdmi-codec.c  22
-rw-r--r--  sound/soc/codecs/lpass-wsa-macro.c  117
-rw-r--r--  sound/soc/dwc/dwc-i2s.c  13
-rw-r--r--  sound/soc/fsl/fsl_asrc_dma.c  15
-rw-r--r--  sound/soc/fsl/fsl_qmc_audio.c  3
-rw-r--r--  sound/soc/intel/avs/path.c  72
-rw-r--r--  sound/soc/intel/avs/path.h  5
-rw-r--r--  sound/soc/intel/avs/pcm.c  52
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c  1
-rw-r--r--  sound/soc/qcom/lpass.h  3
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h  5
-rw-r--r--  tools/arch/arm64/include/uapi/asm/unistd.h  24
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h  28
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h  31
-rw-r--r--  tools/arch/x86/include/uapi/asm/kvm.h  4
-rw-r--r--  tools/arch/x86/include/uapi/asm/svm.h  2
-rw-r--r--  tools/arch/x86/lib/memset_64.S  3
-rw-r--r--  tools/include/linux/cfi_types.h  45
-rw-r--r--  tools/include/uapi/asm-generic/mman-common.h  1
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h  4
-rw-r--r--  tools/include/uapi/linux/in.h  2
-rw-r--r--  tools/include/uapi/linux/kvm.h  9
-rw-r--r--  tools/include/uapi/linux/perf_event.h  2
-rw-r--r--  tools/include/uapi/linux/stat.h  99
-rwxr-xr-x  tools/net/ynl/pyynl/ynl_gen_c.py  96
-rw-r--r--  tools/objtool/arch/x86/decode.c  18
-rw-r--r--  tools/objtool/arch/x86/special.c  2
-rw-r--r--  tools/objtool/check.c  59
-rw-r--r--  tools/objtool/include/objtool/arch.h  3
-rw-r--r--  tools/perf/arch/arm/entry/syscalls/syscall.tbl  1
-rw-r--r--  tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl  1
-rw-r--r--  tools/perf/arch/powerpc/entry/syscalls/syscall.tbl  1
-rw-r--r--  tools/perf/arch/s390/entry/syscalls/syscall.tbl  1
-rw-r--r--  tools/perf/arch/sh/entry/syscalls/syscall.tbl  1
-rw-r--r--  tools/perf/arch/sparc/entry/syscalls/syscall.tbl  1
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_32.tbl  3
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl  1
-rw-r--r--  tools/perf/arch/xtensa/entry/syscalls/syscall.tbl  1
-rwxr-xr-x  tools/perf/check-headers.sh  1
-rw-r--r--  tools/perf/trace/beauty/include/linux/socket.h  2
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/fcntl.h  4
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/fs.h  21
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/mount.h  10
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/prctl.h  11
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/stat.h  99
-rw-r--r--  tools/perf/trace/beauty/include/uapi/sound/asound.h  8
-rw-r--r--  tools/perf/util/evsel.c  22
-rw-r--r--  tools/perf/util/unwind-libunwind-local.c  2
-rw-r--r--  tools/scripts/syscall.tbl  1
-rw-r--r--  tools/testing/memblock/internal.h  6
-rw-r--r--  tools/testing/memblock/linux/mutex.h  14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/res_spin_lock.c  7
-rw-r--r--  tools/testing/selftests/bpf/progs/res_spin_lock.c  10
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/iou-zcrx.py  4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc  177
-rw-r--r--  tools/testing/selftests/mincore/mincore_selftest.c  16
-rwxr-xr-x  tools/testing/selftests/mm/charge_reserved_hugetlb.sh  4
-rw-r--r--  tools/testing/selftests/mm/cow.c  2
-rwxr-xr-x  tools/testing/selftests/mm/hugetlb_reparenting_test.sh  2
-rw-r--r--  tools/testing/selftests/net/.gitignore  1
-rw-r--r--  tools/testing/selftests/net/Makefile  2
-rwxr-xr-x  tools/testing/selftests/net/fib_rule_tests.sh  34
-rw-r--r--  tools/testing/selftests/net/skf_net_off.c  244
-rwxr-xr-x  tools/testing/selftests/net/skf_net_off.sh  30
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/infra/actions.json  22
-rw-r--r--  tools/testing/shared/linux.c  4
-rw-r--r--  tools/testing/shared/linux/cleanup.h  2
451 files changed, 5256 insertions, 2617 deletions
diff --git a/.mailmap b/.mailmap
index 4f7cd8e23177..9afde79e1936 100644
--- a/.mailmap
+++ b/.mailmap
@@ -322,6 +322,7 @@ Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
+Jean-Michel Hautbois <jeanmichel.hautbois@yoseli.org> <jeanmichel.hautbois@ideasonboard.com>
Jean Tourrilhes <jt@hpl.hp.com>
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
@@ -438,6 +439,8 @@ Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
+Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@linaro.org>
+Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@intel.com>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
@@ -685,6 +688,8 @@ Simon Wunderlich <sw@simonwunderlich.de> <simon.wunderlich@saxnet.de>
Simon Wunderlich <sw@simonwunderlich.de> <simon@open-mesh.com>
Simon Wunderlich <sw@simonwunderlich.de> <siwu@hrz.tu-chemnitz.de>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
+Srinivas Kandagatla <srini@kernel.org> <srinivas.kandagatla@st.com>
+Srinivas Kandagatla <srini@kernel.org> <srinivas.kandagatla@linaro.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index ae0191295d29..e36d2de16cbd 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1604,3 +1604,35 @@ Description:
prevent the UFS from frequently performing clock gating/ungating.
The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_count
+What: /sys/bus/platform/devices/*.ufs/device_lvl_exception_count
+Date: March 2025
+Contact: Bao D. Nguyen <quic_nguyenb@quicinc.com>
+Description:
+ This attribute is applicable to ufs devices compliant to the
+ JEDEC specifications version 4.1 or later. The
+ device_lvl_exception_count is a counter indicating the number of
+ times the device level exceptions have occurred since the last
+ time this variable is reset. Writing a 0 value to this
+ attribute will reset the device_lvl_exception_count. If the
+ device_lvl_exception_count reads a positive value, the user
+ application should read the device_lvl_exception_id attribute to
+ know more information about the exception.
+
+ The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_id
+What: /sys/bus/platform/devices/*.ufs/device_lvl_exception_id
+Date: March 2025
+Contact: Bao D. Nguyen <quic_nguyenb@quicinc.com>
+Description:
+ Reading the device_lvl_exception_id returns the
+ qDeviceLevelExceptionID attribute of the ufs device JEDEC
+ specification version 4.1. The definition of the
+ qDeviceLevelExceptionID is the ufs device vendor specific
+ implementation. Refer to the device manufacturer datasheet for
+ more information on the meaning of the qDeviceLevelExceptionID
+ attribute value.
+
+ The attribute is read only.
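A minimal user-space sketch of the count/ID protocol described above: read the
counter, consult the ID attribute when the count is positive, then write 0 to
reset. The "1d84000.ufs" platform-device name below is a made-up example; real
paths follow the /sys/bus/platform/devices/*.ufs/ pattern::

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        /* Hypothetical device path; the platform device name varies. */
        const char *path =
            "/sys/bus/platform/devices/1d84000.ufs/device_lvl_exception_count";
        char buf[32];
        ssize_t n;
        int fd = open(path, O_RDWR);

        if (fd < 0)
            return 1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            if (strtoul(buf, NULL, 10) > 0) {
                /* Positive count: device_lvl_exception_id has the details. */
                printf("device level exceptions: %s", buf);
                /* Per the ABI above, writing 0 resets the counter. */
                if (pwrite(fd, "0", 1, 0) != 1)
                    perror("reset");
            }
        }
        close(fd);
        return 0;
    }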
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index ff0b440ef2dc..451874b8135d 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -22,3 +22,4 @@ are configurable at compile, boot or run time.
srso
gather_data_sampling
reg-file-data-sampling
+ rsb
diff --git a/Documentation/admin-guide/hw-vuln/rsb.rst b/Documentation/admin-guide/hw-vuln/rsb.rst
new file mode 100644
index 000000000000..21dbf9cf25f8
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/rsb.rst
@@ -0,0 +1,268 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+RSB-related mitigations
+=======================
+
+.. warning::
+ Please keep this document up-to-date, otherwise you will be
+ volunteered to update it and convert it to a very long comment in
+ bugs.c!
+
+Since 2018 there have been many Spectre CVEs related to the Return Stack
+Buffer (RSB) (sometimes referred to as the Return Address Stack (RAS) or
+Return Address Predictor (RAP) on AMD).
+
+Information about these CVEs and how to mitigate them is scattered
+amongst a myriad of microarchitecture-specific documents.
+
+This document attempts to consolidate all the relevant information in
+one place and clarify the reasoning behind the current RSB-related
+mitigations. It's meant to be as concise as possible, focused only on
+the current kernel mitigations: what are the RSB-related attack vectors
+and how are they currently being mitigated?
+
+It's *not* meant to describe how the RSB mechanism operates or how the
+exploits work. More details about those can be found in the references
+below.
+
+Rather, this is basically a glorified comment, but too long to actually
+be one. So when the next CVE comes along, a kernel developer can
+quickly refer to this as a refresher to see what we're actually doing
+and why.
+
+At a high level, there are two classes of RSB attacks: RSB poisoning
+(Intel and AMD) and RSB underflow (Intel only). They must each be
+considered individually for each attack vector (and microarchitecture
+where applicable).
+
+----
+
+RSB poisoning (Intel and AMD)
+=============================
+
+SpectreRSB
+~~~~~~~~~~
+
+RSB poisoning is a technique used by SpectreRSB [#spectre-rsb]_ where
+an attacker poisons an RSB entry to cause a victim's return instruction
+to speculate to an attacker-controlled address. This can happen when
+there are unbalanced CALLs/RETs after a context switch or VMEXIT.
+
+* All attack vectors can potentially be mitigated by flushing out any
+ poisoned RSB entries using an RSB filling sequence
+ [#intel-rsb-filling]_ [#amd-rsb-filling]_ when transitioning between
+ untrusted and trusted domains. But this has a performance impact and
+  should be avoided whenever possible. (A conceptual sketch of a
+  filling sequence follows at the end of this section.)
+
+ .. DANGER::
+ **FIXME**: Currently we're flushing 32 entries. However, some CPU
+ models have more than 32 entries. The loop count needs to be
+ increased for those. More detailed information is needed about RSB
+ sizes.
+
+* On context switch, the user->user mitigation requires ensuring the
+ RSB gets filled or cleared whenever IBPB gets written [#cond-ibpb]_
+ during a context switch:
+
+ * AMD:
+ On Zen 4+, IBPB (or SBPB [#amd-sbpb]_ if used) clears the RSB.
+ This is indicated by IBPB_RET in CPUID [#amd-ibpb-rsb]_.
+
+    On Zen < 4, the RSB filling sequence [#amd-rsb-filling]_ must
+    always be done in addition to IBPB [#amd-ibpb-no-rsb]_. This is
+ indicated by X86_BUG_IBPB_NO_RET.
+
+ * Intel:
+ IBPB always clears the RSB:
+
+ "Software that executed before the IBPB command cannot control
+ the predicted targets of indirect branches executed after the
+ command on the same logical processor. The term indirect branch
+ in this context includes near return instructions, so these
+ predicted targets may come from the RSB." [#intel-ibpb-rsb]_
+
+* On context switch, user->kernel attacks are prevented by SMEP. User
+ space can only insert user space addresses into the RSB. Even
+ non-canonical addresses can't be inserted due to the page gap at the
+ end of the user canonical address space reserved by TASK_SIZE_MAX.
+ A SMEP #PF at instruction fetch prevents the kernel from speculatively
+ executing user space.
+
+ * AMD:
+ "Finally, branches that are predicted as 'ret' instructions get
+ their predicted targets from the Return Address Predictor (RAP).
+ AMD recommends software use a RAP stuffing sequence (mitigation
+ V2-3 in [2]) and/or Supervisor Mode Execution Protection (SMEP)
+ to ensure that the addresses in the RAP are safe for
+ speculation. Collectively, we refer to these mitigations as "RAP
+ Protection"." [#amd-smep-rsb]_
+
+ * Intel:
+ "On processors with enhanced IBRS, an RSB overwrite sequence may
+ not suffice to prevent the predicted target of a near return
+ from using an RSB entry created in a less privileged predictor
+ mode. Software can prevent this by enabling SMEP (for
+ transitions from user mode to supervisor mode) and by having
+ IA32_SPEC_CTRL.IBRS set during VM exits." [#intel-smep-rsb]_
+
+* On VMEXIT, guest->host attacks are mitigated by eIBRS (and PBRSB
+ mitigation if needed):
+
+ * AMD:
+ "When Automatic IBRS is enabled, the internal return address
+ stack used for return address predictions is cleared on VMEXIT."
+ [#amd-eibrs-vmexit]_
+
+ * Intel:
+ "On processors with enhanced IBRS, an RSB overwrite sequence may
+ not suffice to prevent the predicted target of a near return
+ from using an RSB entry created in a less privileged predictor
+ mode. Software can prevent this by enabling SMEP (for
+ transitions from user mode to supervisor mode) and by having
+ IA32_SPEC_CTRL.IBRS set during VM exits. Processors with
+ enhanced IBRS still support the usage model where IBRS is set
+ only in the OS/VMM for OSes that enable SMEP. To do this, such
+ processors will ensure that guest behavior cannot control the
+ RSB after a VM exit once IBRS is set, even if IBRS was not set
+ at the time of the VM exit." [#intel-eibrs-vmexit]_
+
+ Note that some Intel CPUs are susceptible to Post-barrier Return
+ Stack Buffer Predictions (PBRSB) [#intel-pbrsb]_, where the last
+ CALL from the guest can be used to predict the first unbalanced RET.
+ In this case the PBRSB mitigation is needed in addition to eIBRS.
+
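As a refresher, a filling sequence overwrites potentially poisoned RSB entries
by executing a run of CALLs whose return targets are all benign speculation
traps, then rewinding the software stack in one step. A conceptual x86-64
sketch follows; this is not the kernel's exact code (see the
__FILL_RETURN_BUFFER macro in arch/x86/include/asm/nospec-branch.h for the
real sequence and its additional speculation traps)::

    static inline void rsb_fill_sketch(void)
    {
        unsigned long loops;

        asm volatile("mov $32, %0\n\t"
                     "1:\n\t"
                     "call 2f\n\t" /* pushes a benign entry into the RSB */
                     "int3\n\t"    /* traps speculation through stale entries */
                     "2:\n\t"
                     "dec %0\n\t"
                     "jnz 1b\n\t"
                     /* drop the 32 return addresses accumulated above */
                     "add $(32 * 8), %%rsp"
                     : "=r" (loops) : : "memory");
    }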
+AMD RETBleed / SRSO / Branch Type Confusion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On AMD, poisoned RSB entries can also be created by the AMD RETBleed
+variant [#retbleed-paper]_ [#amd-btc]_ or by Speculative Return Stack
+Overflow [#amd-srso]_ (Inception [#inception-paper]_). The kernel
+protects itself by replacing every RET in the kernel with a branch to a
+single safe RET.
+
+----
+
+RSB underflow (Intel only)
+==========================
+
+RSB Alternate (RSBA) ("Intel Retbleed")
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some Intel Skylake-generation CPUs are susceptible to the Intel variant
+of RETBleed [#retbleed-paper]_ (Return Stack Buffer Underflow
+[#intel-rsbu]_). If a RET is executed when the RSB buffer is empty due
+to mismatched CALLs/RETs or returning from a deep call stack, the branch
+predictor can fall back to using the Branch Target Buffer (BTB). If a
+user forces a BTB collision then the RET can speculatively branch to a
+user-controlled address.
+
+* Note that RSB filling doesn't fully mitigate this issue. If there
+ are enough unbalanced RETs, the RSB may still underflow and fall back
+ to using a poisoned BTB entry.
+
+* On context switch, user->user underflow attacks are mitigated by the
+ conditional IBPB [#cond-ibpb]_ on context switch which effectively
+ clears the BTB:
+
+ * "The indirect branch predictor barrier (IBPB) is an indirect branch
+ control mechanism that establishes a barrier, preventing software
+ that executed before the barrier from controlling the predicted
+ targets of indirect branches executed after the barrier on the same
+ logical processor." [#intel-ibpb-btb]_
+
+* On context switch and VMEXIT, user->kernel and guest->host RSB
+ underflows are mitigated by IBRS or eIBRS:
+
+ * "Enabling IBRS (including enhanced IBRS) will mitigate the "RSBU"
+ attack demonstrated by the researchers. As previously documented,
+ Intel recommends the use of enhanced IBRS, where supported. This
+ includes any processor that enumerates RRSBA but not RRSBA_DIS_S."
+ [#intel-rsbu]_
+
+ However, note that eIBRS and IBRS do not mitigate intra-mode attacks.
+ Like RRSBA below, this is mitigated by clearing the BHB on kernel
+ entry.
+
+ As an alternative to classic IBRS, call depth tracking (combined with
+ retpolines) can be used to track kernel returns and fill the RSB when
+ it gets close to being empty.
+
+Restricted RSB Alternate (RRSBA)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some newer Intel CPUs have Restricted RSB Alternate (RRSBA) behavior,
+which, similar to RSBA described above, also falls back to using the BTB
+on RSB underflow. The only difference is that the predicted targets are
+restricted to the current domain when eIBRS is enabled:
+
+* "Restricted RSB Alternate (RRSBA) behavior allows alternate branch
+ predictors to be used by near RET instructions when the RSB is
+ empty. When eIBRS is enabled, the predicted targets of these
+ alternate predictors are restricted to those belonging to the
+  indirect branch predictor entries of the current prediction domain."
+  [#intel-eibrs-rrsba]_
+
+When a CPU with RRSBA is vulnerable to Branch History Injection
+[#bhi-paper]_ [#intel-bhi]_, an RSB underflow could be used for an
+intra-mode BTI attack. This is mitigated by clearing the BHB on
+kernel entry.
+
+However if the kernel uses retpolines instead of eIBRS, it needs to
+disable RRSBA:
+
+* "Where software is using retpoline as a mitigation for BHI or
+ intra-mode BTI, and the processor both enumerates RRSBA and
+ enumerates RRSBA_DIS controls, it should disable this behavior."
+ [#intel-retpoline-rrsba]_
+
+----
+
+References
+==========
+
+.. [#spectre-rsb] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://arxiv.org/pdf/1807.07940.pdf>`_
+
+.. [#intel-rsb-filling] "Empty RSB Mitigation on Skylake-generation" in `Retpoline: A Branch Target Injection Mitigation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/retpoline-branch-target-injection-mitigation.html#inpage-nav-5-1>`_
+
+.. [#amd-rsb-filling] "Mitigation V2-3" in `Software Techniques for Managing Speculation <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/software-techniques-for-managing-speculation.pdf>`_
+
+.. [#cond-ibpb] Whether IBPB is written depends on whether the prev and/or next task is protected from Spectre attacks. It typically requires opting in per task or system-wide. For more details see the documentation for the ``spectre_v2_user`` cmdline option in Documentation/admin-guide/kernel-parameters.txt.
+
+.. [#amd-sbpb] IBPB without flushing of branch type predictions. Only exists for AMD.
+
+.. [#amd-ibpb-rsb] "Function 8000_0008h -- Processor Capacity Parameters and Extended Feature Identification" in `AMD64 Architecture Programmer's Manual Volume 3: General-Purpose and System Instructions <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24594.pdf>`_. SBPB behaves the same way according to `this email <https://lore.kernel.org/5175b163a3736ca5fd01cedf406735636c99a>`_.
+
+.. [#amd-ibpb-no-rsb] `Spectre Attacks: Exploiting Speculative Execution <https://comsec.ethz.ch/wp-content/files/ibpb_sp25.pdf>`_
+
+.. [#intel-ibpb-rsb] "Introduction" in `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_
+
+.. [#amd-smep-rsb] "Existing Mitigations" in `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_
+
+.. [#intel-smep-rsb] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_
+
+.. [#amd-eibrs-vmexit] "Extended Feature Enable Register (EFER)" in `AMD64 Architecture Programmer's Manual Volume 2: System Programming <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf>`_
+
+.. [#intel-eibrs-vmexit] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_
+
+.. [#intel-pbrsb] `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_
+
+.. [#retbleed-paper] `RETBleed: Arbitrary Speculative Code Execution with Return Instruction <https://comsec.ethz.ch/wp-content/files/retbleed_sec22.pdf>`_
+
+.. [#amd-btc] `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_
+
+.. [#amd-srso] `Technical Update Regarding Speculative Return Stack Overflow <https://www.amd.com/content/dam/amd/en/documents/corporate/cr/speculative-return-stack-overflow-whitepaper.pdf>`_
+
+.. [#inception-paper] `Inception: Exposing New Attack Surfaces with Training in Transient Execution <https://comsec.ethz.ch/wp-content/files/inception_sec23.pdf>`_
+
+.. [#intel-rsbu] `Return Stack Buffer Underflow / Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_
+
+.. [#intel-ibpb-btb] `Indirect Branch Predictor Barrier <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-predictor-barrier.html>`_
+
+.. [#intel-eibrs-rrsba] "Guidance for RSBU" in `Return Stack Buffer Underflow / Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_
+
+.. [#bhi-paper] `Branch History Injection: On the Effectiveness of Hardware Mitigations Against Cross-Privilege Spectre-v2 Attacks <http://download.vusec.net/papers/bhi-spectre-bhb_sec22.pdf>`_
+
+.. [#intel-bhi] `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
+
+.. [#intel-retpoline-rrsba] "Retpoline" in `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 76e538c77e31..d9fd26b95b34 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1407,18 +1407,15 @@
earlyprintk=serial[,0x...[,baudrate]]
earlyprintk=ttySn[,baudrate]
earlyprintk=dbgp[debugController#]
+ earlyprintk=mmio32,membase[,{nocfg|baudrate}]
earlyprintk=pciserial[,force],bus:device.function[,{nocfg|baudrate}]
earlyprintk=xdbc[xhciController#]
earlyprintk=bios
- earlyprintk=mmio,membase[,{nocfg|baudrate}]
earlyprintk is useful when the kernel crashes before
the normal console is initialized. It is not enabled by
default because it has some cosmetic problems.
- Only 32-bit memory addresses are supported for "mmio"
- and "pciserial" devices.
-
Use "nocfg" to skip UART configuration, assume
BIOS/firmware has configured UART correctly.
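For example, a UART behind a 32-bit MMIO window could be selected for early
output with (the address and baud rate below are made up)::

	earlyprintk=mmio32,0xfedc9000,115200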
diff --git a/Documentation/admin-guide/xfs.rst b/Documentation/admin-guide/xfs.rst
index b67772cf36d6..3e76276bd488 100644
--- a/Documentation/admin-guide/xfs.rst
+++ b/Documentation/admin-guide/xfs.rst
@@ -124,6 +124,14 @@ When mounting an XFS filesystem, the following options are accepted.
controls the size of each buffer and so is also relevant to
this case.
+ lifetime (default) or nolifetime
+ Enable data placement based on write life time hints provided
+ by the user. This turns on co-allocation of data of similar
+ life times when statistically favorable to reduce garbage
+ collection cost.
+
+ These options are only available for zoned rt file systems.
+
logbsize=value
Set the size of each in-memory log buffer. The size may be
specified in bytes, or in kilobytes with a "k" suffix.
@@ -143,6 +151,14 @@ When mounting an XFS filesystem, the following options are accepted.
optional, and the log section can be separate from the data
section or contained within it.
+ max_open_zones=value
+ Specify the max number of zones to keep open for writing on a
+		zoned rt device. Many open zones aid file data separation
+ but may impact performance on HDDs.
+
+ If ``max_open_zones`` is not specified, the value is determined
+ by the capabilities and the size of the zoned rt device.
+
noalign
Data allocations will not be aligned at stripe unit
boundaries. This is only relevant to filesystems created
@@ -542,3 +558,37 @@ The interesting knobs for XFS workqueues are as follows:
nice Relative priority of scheduling the threads. These are the
same nice levels that can be applied to userspace processes.
============ ===========
+
+Zoned Filesystems
+=================
+
+For zoned file systems, the following attributes are exposed in:
+
+ /sys/fs/xfs/<dev>/zoned/
+
+ max_open_zones (Min: 1 Default: Varies Max: UINTMAX)
+ This read-only attribute exposes the maximum number of open zones
+ available for data placement. The value is determined at mount time and
+ is limited by the capabilities of the backing zoned device, file system
+ size and the max_open_zones mount option.
+
+ zonegc_low_space (Min: 0 Default: 0 Max: 100)
+ Define a percentage for how much of the unused space that GC should keep
+ available for writing. A high value will reclaim more of the space
+ occupied by unused blocks, creating a larger buffer against write
+ bursts at the cost of increased write amplification. Regardless
+ of this value, garbage collection will always aim to free a minimum
+ amount of blocks to keep max_open_zones open for data placement purposes.
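As a usage example, the two zoned-specific mount options documented above
could be combined as follows (device and mount point are hypothetical)::

	# mount -t xfs -o max_open_zones=32,lifetime /dev/nvme0n1 /mnt/zoned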
diff --git a/Documentation/arch/x86/cpuinfo.rst b/Documentation/arch/x86/cpuinfo.rst
index 6ef426a52cdc..f80e2a558d2a 100644
--- a/Documentation/arch/x86/cpuinfo.rst
+++ b/Documentation/arch/x86/cpuinfo.rst
@@ -79,8 +79,9 @@ feature flags.
How are feature flags created?
==============================
-a: Feature flags can be derived from the contents of CPUID leaves.
-------------------------------------------------------------------
+Feature flags can be derived from the contents of CPUID leaves
+--------------------------------------------------------------
+
These feature definitions are organized mirroring the layout of CPUID
leaves and grouped in words with offsets as mapped in enum cpuid_leafs
in cpufeatures.h (see arch/x86/include/asm/cpufeatures.h for details).
@@ -89,8 +90,9 @@ cpufeatures.h, and if it is detected at run time, the flags will be
displayed accordingly in /proc/cpuinfo. For example, the flag "avx2"
comes from X86_FEATURE_AVX2 in cpufeatures.h.
-b: Flags can be from scattered CPUID-based features.
-----------------------------------------------------
+Flags can be from scattered CPUID-based features
+------------------------------------------------
+
Hardware features enumerated in sparsely populated CPUID leaves get
software-defined values. Still, CPUID needs to be queried to determine
if a given feature is present. This is done in init_scattered_cpuid_features().
@@ -104,8 +106,9 @@ has only one feature and would waste 31 bits of space in the x86_capability[]
array. Since there is a struct cpuinfo_x86 for each possible CPU, the wasted
memory is not trivial.
-c: Flags can be created synthetically under certain conditions for hardware features.
--------------------------------------------------------------------------------------
+Flags can be created synthetically under certain conditions for hardware features
+---------------------------------------------------------------------------------
+
Examples of conditions include whether certain features are present in
MSR_IA32_CORE_CAPS or specific CPU models are identified. If the needed
conditions are met, the features are enabled by the set_cpu_cap or
@@ -114,8 +117,8 @@ the feature X86_FEATURE_SPLIT_LOCK_DETECT will be enabled and
"split_lock_detect" will be displayed. The flag "ring3mwait" will be
displayed only when running on INTEL_XEON_PHI_[KNL|KNM] processors.
-d: Flags can represent purely software features.
-------------------------------------------------
+Flags can represent purely software features
+--------------------------------------------
These flags do not represent hardware features. Instead, they represent a
software feature implemented in the kernel. For example, Kernel Page Table
Isolation is a purely software feature and its feature flag X86_FEATURE_PTI is
@@ -130,14 +133,18 @@ x86_cap/bug_flags[] arrays in kernel/cpu/capflags.c. The names in the
resulting x86_cap/bug_flags[] are used to populate /proc/cpuinfo. The naming
of flags in the x86_cap/bug_flags[] are as follows:
-a: The name of the flag is from the string in X86_FEATURE_<name> by default.
-----------------------------------------------------------------------------
-By default, the flag <name> in /proc/cpuinfo is extracted from the respective
-X86_FEATURE_<name> in cpufeatures.h. For example, the flag "avx2" is from
-X86_FEATURE_AVX2.
+Flags do not appear by default in /proc/cpuinfo
+-----------------------------------------------
+
+Feature flags are omitted by default from /proc/cpuinfo as it does not make
+sense for the feature to be exposed to userspace in most cases. For example,
+X86_FEATURE_ALWAYS is defined in cpufeatures.h but that flag is an internal
+kernel feature used in the alternative runtime patching functionality. So the
+flag does not appear in /proc/cpuinfo.
+
+Specify a flag name if absolutely needed
+----------------------------------------
-b: The naming can be overridden.
---------------------------------
If the comment on the line for the #define X86_FEATURE_* starts with a
double-quote character (""), the string inside the double-quote characters
will be the name of the flags. For example, the flag "sse4_1" comes from
@@ -148,36 +155,31 @@ needed. For instance, /proc/cpuinfo is a userspace interface and must remain
constant. If, for some reason, the naming of X86_FEATURE_<name> changes, one
shall override the new naming with the name already used in /proc/cpuinfo.
-c: The naming override can be "", which means it will not appear in /proc/cpuinfo.
-----------------------------------------------------------------------------------
-The feature shall be omitted from /proc/cpuinfo if it does not make sense for
-the feature to be exposed to userspace. For example, X86_FEATURE_ALWAYS is
-defined in cpufeatures.h but that flag is an internal kernel feature used
-in the alternative runtime patching functionality. So, its name is overridden
-with "". Its flag will not appear in /proc/cpuinfo.
-
Flags are missing when one or more of these happen
==================================================
-a: The hardware does not enumerate support for it.
---------------------------------------------------
+The hardware does not enumerate support for it
+----------------------------------------------
+
For example, when a new kernel is running on old hardware or the feature is
not enabled by boot firmware. Even if the hardware is new, there might be a
problem enabling the feature at run time, in which case the flag will not be displayed.
-b: The kernel does not know about the flag.
--------------------------------------------
+The kernel does not know about the flag
+---------------------------------------
+
For example, when an old kernel is running on new hardware.
-c: The kernel disabled support for it at compile-time.
-------------------------------------------------------
+The kernel disabled support for it at compile-time
+--------------------------------------------------
+
For example, if 5-level-paging is not enabled when building (i.e.,
CONFIG_X86_5LEVEL is not selected) the flag "la57" will not show up [#f1]_.
Even though the feature will still be detected via CPUID, the kernel disables
it by clearing via setup_clear_cpu_cap(X86_FEATURE_LA57).
-d: The feature is disabled at boot-time.
-----------------------------------------
+The feature is disabled at boot-time
+------------------------------------
A feature can be disabled either using a command-line parameter or because
it failed to be enabled. The command-line parameter clearcpuid= can be used
to disable features using the feature number as defined in
@@ -190,8 +192,9 @@ disable specific features. The list of parameters includes, but is not limited
to, nofsgsbase, nosgx, noxsave, etc. 5-level paging can also be disabled using
"no5lvl".
-e: The feature was known to be non-functional.
-----------------------------------------------
+The feature was known to be non-functional
+------------------------------------------
+
The feature was known to be non-functional because a dependency was
missing at runtime. For example, AVX flags will not show up if XSAVE feature
is disabled since they depend on XSAVE feature. Another example would be broken
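To make the naming rules above concrete, compare these two entries from
cpufeatures.h (reproduced from memory, so treat them as illustrative): the
first has no quoted name in its comment and stays hidden, while the second is
shown in /proc/cpuinfo under its quoted name::

    /* No quoted name: internal flag, omitted from /proc/cpuinfo */
    #define X86_FEATURE_ALWAYS      ( 3*32+21) /* Always-present feature */

    /* Quoted name: appears in /proc/cpuinfo as "sse4_1" */
    #define X86_FEATURE_XMM4_1      ( 4*32+19) /* "sse4_1" SSE-4.1 */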
diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
index 6ad78429dc74..c92341888a28 100644
--- a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
+++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ceva AHCI SATA Controller
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
diff --git a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
index 350fb8f400f0..5952e6448ed4 100644
--- a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
@@ -111,11 +111,27 @@ properties:
unevaluatedProperties: false
port@1:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
DSI output port node to the panel or the next bridge
in the chain
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ description: array of physical DSI data lane indexes.
+ minItems: 1
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+
required:
- port@0
- port@1
diff --git a/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml b/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
index bb93baa88879..e13e9d6dd148 100644
--- a/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
+++ b/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
@@ -12,7 +12,6 @@ description:
PS_MODE). Every pin can be configured as input/output.
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
index 6076ddf56bb5..c49688be1058 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
@@ -19,6 +19,7 @@ properties:
- fsl,imx8mp-irqsteer
- fsl,imx8qm-irqsteer
- fsl,imx8qxp-irqsteer
+ - fsl,imx94-irqsteer
- const: fsl,imx-irqsteer
reg:
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
index a4dfa09344dd..f85ee5d20ccb 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
@@ -9,15 +9,6 @@ title: Renesas R-Car Timer Pulse Unit PWM Controller
maintainers:
- Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
-select:
- properties:
- compatible:
- contains:
- const: renesas,tpu
- required:
- - compatible
- - '#pwm-cells'
-
properties:
compatible:
items:
diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
index 1f1b42dde94d..1db85fc9966f 100644
--- a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Zynq UltraScale+ MPSoC and Versal reset
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
index 31295be91013..234089b5954d 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Layerscape Reset Registers Module
maintainers:
- - Frank Li
+ - Frank Li <Frank.Li@nxp.com>
description:
Reset Module includes chip reset, service processor control and Reset Control
diff --git a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
index 891cca009528..6b80b060672e 100644
--- a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
@@ -18,9 +18,14 @@ description: |
properties:
compatible:
- enum:
- - nxp,imx95-sysctr-timer
- - nxp,sysctr-timer
+ oneOf:
+ - enum:
+ - nxp,imx95-sysctr-timer
+ - nxp,sysctr-timer
+ - items:
+ - enum:
+ - nxp,imx94-sysctr-timer
+ - const: nxp,imx95-sysctr-timer
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/timer/renesas,tpu.yaml b/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
deleted file mode 100644
index 7a473b302775..000000000000
--- a/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/timer/renesas,tpu.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Renesas H8/300 Timer Pulse Unit
-
-maintainers:
- - Yoshinori Sato <ysato@users.sourceforge.jp>
-
-description:
- The TPU is a 16bit timer/counter with configurable clock inputs and
- programmable compare match.
- This implementation supports only cascade mode.
-
-select:
- properties:
- compatible:
- contains:
- const: renesas,tpu
- '#pwm-cells': false
- required:
- - compatible
-
-properties:
- compatible:
- const: renesas,tpu
-
- reg:
- items:
- - description: First channel
- - description: Second channel
-
- clocks:
- maxItems: 1
-
- clock-names:
- const: fck
-
-required:
- - compatible
- - reg
- - clocks
- - clock-names
-
-additionalProperties: false
-
-examples:
- - |
- tpu: tpu@ffffe0 {
- compatible = "renesas,tpu";
- reg = <0xffffe0 16>, <0xfffff0 12>;
- clocks = <&pclk>;
- clock-names = "fck";
- };
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
index b5843f4d17d8..379dacacb526 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx SuperSpeed DWC3 USB SoC controller
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
diff --git a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
index e2a72deae776..c68c04da3399 100644
--- a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
+++ b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
@@ -17,7 +17,6 @@ description:
maintainers:
- Michal Simek <michal.simek@amd.com>
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
diff --git a/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml b/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
index a7f75fe36665..f295aa9d9ee7 100644
--- a/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx udc controller
maintainers:
- - Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
diff --git a/Documentation/filesystems/ext4/super.rst b/Documentation/filesystems/ext4/super.rst
index a1eb4a11a1d0..1b240661bfa3 100644
--- a/Documentation/filesystems/ext4/super.rst
+++ b/Documentation/filesystems/ext4/super.rst
@@ -328,9 +328,13 @@ The ext4 superblock is laid out as follows in
- s_checksum_type
- Metadata checksum algorithm type. The only valid value is 1 (crc32c).
* - 0x176
- - __le16
- - s_reserved_pad
- -
- __u8
- s_encryption_level
- Versioning level for encryption.
* - 0x177
- __u8
- s_reserved_pad
- Padding to next 32 bits.
* - 0x178
- __le64
- s_kbytes_written
@@ -466,9 +470,13 @@ The ext4 superblock is laid out as follows in
- s_last_error_time_hi
- Upper 8 bits of the s_last_error_time field.
* - 0x27A
- - __u8
- - s_pad[2]
- - Zero padding.
+ - __u8
+ - s_first_error_errcode
+ -
+ * - 0x27B
+ - __u8
+ - s_last_error_errcode
+ -
* - 0x27C
- __le16
- s_encoding
diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml
index 86ba9ac2a521..b538bb99ee9b 100644
--- a/Documentation/netlink/specs/ovs_vport.yaml
+++ b/Documentation/netlink/specs/ovs_vport.yaml
@@ -123,12 +123,12 @@ attribute-sets:
operations:
name-prefix: ovs-vport-cmd-
+ fixed-header: ovs-header
list:
-
name: new
doc: Create a new OVS vport
attribute-set: vport
- fixed-header: ovs-header
do:
request:
attributes:
@@ -141,7 +141,6 @@ operations:
name: del
doc: Delete existing OVS vport from a data path
attribute-set: vport
- fixed-header: ovs-header
do:
request:
attributes:
@@ -152,7 +151,6 @@ operations:
name: get
doc: Get / dump OVS vport configuration and state
attribute-set: vport
- fixed-header: ovs-header
do: &vport-get-op
request:
attributes:
diff --git a/Documentation/netlink/specs/rt-link.yaml b/Documentation/netlink/specs/rt-link.yaml
index a50d9d7d882e..726dfa083d14 100644
--- a/Documentation/netlink/specs/rt-link.yaml
+++ b/Documentation/netlink/specs/rt-link.yaml
@@ -1119,11 +1119,10 @@ attribute-sets:
-
name: prop-list
type: nest
- nested-attributes: link-attrs
+ nested-attributes: prop-list-link-attrs
-
name: alt-ifname
type: string
- multi-attr: true
-
name: perm-address
type: binary
@@ -1170,6 +1169,13 @@ attribute-sets:
name: netns-immutable
type: u8
-
+ name: prop-list-link-attrs
+ subset-of: link-attrs
+ attributes:
+ -
+ name: alt-ifname
+ multi-attr: true
+ -
name: af-spec-attrs
attributes:
-
@@ -1591,7 +1597,7 @@ attribute-sets:
name: nf-call-iptables
type: u8
-
- name: nf-call-ip6-tables
+ name: nf-call-ip6tables
type: u8
-
name: nf-call-arptables
@@ -2083,7 +2089,7 @@ attribute-sets:
name: id
type: u16
-
- name: flag
+ name: flags
type: binary
struct: ifla-vlan-flags
-
@@ -2171,7 +2177,7 @@ attribute-sets:
type: binary
struct: ifla-cacheinfo
-
- name: icmp6-stats
+ name: icmp6stats
type: binary
struct: ifla-icmp6-stats
-
@@ -2185,9 +2191,10 @@ attribute-sets:
type: u32
-
name: mctp-attrs
+ name-prefix: ifla-mctp-
attributes:
-
- name: mctp-net
+ name: net
type: u32
-
name: phys-binding
@@ -2469,7 +2476,6 @@ operations:
- min-mtu
- max-mtu
- prop-list
- - alt-ifname
- perm-address
- proto-down-reason
- parent-dev-name
diff --git a/Documentation/netlink/specs/rt-neigh.yaml b/Documentation/netlink/specs/rt-neigh.yaml
index e670b6dc07be..a843caa72259 100644
--- a/Documentation/netlink/specs/rt-neigh.yaml
+++ b/Documentation/netlink/specs/rt-neigh.yaml
@@ -13,25 +13,25 @@ definitions:
type: struct
members:
-
- name: family
+ name: ndm-family
type: u8
-
- name: pad
+ name: ndm-pad
type: pad
len: 3
-
- name: ifindex
+ name: ndm-ifindex
type: s32
-
- name: state
+ name: ndm-state
type: u16
enum: nud-state
-
- name: flags
+ name: ndm-flags
type: u8
enum: ntf-flags
-
- name: type
+ name: ndm-type
type: u8
enum: rtm-type
-
@@ -189,7 +189,7 @@ attribute-sets:
type: binary
display-hint: ipv4
-
- name: lladr
+ name: lladdr
type: binary
display-hint: mac
-
diff --git a/Documentation/userspace-api/mseal.rst b/Documentation/userspace-api/mseal.rst
index 1dabfc29be0d..7195a7f91107 100644
--- a/Documentation/userspace-api/mseal.rst
+++ b/Documentation/userspace-api/mseal.rst
@@ -27,7 +27,7 @@ SYSCALL
=======
mseal syscall signature
-----------------------
- ``int mseal(void \* addr, size_t len, unsigned long flags)``
+ ``int mseal(void *addr, size_t len, unsigned long flags)``
**addr**/**len**: virtual memory address range.
The address range set by **addr**/**len** must meet:
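Since the corrected signature is the point of this fix, a short usage sketch may make the sealed semantics concrete. mseal() has no libc wrapper, so it is invoked through syscall(2); this assumes kernel headers new enough to define SYS_mseal (6.10+), and the flags argument must currently be zero:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 4096;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;

            /* Seal the mapping; flags must be 0. */
            if (syscall(SYS_mseal, p, len, 0) != 0) {
                    perror("mseal");
                    return 1;
            }

            /* Changing a sealed range must now fail with EPERM. */
            if (mprotect(p, len, PROT_READ) != 0)
                    perror("mprotect on sealed range (expected)");

            return 0;
    }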
diff --git a/Documentation/wmi/devices/msi-wmi-platform.rst b/Documentation/wmi/devices/msi-wmi-platform.rst
index 31a136942892..73197b31926a 100644
--- a/Documentation/wmi/devices/msi-wmi-platform.rst
+++ b/Documentation/wmi/devices/msi-wmi-platform.rst
@@ -138,6 +138,10 @@ input data, the meaning of which depends on the subfeature being accessed.
The output buffer contains a single byte which signals success or failure (``0x00`` on failure)
and 31 bytes of output data, the meaning of which depends on the subfeature being accessed.
+.. note::
+ The ACPI control method responsible for handling the WMI method calls is not thread-safe.
+ This is a firmware bug that needs to be handled inside the driver itself.
+
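Because the note leaves the serialization strategy to the driver, a minimal kernel-context sketch of one way to do it follows; the device type and the evaluation helper are hypothetical stand-ins, and only the locking pattern is the point:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct my_wmi_dev;                                   /* hypothetical */
    int my_wmi_evaluate(struct my_wmi_dev *dev, u8 method_id,
                        const void *in, void *out);      /* hypothetical */

    static DEFINE_MUTEX(wmi_method_lock);

    static int serialized_wmi_call(struct my_wmi_dev *dev, u8 method_id,
                                   const void *in, void *out)
    {
            int ret;

            /*
             * The ACPI control method is not thread-safe (firmware bug),
             * so allow only one WMI method evaluation at a time.
             */
            mutex_lock(&wmi_method_lock);
            ret = my_wmi_evaluate(dev, method_id, in, out);
            mutex_unlock(&wmi_method_lock);

            return ret;
    }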
WMI method Get_EC()
-------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 350009769173..657a67f9031e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6335,6 +6335,7 @@ F: Documentation/process/cve.rst
CW1200 WLAN driver
S: Orphan
+L: linux-wireless@vger.kernel.org
F: drivers/net/wireless/st/
F: include/linux/platform_data/net-cw1200.h
@@ -10947,6 +10948,7 @@ F: include/linux/platform_data/huawei-gaokun-ec.h
HUGETLB SUBSYSTEM
M: Muchun Song <muchun.song@linux.dev>
+R: Oscar Salvador <osalvador@suse.de>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
@@ -12803,6 +12805,7 @@ F: lib/Kconfig.kcsan
F: scripts/Makefile.kcsan
KDUMP
+M: Andrew Morton <akpm@linux-foundation.org>
M: Baoquan He <bhe@redhat.com>
R: Vivek Goyal <vgoyal@redhat.com>
R: Dave Young <dyoung@redhat.com>
@@ -13104,6 +13107,8 @@ F: fs/kernfs/
F: include/linux/kernfs.h
KEXEC
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Baoquan He <bhe@redhat.com>
L: kexec@lists.infradead.org
W: http://kernel.org/pub/linux/utils/kernel/kexec/
F: include/linux/kexec.h
@@ -14276,6 +14281,7 @@ S: Odd fixes
F: drivers/net/ethernet/marvell/sk*
MARVELL LIBERTAS WIRELESS DRIVER
+L: linux-wireless@vger.kernel.org
L: libertas-dev@lists.infradead.org
S: Orphan
F: drivers/net/wireless/marvell/libertas/
@@ -17361,7 +17367,7 @@ T: git git://git.infradead.org/nvme.git
F: drivers/nvme/target/
NVMEM FRAMEWORK
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/srini/nvmem.git
F: Documentation/ABI/stable/sysfs-bus-nvmem
@@ -19577,7 +19583,7 @@ S: Supported
F: drivers/crypto/intel/qat/
QCOM AUDIO (ASoC) DRIVERS
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
L: linux-sound@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Supported
@@ -19750,6 +19756,7 @@ F: drivers/media/tuners/qt1010*
QUALCOMM ATH12K WIRELESS DRIVER
M: Jeff Johnson <jjohnson@kernel.org>
+L: linux-wireless@vger.kernel.org
L: ath12k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
@@ -19759,6 +19766,7 @@ N: ath12k
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Jeff Johnson <jjohnson@kernel.org>
+L: linux-wireless@vger.kernel.org
L: ath10k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -19768,6 +19776,7 @@ N: ath10k
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Jeff Johnson <jjohnson@kernel.org>
+L: linux-wireless@vger.kernel.org
L: ath11k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
@@ -19877,7 +19886,7 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
QUALCOMM FASTRPC DRIVER
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
M: Amol Maheshwari <amahesh@qti.qualcomm.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
@@ -21925,7 +21934,7 @@ S: Maintained
F: drivers/media/rc/serial_ir.c
SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
-M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Srinivas Kandagatla <srini@kernel.org>
L: linux-sound@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/slimbus/
@@ -22141,6 +22150,7 @@ F: drivers/platform/x86/touchscreen_dmi.c
SILICON LABS WIRELESS DRIVERS (for WFxxx series)
M: Jérôme Pouiller <jerome.pouiller@silabs.com>
+L: linux-wireless@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
F: drivers/net/wireless/silabs/
diff --git a/Makefile b/Makefile
index f42418556507..c91afd55099e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm64/include/asm/rqspinlock.h b/arch/arm64/include/asm/rqspinlock.h
index 5b80785324b6..9ea0a74e5892 100644
--- a/arch/arm64/include/asm/rqspinlock.h
+++ b/arch/arm64/include/asm/rqspinlock.h
@@ -86,7 +86,7 @@
#endif
-#define res_smp_cond_load_acquire_timewait(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
+#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
#include <asm-generic/rqspinlock.h>
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index db8161ebb43c..99fb986fca6e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -332,6 +332,10 @@ config HAVE_MARCH_Z16_FEATURES
def_bool n
select HAVE_MARCH_Z15_FEATURES
+config HAVE_MARCH_Z17_FEATURES
+ def_bool n
+ select HAVE_MARCH_Z16_FEATURES
+
choice
prompt "Processor type"
default MARCH_Z196
@@ -397,6 +401,14 @@ config MARCH_Z16
Select this to enable optimizations for IBM z16 (3931 and
3932 series).
+config MARCH_Z17
+ bool "IBM z17"
+ select HAVE_MARCH_Z17_FEATURES
+ depends on $(cc-option,-march=z17)
+ help
+ Select this to enable optimizations for IBM z17 (9175 and
+ 9176 series).
+
endchoice
config MARCH_Z10_TUNE
@@ -420,6 +432,9 @@ config MARCH_Z15_TUNE
config MARCH_Z16_TUNE
def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT
+config MARCH_Z17_TUNE
+ def_bool TUNE_Z17 || MARCH_Z17 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -464,6 +479,10 @@ config TUNE_Z16
bool "IBM z16"
depends on $(cc-option,-mtune=z16)
+config TUNE_Z17
+ bool "IBM z17"
+ depends on $(cc-option,-mtune=z17)
+
endchoice
config 64BIT
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index b06dc53bfed5..7679bc16b692 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -48,6 +48,7 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13
mflags-$(CONFIG_MARCH_Z14) := -march=z14
mflags-$(CONFIG_MARCH_Z15) := -march=z15
mflags-$(CONFIG_MARCH_Z16) := -march=z16
+mflags-$(CONFIG_MARCH_Z17) := -march=z17
export CC_FLAGS_MARCH := $(mflags-y)
@@ -61,6 +62,7 @@ cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
cflags-$(CONFIG_MARCH_Z14_TUNE) += -mtune=z14
cflags-$(CONFIG_MARCH_Z15_TUNE) += -mtune=z15
cflags-$(CONFIG_MARCH_Z16_TUNE) += -mtune=z16
+cflags-$(CONFIG_MARCH_Z17_TUNE) += -mtune=z17
cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h
index fd9eef3be44c..11a71bd14954 100644
--- a/arch/s390/include/asm/march.h
+++ b/arch/s390/include/asm/march.h
@@ -33,6 +33,10 @@
#define MARCH_HAS_Z16_FEATURES 1
#endif
+#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
+#define MARCH_HAS_Z17_FEATURES 1
+#endif
+
#endif /* __DECOMPRESSOR */
#endif /* __ASM_S390_MARCH_H */
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 33205dd410e4..e657fad7e376 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -442,7 +442,7 @@ static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
ctrset_size = 48;
else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 128;
- else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
+ else if (cpumf_ctr_info.csvn >= 6 && cpumf_ctr_info.csvn <= 8)
ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
@@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
unsigned int type = event->attr.type;
- int err;
+ int err = -ENOENT;
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
err = __hw_perf_event_init(event, type);
else if (event->pmu->type == type)
/* Registered as unknown PMU */
err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
- else
- return -ENOENT;
-
- if (unlikely(err) && event->destroy)
- event->destroy(event);
return err;
}
@@ -1819,8 +1814,6 @@ static int cfdiag_event_init(struct perf_event *event)
event->destroy = hw_perf_event_destroy;
err = cfdiag_event_init2(event);
- if (unlikely(err))
- event->destroy(event);
out:
return err;
}
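The deletions above all follow one ownership rule: when an event-init handler fails, the destroy callback is run by the caller, so invoking it inside the init path risks a double cleanup. A plain-C toy of that contract, a sketch rather than kernel code:

    #include <stdlib.h>

    struct event {
            void *priv;
            void (*destroy)(struct event *e);
    };

    static void event_destroy(struct event *e)
    {
            free(e->priv);
    }

    static int event_init(struct event *e)
    {
            e->priv = malloc(64);
            if (!e->priv)
                    return -1;
            e->destroy = event_destroy;
            /* On any later failure: just return the error. The caller
             * owns the destroy callback, so do not invoke it here. */
            return 0;
    }

    int open_event(struct event *e)
    {
            int err = event_init(e);

            if (err && e->destroy)
                    e->destroy(e);  /* single cleanup point */
            return err;
    }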
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index e4a6bfc91080..690a293eb10d 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -237,7 +237,6 @@ CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
-
CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
@@ -365,6 +364,83 @@ CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d);
CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z17, L1D_RO_EXCL_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z17, CRSTE_1MB_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z17, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ, 0x0091);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_CHIP_HIT, 0x0093);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_DRAWER_HIT, 0x0094);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP, 0x0095);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_IV, 0x0096);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_CHIP_HIT, 0x0097);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT, 0x0098);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE, 0x0099);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER, 0x009a);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER, 0x009b);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_MEMORY, 0x009c);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE_MEMORY, 0x009d);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER_MEMORY, 0x009e);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER_MEMORY, 0x009f);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_IV, 0x00a0);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT, 0x00a1);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_IV, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_IV, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_CHIP_HIT, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_DRAWER_HIT, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_IV, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_CHIP_HIT, 0x00af);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT, 0x00b0);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_MODULE, 0x00b1);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_DRAWER, 0x00b2);
+CPUMF_EVENT_ATTR(cf_z17, ICW_OFF_DRAWER, 0x00b3);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_SAMETHRD, 0x00ca);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_DIFFTHRD, 0x00cb);
+CPUMF_EVENT_ATTR(cf_z17, INST_SAMETHRD, 0x00cc);
+CPUMF_EVENT_ATTR(cf_z17, INST_DIFFTHRD, 0x00cd);
+CPUMF_EVENT_ATTR(cf_z17, WRONG_BRANCH_PREDICTION, 0x00ce);
+CPUMF_EVENT_ATTR(cf_z17, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z17, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z17, LAST_HOST_TRANSLATIONS, 0x00e8);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TABORT, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_NO_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_SPECIAL, 0x00f6);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_ACCESS, 0x00f8);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CYCLES, 0x00fd);
+CPUMF_EVENT_ATTR(cf_z17, SORTL, 0x0100);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CC, 0x0109);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CCFINISH, 0x010a);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INVOCATIONS, 0x010b);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPLETIONS, 0x010c);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_WAIT_LOCK, 0x010d);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_HOLD_LOCK, 0x010e);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_ONCHIP, 0x0110);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_OFFCHIP, 0x0111);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_DIFF, 0x0112);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_4K_PREFETCH, 0x0114);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPL_LOCK, 0x0115);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK, 0x0116);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO, 0x0117);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
@@ -414,7 +490,7 @@ static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
NULL,
};
-static struct attribute *cpumcf_svn_67_pmu_event_attr[] __initdata = {
+static struct attribute *cpumcf_svn_678_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
@@ -779,6 +855,87 @@ static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = {
NULL,
};
+static struct attribute *cpumcf_z17_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z17, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, CRSTE_1MB_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, DTLB2_GPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, ITLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, ITLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z17, TLB2_ENGINES_BUSY),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_z17, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_z17, L1C_TLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z17, ICW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, ICW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z17, CYCLES_SAMETHRD),
+ CPUMF_EVENT_PTR(cf_z17, CYCLES_DIFFTHRD),
+ CPUMF_EVENT_PTR(cf_z17, INST_SAMETHRD),
+ CPUMF_EVENT_PTR(cf_z17, INST_DIFFTHRD),
+ CPUMF_EVENT_PTR(cf_z17, WRONG_BRANCH_PREDICTION),
+ CPUMF_EVENT_PTR(cf_z17, VX_BCD_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z17, DECIMAL_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_z17, LAST_HOST_TRANSLATIONS),
+ CPUMF_EVENT_PTR(cf_z17, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_ACCESS),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CYCLES),
+ CPUMF_EVENT_PTR(cf_z17, SORTL),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CC),
+ CPUMF_EVENT_PTR(cf_z17, DFLT_CCFINISH),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INVOCATIONS),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_COMPLETIONS),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_WAIT_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_HOLD_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_ONCHIP),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_OFFCHIP),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_INST_DIFF),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_4K_PREFETCH),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_COMPL_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK),
+ CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO),
+ CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+ CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+ NULL,
+};
+
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
@@ -859,7 +1016,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
if (ci.csvn >= 1 && ci.csvn <= 5)
csvn = cpumcf_svn_12345_pmu_event_attr;
else if (ci.csvn >= 6)
- csvn = cpumcf_svn_67_pmu_event_attr;
+ csvn = cpumcf_svn_678_pmu_event_attr;
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
@@ -892,6 +1049,10 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 0x3932:
model = cpumcf_z16_pmu_event_attr;
break;
+ case 0x9175:
+ case 0x9176:
+ model = cpumcf_z17_pmu_event_attr;
+ break;
default:
model = none;
break;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 5f60248cb468..ad22799d8a7d 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -885,9 +885,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
event->attr.exclude_idle = 0;
err = __hw_perf_event_init(event);
- if (unlikely(err))
- if (event->destroy)
- event->destroy(event);
return err;
}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 54e281436a28..80b1f7a29f11 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -294,6 +294,10 @@ static int __init setup_elf_platform(void)
case 0x3932:
strcpy(elf_platform, "z16");
break;
+ case 0x9175:
+ case 0x9176:
+ strcpy(elf_platform, "z17");
+ break;
}
return 0;
}
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 855f818deb98..d5c68ade71ab 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -54,6 +54,9 @@ static struct facility_def facility_defs[] = {
#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
61, /* miscellaneous-instruction-extension 3 */
#endif
+#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES
+ 84, /* miscellaneous-instruction-extension 4 */
+#endif
-1 /* END */
}
},
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index d3caa31240ed..175958b02f2b 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -17,19 +17,20 @@
.pushsection .noinstr.text, "ax"
-SYM_FUNC_START(entry_ibpb)
+/* Clobbers AX, CX, DX */
+SYM_FUNC_START(write_ibpb)
ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
- movl $PRED_CMD_IBPB, %eax
+ movl _ASM_RIP(x86_pred_cmd), %eax
xorl %edx, %edx
wrmsr
/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
-SYM_FUNC_END(entry_ibpb)
+SYM_FUNC_END(write_ibpb)
/* For KVM */
-EXPORT_SYMBOL_GPL(entry_ibpb);
+EXPORT_SYMBOL_GPL(write_ibpb);
.popsection
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 8a5cc8e70439..5c43f145454d 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -269,7 +269,7 @@
* typically has NO_MELTDOWN).
*
* While retbleed_untrain_ret() doesn't clobber anything but requires stack,
- * entry_ibpb() will clobber AX, CX, DX.
+ * write_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
@@ -279,7 +279,7 @@
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \
- "call entry_ibpb", \ibpb_feature, \
+ "call write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm
@@ -368,7 +368,7 @@ extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
extern void entry_untrain_ret(void);
-extern void entry_ibpb(void);
+extern void write_ibpb(void);
#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
@@ -514,11 +514,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
: "memory");
}
-extern u64 x86_pred_cmd;
-
static inline void indirect_branch_prediction_barrier(void)
{
- alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
+ asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB)
+ : ASM_CALL_CONSTRAINT
+ :: "rax", "rcx", "rdx", "memory");
}
/* The Intel SPEC CTRL MSR base value cache */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 55a5e656e4b9..4f84d421d1cf 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__
#define ASM_CLAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
+ ALTERNATIVE "", "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
+ alternative("", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
+ alternative("", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -59,9 +59,9 @@ static __always_inline void smap_restore(unsigned long flags)
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
#define ASM_CLAC_UNSAFE \
ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index dae6a73be40e..9fa321a95eb3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -23,6 +23,8 @@
#include <linux/serial_core.h>
#include <linux/pgtable.h>
+#include <xen/xen.h>
+
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
@@ -1729,6 +1731,15 @@ int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in */
+
+ /*
+ * Xen disables ACPI in PV DomU guests but it still emulates APIC and
+ * supports SMP. Returning early here ensures that APIC is not disabled
+ * unnecessarily and the guest is not limited to a single vCPU.
+ */
+ if (xen_pv_domain() && !xen_initial_domain())
+ return 0;
+
if (acpi_disabled || acpi_noirq) {
pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 79569f72b8ee..a839ff506f45 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -805,6 +805,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static const struct x86_cpu_id erratum_1386_microcode[] = {
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
+ {}
};
static void fix_erratum_1386(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4386aa6c69e1..362602b705cc 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,7 +59,6 @@ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
-EXPORT_SYMBOL_GPL(x86_pred_cmd);
static u64 __ro_after_init x86_arch_cap_msr;
@@ -1142,7 +1141,7 @@ do_cmd_auto:
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -1592,51 +1591,54 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
rrsba_disabled = true;
}
-static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{
/*
- * Similar to context switches, there are two types of RSB attacks
- * after VM exit:
+ * WARNING! There are many subtleties to consider when changing *any*
+ * code related to RSB-related mitigations. Before doing so, carefully
+ * read the following document, and update if necessary:
*
- * 1) RSB underflow
+ * Documentation/admin-guide/hw-vuln/rsb.rst
*
- * 2) Poisoned RSB entry
+ * In an overly simplified nutshell:
*
- * When retpoline is enabled, both are mitigated by filling/clearing
- * the RSB.
+ * - User->user RSB attacks are conditionally mitigated during
+ * context switches by cond_mitigation -> write_ibpb().
*
- * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
- * prediction isolation protections, RSB still needs to be cleared
- * because of #2. Note that SMEP provides no protection here, unlike
- * user-space-poisoned RSB entries.
+ * - User->kernel and guest->host attacks are mitigated by eIBRS or
+ * RSB filling.
*
- * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
- * bug is present then a LITE version of RSB protection is required,
- * just a single call needs to retire before a RET is executed.
+ * Note that, depending on the config, other alternative
+ * mitigations may end up being used instead, e.g., IBPB on
+ * entry/vmexit, call depth tracking, or return thunks.
*/
+
switch (mode) {
case SPECTRE_V2_NONE:
- return;
+ break;
- case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
}
- return;
+ break;
- case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
+ pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
- pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
- return;
- }
+ break;
- pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
- dump_stack();
+ default:
+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
+ dump_stack();
+ break;
+ }
}
/*
@@ -1830,48 +1832,7 @@ static void __init spectre_v2_select_mitigation(void)
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
- /*
- * If Spectre v2 protection has been enabled, fill the RSB during a
- * context switch. In general there are two types of RSB attacks
- * across context switches, for which the CALLs/RETs may be unbalanced.
- *
- * 1) RSB underflow
- *
- * Some Intel parts have "bottomless RSB". When the RSB is empty,
- * speculated return targets may come from the branch predictor,
- * which could have a user-poisoned BTB or BHB entry.
- *
- * AMD has it even worse: *all* returns are speculated from the BTB,
- * regardless of the state of the RSB.
- *
- * When IBRS or eIBRS is enabled, the "user -> kernel" attack
- * scenario is mitigated by the IBRS branch prediction isolation
- * properties, so the RSB buffer filling wouldn't be necessary to
- * protect against this type of attack.
- *
- * The "user -> user" attack scenario is mitigated by RSB filling.
- *
- * 2) Poisoned RSB entry
- *
- * If the 'next' in-kernel return stack is shorter than 'prev',
- * 'next' could be tricked into speculating with a user-poisoned RSB
- * entry.
- *
- * The "user -> kernel" attack scenario is mitigated by SMEP and
- * eIBRS.
- *
- * The "user -> user" scenario, also known as SpectreBHB, requires
- * RSB clearing.
- *
- * So to mitigate all cases, unconditionally fill RSB on context
- * switches.
- *
- * FIXME: Is this pointless for retbleed-affected AMD?
- */
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
-
- spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+ spectre_v2_select_rsb_mitigation(mode);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2676,7 +2637,7 @@ static void __init srso_select_mitigation(void)
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -2701,7 +2662,7 @@ ibpb_on_vmexit:
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 93ec829015f1..cc4a54145c83 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -3553,6 +3553,22 @@ static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
free_rmid(rgrp->closid, rgrp->mon.rmid);
}
+/*
+ * We allow creating mon groups only within a directory called "mon_groups"
+ * which is present in every ctrl_mon group. Check if this is a valid
+ * "mon_groups" directory.
+ *
+ * 1. The directory should be named "mon_groups".
+ * 2. The mon group itself should "not" be named "mon_groups".
+ * This makes sure "mon_groups" directory always has a ctrl_mon group
+ * as parent.
+ */
+static bool is_mon_groups(struct kernfs_node *kn, const char *name)
+{
+ return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
+ strcmp(name, "mon_groups"));
+}
+
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
const char *name, umode_t mode,
enum rdt_group_type rtype, struct rdtgroup **r)
@@ -3568,6 +3584,15 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}
+ /*
+ * Check that the parent directory for a monitor group is a "mon_groups"
+ * directory.
+ */
+ if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
if (rtype == RDTMON_GROUP &&
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
@@ -3751,22 +3776,6 @@ out_unlock:
return ret;
}
-/*
- * We allow creating mon groups only within a directory called "mon_groups"
- * which is present in every ctrl_mon group. Check if this is a valid
- * "mon_groups" directory.
- *
- * 1. The directory should be named "mon_groups".
- * 2. The mon group itself should "not" be named "mon_groups".
- * This makes sure "mon_groups" directory always has a ctrl_mon group
- * as parent.
- */
-static bool is_mon_groups(struct kernfs_node *kn, const char *name)
-{
- return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
- strcmp(name, "mon_groups"));
-}
-
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
umode_t mode)
{
@@ -3782,11 +3791,8 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
- /*
- * If RDT monitoring is supported and the parent directory is a valid
- * "mon_groups" directory, add a monitoring subdirectory.
- */
- if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
+ /* Else, attempt to add a monitoring subdirectory. */
+ if (resctrl_arch_mon_capable())
return rdtgroup_mkdir_mon(parent_kn, name, mode);
return -EPERM;
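Since the mkdir path now encodes the placement rule up front, a standalone illustration of the is_mon_groups() predicate on plain strings may help; this is a toy, not kernel code:

    #include <stdio.h>
    #include <string.h>

    /* Parent must be named "mon_groups"; the group itself must not be. */
    static int is_mon_groups(const char *parent, const char *name)
    {
            return !strcmp(parent, "mon_groups") &&
                    strcmp(name, "mon_groups");
    }

    int main(void)
    {
            printf("%d\n", is_mon_groups("mon_groups", "g0"));  /* 1: allowed */
            printf("%d\n", is_mon_groups("ctrl_grp", "g0"));    /* 0: wrong parent */
            printf("%d\n", is_mon_groups("mon_groups",
                                         "mon_groups"));        /* 0: reserved name */
            return 0;
    }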
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 57120f0749cc..9d8dd8deb2a7 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
- unsigned long pfn = 0;
+ u64 last_addr = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];
- if (pfn < PFN_UP(entry->addr))
- register_nosave_region(pfn, PFN_UP(entry->addr));
-
- pfn = PFN_DOWN(entry->addr + entry->size);
-
if (entry->type != E820_TYPE_RAM)
- register_nosave_region(PFN_UP(entry->addr), pfn);
+ continue;
- if (pfn >= limit_pfn)
- break;
+ if (last_addr < entry->addr)
+ register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
+
+ last_addr = entry->addr + entry->size;
}
+
+ register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
#ifdef CONFIG_ACPI
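The rewritten loop only advances last_addr across RAM entries, so every hole in front of a RAM range, and everything past the last byte of RAM, becomes a nosave region. A standalone sketch of the same walk, with simplified types standing in for the e820 structures:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

    struct range { uint64_t addr, size; int is_ram; };

    static void register_nosave(uint64_t spfn, uint64_t epfn)
    {
            if (spfn < epfn)
                    printf("nosave: pfn %llu..%llu\n",
                           (unsigned long long)spfn,
                           (unsigned long long)epfn);
    }

    static void mark_nosave(const struct range *tbl, int n,
                            uint64_t limit_pfn)
    {
            uint64_t last_addr = 0;

            for (int i = 0; i < n; i++) {
                    if (!tbl[i].is_ram)
                            continue;  /* non-RAM never advances last_addr */
                    if (last_addr < tbl[i].addr)
                            register_nosave(PFN_DOWN(last_addr),
                                            PFN_UP(tbl[i].addr));
                    last_addr = tbl[i].addr + tbl[i].size;
            }
            /* Everything above the last RAM byte is not saveable either. */
            register_nosave(PFN_DOWN(last_addr), limit_pfn);
    }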
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 611f27e3890c..3aad78bfcb26 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -389,10 +389,10 @@ static int __init setup_early_printk(char *buf)
keep = (strstr(buf, "keep") != NULL);
while (*buf != '\0') {
- if (!strncmp(buf, "mmio", 4)) {
- early_mmio_serial_init(buf + 4);
+ if (!strncmp(buf, "mmio32", 6)) {
+ buf += 6;
+ early_mmio_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 4;
}
if (!strncmp(buf, "serial", 6)) {
buf += 6;
@@ -407,9 +407,9 @@ static int __init setup_early_printk(char *buf)
}
#ifdef CONFIG_PCI
if (!strncmp(buf, "pciserial", 9)) {
- early_pci_serial_init(buf + 9);
+ buf += 9; /* Keep from matching the above "pciserial" */
+ early_pci_serial_init(buf);
early_console_register(&early_serial_console, keep);
- buf += 9; /* Keep from match the above "serial" */
}
#endif
if (!strncmp(buf, "vga", 3) &&
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e459d97ef397..eb83348f9305 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -667,9 +667,9 @@ static void cond_mitigation(struct task_struct *next)
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
/*
- * Avoid user/user BTB poisoning by flushing the branch predictor
- * when switching between processes. This stops one process from
- * doing Spectre-v2 attacks on another.
+ * Avoid user->user BTB/RSB poisoning by flushing them when switching
+ * between processes. This stops one process from doing Spectre-v2
+ * attacks on another.
*
* Both, the conditional and the always IBPB mode use the mm
* pointer to avoid the IBPB when switching between tasks of the
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 8c534c36adfa..66f066b8feda 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -26,7 +26,7 @@
/* code below belongs to the image kernel */
.align PAGE_SIZE
SYM_FUNC_START(restore_registers)
- ANNOTATE_NOENDBR
+ ENDBR
/* go back to the original page tables */
movq %r9, %cr3
@@ -120,7 +120,7 @@ SYM_FUNC_END(restore_image)
/* code below has been relocated to a safe page */
SYM_FUNC_START(core_restore_code)
- ANNOTATE_NOENDBR
+ ENDBR
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 10c660fae8b3..7237d56a9d3f 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -54,14 +54,20 @@ struct mc_debug_data {
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
static struct mc_debug_data mc_debug_data_early __initdata;
-static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
- &mc_debug_data_early;
static struct mc_debug_data __percpu *mc_debug_data_ptr;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
static struct static_key mc_debug __ro_after_init;
static bool mc_debug_enabled __initdata;
+static struct mc_debug_data * __ref get_mc_debug(void)
+{
+ if (!mc_debug_data_ptr)
+ return &mc_debug_data_early;
+
+ return this_cpu_ptr(mc_debug_data_ptr);
+}
+
static int __init xen_parse_mc_debug(char *arg)
{
mc_debug_enabled = true;
@@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg)
}
early_param("xen_mc_debug", xen_parse_mc_debug);
-void mc_percpu_init(unsigned int cpu)
-{
- per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
-}
-
static int __init mc_debug_enable(void)
{
unsigned long flags;
+ struct mc_debug_data __percpu *mcdb;
if (!mc_debug_enabled)
return 0;
- mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
- if (!mc_debug_data_ptr) {
+ mcdb = alloc_percpu(struct mc_debug_data);
+ if (!mcdb) {
pr_err("xen_mc_debug inactive\n");
static_key_slow_dec(&mc_debug);
return -ENOMEM;
@@ -93,7 +95,7 @@ static int __init mc_debug_enable(void)
/* Be careful when switching to percpu debug data. */
local_irq_save(flags);
xen_mc_flush();
- mc_percpu_init(0);
+ mc_debug_data_ptr = mcdb;
local_irq_restore(flags);
pr_info("xen_mc_debug active\n");
@@ -155,7 +157,7 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
if (static_key_false(&mc_debug)) {
- mcdb = __this_cpu_read(mc_debug_data);
+ mcdb = get_mc_debug();
memcpy(mcdb->entries, b->entries,
b->mcidx * sizeof(struct multicall_entry));
}
@@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args)
ret.mc = &b->entries[b->mcidx];
if (static_key_false(&mc_debug)) {
- struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
+ struct mc_debug_data *mcdb = get_mc_debug();
mcdb->caller[b->mcidx] = __builtin_return_address(0);
mcdb->argsz[b->mcidx] = args;
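The accessor introduced above captures a common boot-time pattern: readers always go through get_mc_debug(), which falls back to static early-boot storage until the per-CPU area is published, so assigning mc_debug_data_ptr is the single switch-over point. A stripped-down sketch of the shape, with the per-CPU details omitted:

    struct dbg { unsigned long entries; };

    static struct dbg early_dbg;      /* static fallback for early boot */
    static struct dbg *dynamic_dbg;   /* published once allocation succeeds */

    /* Flipping one pointer moves every caller from static to dynamic. */
    static struct dbg *get_dbg(void)
    {
            return dynamic_dbg ? dynamic_dbg : &early_dbg;
    }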
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 688ff59318ae..9bb8ff8bff30 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -305,7 +305,6 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
return rc;
xen_pmu_init(cpu);
- mc_percpu_init(cpu);
/*
* Why is this a BUG? If the hypercall fails then everything can be
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 109af12f7647..461bb1526502 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -226,9 +226,7 @@ SYM_CODE_END(xen_early_idt_handler_array)
push %rax
mov $__HYPERVISOR_iret, %eax
syscall /* Do the IRET. */
-#ifdef CONFIG_MITIGATION_SLS
- int3
-#endif
+ ud2 /* The SYSCALL should never return. */
.endm
SYM_CODE_START(xen_iret)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 63c13a2ccf55..25e318ef27d6 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -261,9 +261,6 @@ void xen_mc_callback(void (*fn)(void *), void *data);
*/
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
-/* Do percpu data initialization for multicalls. */
-void mc_percpu_init(unsigned int cpu);
-
extern bool is_xen_pmu;
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 9f57b925b116..2d9eec2b2b1c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -315,16 +315,7 @@ EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static bool ahash_request_hasvirt(struct ahash_request *req)
{
- struct ahash_request *r2;
-
- if (ahash_request_isvirt(req))
- return true;
-
- list_for_each_entry(r2, &req->base.list, base.list)
- if (ahash_request_isvirt(r2))
- return true;
-
- return false;
+ return ahash_request_isvirt(req);
}
static int ahash_reqchain_virt(struct ahash_save_req_state *state,
@@ -472,7 +463,6 @@ static int ahash_do_req_chain(struct ahash_request *req,
bool update = op == crypto_ahash_alg(tfm)->update;
struct ahash_save_req_state *state;
struct ahash_save_req_state state0;
- struct ahash_request *r2;
u8 *page = NULL;
int err;
@@ -509,7 +499,6 @@ static int ahash_do_req_chain(struct ahash_request *req,
state->offset = 0;
state->nbytes = 0;
INIT_LIST_HEAD(&state->head);
- list_splice_init(&req->base.list, &state->head);
if (page)
sg_init_one(&state->sg, page, PAGE_SIZE);
@@ -540,9 +529,6 @@ out_free_page:
out_set_chain:
req->base.err = err;
- list_for_each_entry(r2, &req->base.list, base.list)
- r2->base.err = err;
-
return err;
}
@@ -551,19 +537,10 @@ int crypto_ahash_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
int err;
err = crypto_shash_init(prepare_shash_desc(req, tfm));
req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = prepare_shash_desc(r2, tfm);
- r2->base.err = crypto_shash_init(desc);
- }
-
return err;
}
@@ -620,19 +597,10 @@ int crypto_ahash_update(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
int err;
err = shash_ahash_update(req, ahash_request_ctx(req));
req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = ahash_request_ctx(r2);
- r2->base.err = shash_ahash_update(r2, desc);
- }
-
return err;
}
@@ -645,19 +613,10 @@ int crypto_ahash_final(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
int err;
err = crypto_shash_final(ahash_request_ctx(req), req->result);
req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = ahash_request_ctx(r2);
- r2->base.err = crypto_shash_final(desc, r2->result);
- }
-
return err;
}
@@ -670,19 +629,10 @@ int crypto_ahash_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
int err;
err = shash_ahash_finup(req, ahash_request_ctx(req));
req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = ahash_request_ctx(r2);
- r2->base.err = shash_ahash_finup(r2, desc);
- }
-
return err;
}
@@ -757,19 +707,10 @@ int crypto_ahash_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash)) {
- struct ahash_request *r2;
int err;
err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
req->base.err = err;
-
- list_for_each_entry(r2, &req->base.list, base.list) {
- struct shash_desc *desc;
-
- desc = prepare_shash_desc(r2, tfm);
- r2->base.err = shash_ahash_digest(r2, desc);
- }
-
return err;
}
@@ -1133,20 +1074,5 @@ int ahash_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
-void ahash_request_free(struct ahash_request *req)
-{
- struct ahash_request *tmp;
- struct ahash_request *r2;
-
- if (unlikely(!req))
- return;
-
- list_for_each_entry_safe(r2, tmp, &req->base.list, base.list)
- kfree_sensitive(r2);
-
- kfree_sensitive(req);
-}
-EXPORT_SYMBOL_GPL(ahash_request_free);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/scompress.c b/crypto/scompress.c
index d435d4b24469..5762fcc63b51 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -111,10 +111,14 @@ static void scomp_free_streams(struct scomp_alg *alg)
struct crypto_acomp_stream __percpu *stream = alg->stream;
int i;
+ alg->stream = NULL;
+ if (!stream)
+ return;
+
for_each_possible_cpu(i) {
struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
- if (!ps->ctx)
+ if (IS_ERR_OR_NULL(ps->ctx))
break;
alg->free_ctx(ps->ctx);
@@ -132,6 +136,8 @@ static int scomp_alloc_streams(struct scomp_alg *alg)
if (!stream)
return -ENOMEM;
+ alg->stream = stream;
+
for_each_possible_cpu(i) {
struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
@@ -143,8 +149,6 @@ static int scomp_alloc_streams(struct scomp_alg *alg)
spin_lock_init(&ps->lock);
}
-
- alg->stream = stream;
return 0;
}
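The reordering here is a publish-early cleanup idiom: alg->stream is set before the per-CPU contexts are filled in, so the free path can always unwind however far allocation got (and now tolerates NULL or error-valued entries). A plain-C analogue of the idiom without the per-CPU machinery:

    #include <stdlib.h>

    struct ctx { void *buf; };
    struct alg { struct ctx *streams; int nr; };

    static void free_streams(struct alg *a)
    {
            struct ctx *s = a->streams;

            a->streams = NULL;
            if (!s)
                    return;         /* nothing was ever published */
            for (int i = 0; i < a->nr; i++) {
                    if (!s[i].buf)
                            break;  /* first unpopulated slot: stop */
                    free(s[i].buf);
            }
            free(s);
    }

    static int alloc_streams(struct alg *a, int nr)
    {
            a->nr = nr;
            a->streams = calloc(nr, sizeof(*a->streams));
            if (!a->streams)
                    return -1;
            /* Published before being populated: a failure below can hand
             * the partially filled array straight to free_streams(). */
            for (int i = 0; i < nr; i++) {
                    a->streams[i].buf = malloc(64);
                    if (!a->streams[i].buf) {
                            free_streams(a);
                            return -1;
                    }
            }
            return 0;
    }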
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 0825851656a2..f0dad0c9ce33 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -332,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
ivpu_pm_trigger_recovery(vdev, "debugfs");
@@ -383,7 +383,7 @@ static int dct_active_set(void *data, u64 active_percent)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
- if (ret)
+ if (ret < 0)
return ret;
if (active_percent)
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 0e096fd9b95d..39f83225c181 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -302,7 +302,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
struct ivpu_ipc_consumer cons;
int ret;
- drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
+ pm_runtime_enabled(vdev->drm.dev));
ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
index ffe7b10f8a76..2a043baf10ca 100644
--- a/drivers/accel/ivpu/ivpu_ms.c
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_file.h>
+#include <linux/pm_runtime.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
@@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
@@ -96,6 +101,8 @@ err_free_ms:
kfree(ms);
unlock:
mutex_unlock(&file_priv->ms_lock);
+
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
unlock:
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_stop *args = data;
+ struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
+ int ret;
if (!args->metric_group_mask)
return -EINVAL;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
mutex_unlock(&file_priv->ms_lock);
+ ivpu_rpm_put(vdev);
return ms ? 0 : -EINVAL;
}
@@ -281,6 +300,9 @@ unlock:
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
struct ivpu_ms_instance *ms, *tmp;
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&file_priv->ms_lock);
@@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
free_instance(file_priv, ms);
mutex_unlock(&file_priv->ms_lock);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
}
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
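All the hunks in this file wrap the ioctl body in the same runtime-PM bracket, which is worth seeing in isolation. ivpu_rpm_get()/ivpu_rpm_put() are the driver's real helpers; the ioctl name and body here are hypothetical placeholders:

    struct ivpu_device;
    int ivpu_rpm_get(struct ivpu_device *vdev);
    void ivpu_rpm_put(struct ivpu_device *vdev);
    int do_hw_work(struct ivpu_device *vdev);   /* hypothetical */

    static int some_ms_ioctl(struct ivpu_device *vdev)
    {
            int ret;

            ret = ivpu_rpm_get(vdev);   /* counted ref; resumes the NPU */
            if (ret < 0)
                    return ret;         /* device could not be powered up */

            ret = do_hw_work(vdev);     /* safe: device is awake here */

            ivpu_rpm_put(vdev);         /* drop ref, allow autosuspend */
            return ret;
    }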
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 90b09840536d..0a7026040188 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -458,7 +458,7 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
acpi_pm_wakeup_event(&device->dev);
button = acpi_driver_data(device);
- if (button->suspended)
+ if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE)
return;
input = button->input;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8db09d81918f..3c5f34892734 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
+ /*
+ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
{ },
};
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..f73ce6e13065 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
while ((unsigned long)entry + proc_sz < table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {
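Both hunks fix the same C pitfall: sizeof applied to a pointer type yields the pointer size (8 bytes on 64-bit), not the size of the structure, so the end-of-table check reserved room for only 8 bytes instead of a full processor node and could let the walk read past the table. A minimal demonstration, using an illustrative struct rather than the real ACPI layout:

    #include <stdio.h>

    struct pptt_processor_like {
            unsigned int header;
            unsigned int flags;
            unsigned int parent;
            unsigned int acpi_processor_id;
            unsigned int nr_priv_resources;
    };

    int main(void)
    {
            printf("sizeof(struct *): %zu\n",
                   sizeof(struct pptt_processor_like *)); /* 8 on x86-64 */
            printf("sizeof(struct):   %zu\n",
                   sizeof(struct pptt_processor_like));   /* 20 */
            return 0;
    }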
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index ba300cc0a3a3..2e4463d3a356 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1510,6 +1510,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
u64 sense_valid, val;
+ u16 extended_sense;
+ bool aux_icc_valid;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1529,6 +1531,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+ extended_sense = get_unaligned_le16(&buf[14]);
+ aux_icc_valid = extended_sense & BIT(15);
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
@@ -1556,6 +1560,17 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
continue;
}
+ qc->result_tf.nsect = sense[6];
+ qc->result_tf.hob_nsect = sense[7];
+ qc->result_tf.lbal = sense[8];
+ qc->result_tf.lbam = sense[9];
+ qc->result_tf.lbah = sense[10];
+ qc->result_tf.hob_lbal = sense[11];
+ qc->result_tf.hob_lbam = sense[12];
+ qc->result_tf.hob_lbah = sense[13];
+ if (aux_icc_valid)
+ qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);
+
/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
qc->scsicmd->sense_buffer, sk,
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 434f380114af..03dbaf4a13a7 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
+ if (!ap->ioaddr.cmd_addr)
+ return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
+ if (!ap->ioaddr.ctl_addr)
+ return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
+ if (!ap->ioaddr.bmdma_addr)
+ return -ENOMEM;
/*
* Adjust register offsets
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index a482741eb181..c3042eca6332 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- pdc_i2c_read_data[i].reg,
- &spd0[pdc_i2c_read_data[i].ofs]);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc_i2c_read_data[i].reg,
+ &spd0[pdc_i2c_read_data[i].ofs])) {
+ dev_err(host->dev,
+ "Failed in i2c read at index %d: device=%#x, reg=%#x\n",
+ i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
+ return -EIO;
+ }
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
+ if (size < 0)
+ return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3bb9cee0a9b5..aa163ae9b2aa 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -2031,7 +2031,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb->disk->minors = 1;
nullb->disk->fops = &null_ops;
nullb->disk->private_data = nullb;
- strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ strscpy(nullb->disk->disk_name, nullb->disk_name);
if (nullb->dev->zoned) {
rv = null_register_zoned_dev(nullb);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 5091dea762a0..604ab2bba231 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1286,7 +1286,9 @@ static void nxp_coredump(struct hci_dev *hdev)
u8 pcmd = 2;
skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd);
- if (!IS_ERR(skb))
+ if (IS_ERR(skb))
+ bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb));
+ else
kfree_skb(skb);
}
@@ -1445,9 +1447,6 @@ static int nxp_shutdown(struct hci_dev *hdev)
/* HCI_NXP_IND_RESET command may not return any response */
if (!IS_ERR(skb))
kfree_skb(skb);
- } else if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
- nxp_set_baudrate_cmd(hdev, NULL);
}
return 0;
@@ -1799,13 +1798,15 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
- }
-
- if (test_bit(HCI_RUNNING, &hdev->flags)) {
- /* Ensure shutdown callback is executed before unregistering, so
- * that baudrate is reset to initial value.
+ } else {
+ /* Restore FW baudrate to fw_init_baudrate if changed.
+ * This will ensure FW baudrate is in sync with
+ * driver baudrate in case this driver is re-inserted.
*/
- nxp_shutdown(hdev);
+ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
}
ps_cleanup(nxpdev);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 3d6778b95e00..edefb9dc76aa 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -889,7 +889,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T)
variant = "t";
else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S)
- variant = "u";
+ variant = "s";
snprintf(config.fwname, sizeof(config.fwname),
"qca/cmnv%02x%s.bin", rom_ver, variant);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index d3eba0d4a57d..7838c89e529e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1215,6 +1215,8 @@ next:
rtl_dev_err(hdev, "mandatory config file %s not found",
btrtl_dev->ic_info->cfg_name);
ret = btrtl_dev->cfg_len;
+ if (!ret)
+ ret = -EINVAL;
goto err_free;
}
}
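The btrtl change covers the case where btrtl_dev->cfg_len is 0 on the mandatory-config error path, which would otherwise have propagated 0 (success) to the caller. A minimal sketch of the pattern:

        #include <linux/errno.h>

        /* Minimal sketch: when a length doubles as a status code, an
         * error path must never pass 0 (success) through.
         */
        static int demo_check_cfg(int cfg_len)
        {
                int ret = cfg_len;

                if (!ret)
                        ret = -EINVAL;  /* empty config is still a failure */
                return ret;
        }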
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index a51935d37e5d..59f4d7bdffdc 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev)
static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
- char buf[80];
+ const char *buf;
- snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
+ buf = "Controller Name: vhci_ctrl\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
+ buf = "Firmware Version: vhci_fw\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
+ buf = "Driver: vhci_drv\n";
skb_put_data(skb, buf, strlen(buf));
- snprintf(buf, sizeof(buf), "Vendor: vhci\n");
+ buf = "Vendor: vhci\n";
skb_put_data(skb, buf, strlen(buf));
}
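The vhci change drops the snprintf() round-trips: the strings are constants, so pointing buf at the literals is equivalent and avoids an 80-byte stack buffer. A minimal sketch:

        #include <linux/skbuff.h>
        #include <linux/string.h>

        /* Minimal sketch: append a constant line to a coredump skb
         * without copying it through a stack buffer first.
         */
        static void demo_put_line(struct sk_buff *skb)
        {
                const char *buf = "Controller Name: vhci_ctrl\n";

                skb_put_data(skb, buf, strlen(buf));
        }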
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7701d00bcb3a..b6e7c0b29d4e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
qm_fd_addr_set64(&fd, addr);
do {
+ refcount_inc(&req->drv_ctx->refcnt);
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
- if (likely(!ret)) {
- refcount_inc(&req->drv_ctx->refcnt);
+ if (likely(!ret))
return 0;
- }
+ refcount_dec(&req->drv_ctx->refcnt);
if (ret != -EBUSY)
break;
num_retries++;
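The caam/qi fix takes the context refcount before qman_enqueue() and drops it again on failure; incrementing only after a successful enqueue left a window in which the completion path could already be running against an under-counted reference. A minimal sketch of the ordering, with hypothetical demo_* names:

        #include <linux/refcount.h>

        struct demo_ctx {
                refcount_t refcnt;
        };

        /* Minimal sketch: publish-then-account races with the consumer;
         * account-then-publish does not.
         */
        static int demo_submit(struct demo_ctx *ctx, int (*enqueue)(struct demo_ctx *))
        {
                int ret;

                refcount_inc(&ctx->refcnt);     /* reference exists before request is visible */
                ret = enqueue(ctx);
                if (ret)
                        refcount_dec(&ctx->refcnt);     /* undo on failure */
                return ret;
        }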
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index ca9d0cca1f74..0e07d0523291 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -269,7 +269,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
unsigned int cmdlen, key1_id, key2_id;
int ret;
- rctx->iv = (u32 *)req->iv;
+ rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
rctx->len = req->cryptlen;
key1_id = ctx->key1_id;
key2_id = ctx->key2_id;
@@ -498,9 +498,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
if (!req->cryptlen)
return 0;
- if (ctx->alg == SE_ALG_ECB)
- req->iv = NULL;
-
rctx->encrypt = encrypt;
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cc7398cc17d6..e74e36a8ecda 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -393,7 +393,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;
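The udmabuf fix widens size_limit_mb before multiplying: in 32-bit arithmetic, size_limit_mb * 1024 * 1024 wraps for values of 4096 and above, collapsing the page limit. A standalone demonstration, assuming a PAGE_SHIFT of 12:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t size_limit_mb = 8192;  /* hypothetical module parameter */

                /* 32-bit multiply wraps (8192 MB == 2^33 bytes): limit becomes 0 */
                uint64_t bad = (size_limit_mb * 1024 * 1024) >> 12;
                /* widening the first operand keeps the multiply in 64 bits */
                uint64_t good = ((uint64_t)size_limit_mb * 1024 * 1024) >> 12;

                printf("bad=%llu good=%llu\n",
                       (unsigned long long)bad, (unsigned long long)good);
                return 0;
        }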
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
index 161272e47bda..73412bcef50c 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -462,36 +462,6 @@ unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *pri
EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
/**
- * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
- *
- * @priv: Pointer to struct cs_dsp_test.
- *
- * Return: Firmware version word value.
- */
-unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
-{
- unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
- union {
- struct wmfw_id_hdr adsp2;
- struct wmfw_v3_id_hdr halo;
- } hdr;
-
- switch (priv->dsp->type) {
- case WMFW_ADSP2:
- regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
- return be32_to_cpu(hdr.adsp2.ver);
- case WMFW_HALO:
- regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
- return be32_to_cpu(hdr.halo.ver);
- default:
- KUNIT_FAIL(priv->test, NULL);
- return 0;
- }
-}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
- "FW_CS_DSP_KUNIT_TEST_UTILS");
-
-/**
* cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
*
* @header: Pointer to struct cs_dsp_mock_xm_header.
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
index 1e161bbc5b4a..163b7faecff4 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -2198,7 +2198,7 @@ static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
priv->local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
- cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ cs_dsp_mock_xm_header_get_fw_version(xm_hdr));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
/* We must provide a dummy wmfw to load */
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
index 8748874f0552..a7ec956d2724 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -451,7 +451,7 @@ static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *ds
local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
- cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ cs_dsp_mock_xm_header_get_fw_version(local->xm_header));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
/* Init cs_dsp */
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
index b9ab29809e96..124ba40e32b1 100644
--- a/drivers/fpga/tests/fpga-bridge-test.c
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -170,4 +170,5 @@ static struct kunit_suite fpga_bridge_suite = {
kunit_test_suite(fpga_bridge_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Bridge");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
index 9cb37aefbac4..8748babb0504 100644
--- a/drivers/fpga/tests/fpga-mgr-test.c
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -330,4 +330,5 @@ static struct kunit_suite fpga_mgr_suite = {
kunit_test_suite(fpga_mgr_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
index 6a108cafded8..020ceac48509 100644
--- a/drivers/fpga/tests/fpga-region-test.c
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -214,4 +214,5 @@ static struct kunit_suite fpga_region_suite = {
kunit_test_suite(fpga_region_suite);
+MODULE_DESCRIPTION("KUnit test for the FPGA Region");
MODULE_LICENSE("GPL");
diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c
index cb1ac9c40239..bc6378506296 100644
--- a/drivers/fwctl/main.c
+++ b/drivers/fwctl/main.c
@@ -105,7 +105,7 @@ static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)
if (!test_and_set_bit(0, &fwctl_tainted)) {
dev_warn(
&fwctl->dev,
- "%s(%d): has requested full access to the physical device device",
+ "%s(%d): has requested full access to the physical device",
current->comm, task_pid_nr(current));
add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK);
}
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
index 284c4165fdd4..9b9d1f6b5556 100644
--- a/drivers/fwctl/pds/main.c
+++ b/drivers/fwctl/pds/main.c
@@ -105,12 +105,14 @@ static int pdsfc_identify(struct pdsfc_dev *pdsfc)
static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
{
struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_endpoints;
int i;
if (!pdsfc->endpoints)
return;
- for (i = 0; pdsfc->endpoint_info && i < pdsfc->endpoints->num_entries; i++)
+ num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
mutex_destroy(&pdsfc->endpoint_info[i].lock);
vfree(pdsfc->endpoint_info);
pdsfc->endpoint_info = NULL;
@@ -199,7 +201,7 @@ static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
for (i = 0; i < num_endpoints; i++) {
mutex_init(&pdsfc->endpoint_info[i].lock);
- pdsfc->endpoint_info[i].endpoint = ep_entry[i].id;
+ pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
}
return 0;
@@ -214,6 +216,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
struct pds_fwctl_query_data *data;
union pds_core_adminq_cmd cmd;
dma_addr_t data_pa;
+ u32 num_entries;
int err;
int i;
@@ -246,8 +249,9 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
*pa = data_pa;
entries = (struct pds_fwctl_query_data_operation *)data->entries;
- dev_dbg(dev, "num_entries %d\n", data->num_entries);
- for (i = 0; i < data->num_entries; i++) {
+ num_entries = le32_to_cpu(data->num_entries);
+ dev_dbg(dev, "num_entries %d\n", num_entries);
+ for (i = 0; i < num_entries; i++) {
/* Translate FW command attribute to fwctl scope */
switch (entries[i].scope) {
@@ -267,7 +271,7 @@ static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc
break;
}
dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
- ep, entries[i].id, entries[i].scope);
+ ep, le32_to_cpu(entries[i].id), entries[i].scope);
}
return data;
@@ -280,24 +284,26 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
struct pds_fwctl_query_data_operation *op_entry;
struct pdsfc_rpc_endpoint_info *ep_info = NULL;
struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_entries;
int i;
/* validate rpc in_len & out_len based
* on ident.max_req_sz & max_resp_sz
*/
- if (rpc->in.len > pdsfc->ident.max_req_sz) {
+ if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
dev_dbg(dev, "Invalid request size %u, max %u\n",
- rpc->in.len, pdsfc->ident.max_req_sz);
+ rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
return -EINVAL;
}
- if (rpc->out.len > pdsfc->ident.max_resp_sz) {
+ if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
dev_dbg(dev, "Invalid response size %u, max %u\n",
- rpc->out.len, pdsfc->ident.max_resp_sz);
+ rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
return -EINVAL;
}
- for (i = 0; i < pdsfc->endpoints->num_entries; i++) {
+ num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; i < num_entries; i++) {
if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
ep_info = &pdsfc->endpoint_info[i];
break;
@@ -326,8 +332,9 @@ static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
/* reject unsupported and/or out of scope commands */
op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
- for (i = 0; i < ep_info->operations->num_entries; i++) {
- if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, op_entry[i].id)) {
+ num_entries = le32_to_cpu(ep_info->operations->num_entries);
+ for (i = 0; i < num_entries; i++) {
+ if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) {
if (scope < op_entry[i].scope)
return -EPERM;
return 0;
@@ -402,7 +409,7 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
cmd = (union pds_core_adminq_cmd) {
.fwctl_rpc = {
.opcode = PDS_FWCTL_CMD_RPC,
- .flags = PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP,
+ .flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP),
.ep = cpu_to_le32(rpc->in.ep),
.op = cpu_to_le32(rpc->in.op),
.req_pa = cpu_to_le64(in_payload_dma_addr),
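The pds fwctl hunks add the missing le32_to_cpu()/cpu_to_le16() conversions: these structures are shared with device firmware and their layout is little-endian regardless of host byte order. A minimal sketch of the convention, with a hypothetical demo struct:

        #include <linux/types.h>
        #include <asm/byteorder.h>

        /* Minimal sketch: on-wire fields are declared __le32 so sparse
         * can flag any access that skips the conversion.
         */
        struct demo_fw_data {
                __le32 num_entries;
        };

        static u32 demo_entry_count(const struct demo_fw_data *data)
        {
                return le32_to_cpu(data->num_entries);
        }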
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6d83ccfa42ee..2c04ae133848 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -353,7 +353,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a30111d2c3ea..b34b915203f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3643,6 +3643,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev, adev->ip_blocks[i].version->type))
continue;
+ /* Since we skip suspend for S0i3, we need to cancel the delayed
+ * idle work here as the suspend callback never gets called.
+ */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index dc2713ec95a5..9e738fae2b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -120,6 +120,8 @@ MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 9f627caedc3f..667080cc9ae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -75,11 +75,25 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
+ u32 domains = bo->preferred_domains;
- /* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ dma_resv_assert_held(dmabuf->resv);
+
+ /*
+ * Try pinning into VRAM to allow P2P with RDMA NICs without ODP
+ * support if all attachments can do P2P. If any attachment can't do
+ * P2P, just pin into GTT instead.
+ */
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (!attach->peer2peer)
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+
+ if (domains & AMDGPU_GEM_DOMAIN_VRAM)
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ return amdgpu_bo_pin(bo, domains);
}
/**
@@ -134,9 +148,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
-
- } else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
- return ERR_PTR(-EBUSY);
}
switch (bo->tbo.resource->mem_type) {
@@ -184,7 +195,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- if (sgt->sgl->page_link) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 464625282872..ecb74ccf1d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -699,12 +699,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
- adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- int r;
+ int r, cnt = 0;
uint32_t seq;
/*
@@ -761,10 +759,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
- }
+ } else
+ r = 0;
}
error_unlock_reset:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 80cd6f5273db..0b9987781f76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -163,8 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
- if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
- !(adev->flags & AMD_IS_APU))
+ if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
+ domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 6da8994e0469..2d7f82e98df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -907,6 +908,9 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index e65916ada23b..ef9538fbbf53 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -894,6 +894,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
int pipe;
+ /* return early if we have already fetched these */
+ if (adev->mes.sched_version && adev->mes.kiq_version)
+ return;
+
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 183dd3346da5..e6ab617b9a40 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1392,17 +1392,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
mes_v12_0_queue_init_register(ring);
}
- /* get MES scheduler/KIQ versions */
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
+ ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
+ /* get MES scheduler/KIQ versions */
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 3, pipe, 0, 0);
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
- adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+ adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e477d7509646..9bbee484d57c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1983,9 +1983,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
- if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
- dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
-
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4))
@@ -2001,7 +1998,11 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
- dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+ if (!amdgpu_sriov_vf(dev->gpu->adev))
+ dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+
+ if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d0d8ad5368c3..9fed4471405f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1726,9 +1726,30 @@ static const struct dmi_system_id dmi_quirk_table[] = {
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
},
},
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 36a830a7440f..e8bdd7f0c460 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
*
* Panel Replay and PSR SU
* - Enable when:
+ * - VRR is disabled
* - vblank counter is disabled
* - entry is allowed: usermode demonstrates an adequate number of fast
* commits
@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
bool is_sr_active = (link->replay_settings.replay_allow_active ||
link->psr_settings.psr_allow_active);
bool is_crc_window_active = false;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
is_crc_window_active =
amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
#endif
- if (link->replay_settings.replay_feature_enabled &&
+ if (link->replay_settings.replay_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
amdgpu_dm_psr_disable(vblank_work->stream, false);
- } else if (link->psr_settings.psr_feature_enabled &&
+ } else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
struct amdgpu_dm_connector *aconn =
@@ -244,6 +246,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -271,8 +275,15 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
dc_allow_idle_optimizations(dm->dc, true);
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
+ }
mutex_unlock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index be54f0e696ce..94e99e540691 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -86,6 +86,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/* Store configuration options */
(*dml_ctx)->config = *config;
+ DC_FP_START();
+
/*Initialize SOCBB and DCNIP params */
dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
@@ -96,6 +98,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/*Initialize DML21 instance */
dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+
+ DC_FP_END();
}
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
@@ -283,11 +287,16 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
bool out = false;
+ DC_FP_START();
+
/* Use dml_validate_only for fast_validate path */
- if (fast_validate) {
+ if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
- } else
+ else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
+
+ DC_FP_END();
+
return out;
}
@@ -426,8 +435,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
+ DC_FP_START();
+
/* need to initialize copied instance for internal references to be correct */
dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
+
+ DC_FP_END();
}
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 939ee0708bd2..f549a778f6f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -732,11 +732,16 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
return out;
}
+ DC_FP_START();
+
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml2_validate_only(context);
else
out = dml2_validate_and_build_resource(in_dc, context);
+
+ DC_FP_END();
+
return out;
}
@@ -779,11 +784,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
break;
}
+ DC_FP_START();
+
initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+
+ DC_FP_END();
}
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 2a9606118d89..21dc956b5f35 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -429,6 +429,7 @@ struct amd_pm_funcs {
int (*set_pp_table)(void *handle, const char *buf, size_t size);
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
+ int (*pause_power_profile)(void *handle, bool pause);
/* export to amdgpu */
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 81e9b443ca0a..3533d43ed1e7 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -349,6 +349,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (pp_funcs && pp_funcs->pause_power_profile) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->pause_power_profile(
+ adev->powerplay.pp_handle, pause);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
uint32_t pstate)
{
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index f93d287dbf13..4c0f7ad14816 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -410,6 +410,8 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en);
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+ bool pause);
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 033c3229b555..46cce1d2aaf3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2398,7 +2398,11 @@ static int smu_switch_power_profile(void *handle,
smu_power_profile_mode_get(smu, type);
else
smu_power_profile_mode_put(smu, type);
- ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ /* don't switch the active workload when paused */
+ if (smu->pause_workload)
+ ret = 0;
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
if (ret) {
if (enable)
smu_power_profile_mode_put(smu, type);
@@ -2411,6 +2415,35 @@ static int smu_switch_power_profile(void *handle,
return 0;
}
+static int smu_pause_power_profile(void *handle,
+ bool pause)
+{
+ struct smu_context *smu = handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ int ret;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ smu->pause_workload = pause;
+
+ /* force to bootup default profile */
+ if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu,
+ workload_mask,
+ NULL,
+ 0);
+ else
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ return ret;
+ }
+
+ return 0;
+}
+
static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
@@ -3733,6 +3766,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_pp_table = smu_sys_get_pp_table,
.set_pp_table = smu_sys_set_pp_table,
.switch_power_profile = smu_switch_power_profile,
+ .pause_power_profile = smu_pause_power_profile,
/* export to amdgpu */
.dispatch_tasks = smu_handle_dpm_task,
.load_firmware = smu_load_microcode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3ba169639f54..dd6d0e7aa242 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -558,6 +558,7 @@ struct smu_context {
/* asic agnostic workload mask */
uint32_t workload_mask;
+ bool pause_workload;
/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 78391d8f35a9..25fabf336a64 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1204,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
- if (speed == 0)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
/*
* To prevent possible overheating, some ASICs may have a requirement
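The smu_v11_0 guard rejects fan speeds above UINT_MAX / 8 as well as zero: the tach-period computation divides by a term of the form 8 * speed, so larger inputs would overflow 32-bit arithmetic. A minimal sketch of the guard; the formula below approximates the upstream math rather than copying it:

        #include <linux/errno.h>
        #include <linux/limits.h>
        #include <linux/types.h>

        /* Hypothetical helper: the divisor contains an "8 * speed" term,
         * so speed must be capped to keep it inside 32 bits.
         */
        static int demo_set_fan_rpm(u32 crystal_clock_freq, u32 speed, u32 *tach_period)
        {
                if (!speed || speed > UINT_MAX / 8)
                        return -EINVAL;

                *tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
                return 0;
        }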
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 048be2872247..98b898a1de8f 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -244,6 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = 4;
break;
case INTEL_DRAM_GDDR:
+ case INTEL_DRAM_GDDR_ECC:
qi->channel_width = 32;
break;
default:
@@ -398,6 +399,12 @@ static const struct intel_sa_info xe2_hpd_sa_info = {
/* Other values not used by simplified algorithm */
};
+static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
+ .derating = 45,
+ .deprogbwlimit = 53,
+ /* Other values not used by simplified algorithm */
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -740,10 +747,15 @@ static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
+ const struct dram_info *dram_info = &dev_priv->dram_info;
+
if (!HAS_DISPLAY(dev_priv))
return;
- if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
+ if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) &&
+ dram_info->type == INTEL_DRAM_GDDR_ECC)
+ xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info);
+ else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
else if (DISPLAY_VER(dev_priv) >= 14)
tgl_get_bw_info(dev_priv, &mtl_sa_info);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3afb85fe8536..3b509c70fb58 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -968,7 +968,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
- old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
+ old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
+ old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
+ old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a236b5fc7a3d..9476aaa91900 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -172,10 +172,28 @@ int intel_dp_link_symbol_clock(int rate)
static int max_dprx_rate(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int max_rate;
+
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
- return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+ max_rate = drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+ else
+ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
- return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ /*
+ * Some broken eDP sinks illegally declare support for
+ * HBR3 without TPS4, and are unable to produce a stable
+ * output. Reject HBR3 when TPS4 is not available.
+ */
+ if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+ encoder->base.base.id, encoder->base.name);
+ max_rate = 540000;
+ }
+
+ return max_rate;
}
static int max_dprx_lane_count(struct intel_dp *intel_dp)
@@ -4170,6 +4188,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
static void
intel_edp_set_sink_rates(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
intel_dp->num_sink_rates = 0;
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
@@ -4180,10 +4201,7 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
sink_rates, sizeof(sink_rates));
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
- int val = le16_to_cpu(sink_rates[i]);
-
- if (val == 0)
- break;
+ int rate;
/* Value read multiplied by 200kHz gives the per-lane
* link rate in kHz. The source rates are, however,
@@ -4191,7 +4209,24 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
* back to symbols is
* (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
*/
- intel_dp->sink_rates[i] = (val * 200) / 10;
+ rate = le16_to_cpu(sink_rates[i]) * 200 / 10;
+
+ if (rate == 0)
+ break;
+
+ /*
+ * Some broken eDP sinks illegally declare support for
+ * HBR3 without TPS4, and are unable to produce a stable
+ * output. Reject HBR3 when TPS4 is not available.
+ */
+ if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ intel_dp->sink_rates[i] = rate;
}
intel_dp->num_sink_rates = i;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 4efd4f7d497a..7b240ce681a0 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -222,7 +222,9 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
* However, if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (DISPLAY_VER(display) == 2)
+ if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
+ return 1;
+ else if (DISPLAY_VER(display) == 2)
return -1;
else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return 2;
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9378d5901c49..9ca42589da4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -117,21 +117,10 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
- /*
- * BSpec 52698 - Render powergating must be off.
- * FIXME BSpec is outdated, disabling powergating for MTL is just
- * temporary wa and should be removed after fixing real cause
- * of forcewake timeouts.
- */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
- pg_enable =
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
- else
- pg_enable =
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ pg_enable =
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d791f9baa11d..456d3372eef8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
+void intel_huc_fini_late(struct intel_huc *huc)
+{
+ delayed_huc_load_fini(huc);
+}
+
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
@@ -414,12 +419,6 @@ out:
void intel_huc_fini(struct intel_huc *huc)
{
- /*
- * the fence is initialized in init_early, so we need to clean it up
- * even if HuC loading is off.
- */
- delayed_huc_load_fini(huc);
-
if (huc->heci_pkt)
i915_vma_unpin_and_release(&huc->heci_pkt, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index d5e441b9e08d..921ad4b1687f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -55,6 +55,7 @@ struct intel_huc {
int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
+void intel_huc_fini_late(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 90ba1b0b4c9d..4a3493e8d433 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -136,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc)
void intel_uc_driver_late_release(struct intel_uc *uc)
{
+ intel_huc_fini_late(&uc->huc);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 509f9ccae3a9..dbad4d853d3a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
u8 *buf;
struct opregion_header *header;
struct vbt v;
- const char opregion_signature[16] = OPREGION_SIGNATURE;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf;
- memcpy(header->signature, opregion_signature,
- sizeof(opregion_signature));
+
+ static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
+ memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
+
header->size = 0x8;
header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ffc346379cc2..54538b6f85df 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -305,6 +305,7 @@ struct drm_i915_private {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index fee76c1d2f45..889281819c5b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -23,7 +23,9 @@
#include <linux/random.h>
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_gsc_fw.h"
#include "i915_driver.h"
@@ -253,11 +255,27 @@ int i915_mock_selftests(void)
int i915_live_selftests(struct pci_dev *pdev)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_uncore *uncore = &i915->uncore;
int err;
+ u32 pg_enable;
+ intel_wakeref_t wakeref;
if (!i915_selftest.live)
return 0;
+ /*
+ * FIXME: Disable render powergating; this is a temporary workaround and
+ * should be removed after fixing the real cause of forcewake timeouts.
+ */
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 00), IP_VER(12, 74))) {
+ pg_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
+ if (pg_enable & GEN9_RENDER_PG_ENABLE)
+ intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
+ pg_enable & ~GEN9_RENDER_PG_ENABLE);
+ }
+ }
+
__wait_gsc_proxy_completed(i915);
__wait_gsc_huc_load_completed(i915);
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 9e310f4099f4..f60eedb0e92c 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -687,6 +687,10 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
dram_info->type = INTEL_DRAM_GDDR;
break;
+ case 9:
+ drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
+ dram_info->type = INTEL_DRAM_GDDR_ECC;
+ break;
default:
MISSING_CASE(val);
return -EINVAL;
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index 3debc9870a82..d09c4c684116 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -732,7 +732,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
fw_mem->core_data, fw_mem->core_code_alloc_size);
if (err)
- goto err_free_fw_core_data_obj;
+ goto err_free_kdata;
memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
@@ -742,10 +742,14 @@ pvr_fw_process(struct pvr_device *pvr_dev)
memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
/* We're finished with the firmware section memory on the CPU, unmap. */
- if (fw_core_data_ptr)
+ if (fw_core_data_ptr) {
pvr_fw_object_vunmap(fw_mem->core_data_obj);
- if (fw_core_code_ptr)
+ fw_core_data_ptr = NULL;
+ }
+ if (fw_core_code_ptr) {
pvr_fw_object_vunmap(fw_mem->core_code_obj);
+ fw_core_code_ptr = NULL;
+ }
pvr_fw_object_vunmap(fw_mem->data_obj);
fw_data_ptr = NULL;
pvr_fw_object_vunmap(fw_mem->code_obj);
@@ -753,7 +757,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
if (err)
- goto err_free_fw_core_data_obj;
+ goto err_free_kdata;
return 0;
@@ -763,13 +767,16 @@ err_free_kdata:
kfree(fw_mem->data);
kfree(fw_mem->code);
-err_free_fw_core_data_obj:
if (fw_core_data_ptr)
- pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
+ pvr_fw_object_vunmap(fw_mem->core_data_obj);
+ if (fw_mem->core_data_obj)
+ pvr_fw_object_destroy(fw_mem->core_data_obj);
err_free_fw_core_code_obj:
if (fw_core_code_ptr)
- pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
+ pvr_fw_object_vunmap(fw_mem->core_code_obj);
+ if (fw_mem->core_code_obj)
+ pvr_fw_object_destroy(fw_mem->core_code_obj);
err_free_fw_data_obj:
if (fw_data_ptr)
@@ -836,6 +843,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev)
struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
pvr_fw_fini_fwif_connection_ctl(pvr_dev);
+
+ kfree(fw_mem->core_data);
+ kfree(fw_mem->core_code);
+ kfree(fw_mem->data);
+ kfree(fw_mem->code);
+
if (fw_mem->core_code_obj)
pvr_fw_object_destroy(fw_mem->core_code_obj);
if (fw_mem->core_data_obj)
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 1cdb3cfd058d..59b334d094fa 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -671,6 +671,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
geom_job->paired_job = frag_job;
frag_job->paired_job = geom_job;
+ /* The geometry job pvr_job structure is used when the fragment
+ * job is being prepared by the GPU scheduler. Have the fragment
+ * job hold a reference on the geometry job to prevent it from being
+ * freed until the fragment job has finished with it.
+ */
+ pvr_job_get(geom_job);
+
/* Skip the fragment job we just paired to the geometry job. */
i++;
}
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index eba69309bb6c..5e9bc0992824 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -866,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job)
struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
drm_sched_job_cleanup(sched_job);
+
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job)
+ pvr_job_put(job->paired_job);
+
job->paired_job = NULL;
pvr_job_put(job);
}
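Taken together, the pvr_job and pvr_queue hunks make the fragment job hold its own reference on the paired geometry job and release it in free_job, so the geometry job can no longer be freed while the scheduler is still using it through paired_job. A minimal lifetime sketch using kref; the real driver uses its own pvr_job_get()/pvr_job_put() wrappers:

        #include <linux/container_of.h>
        #include <linux/kref.h>
        #include <linux/slab.h>

        struct demo_job {
                struct kref ref;
                struct demo_job *paired_job;
        };

        static void demo_job_release(struct kref *ref)
        {
                kfree(container_of(ref, struct demo_job, ref));
        }

        /* Pairing takes a reference on behalf of the consumer... */
        static void demo_pair(struct demo_job *geom, struct demo_job *frag)
        {
                frag->paired_job = geom;
                kref_get(&geom->ref);           /* frag keeps geom alive */
        }

        /* ...and tearing the consumer down drops it. */
        static void demo_free_job(struct demo_job *frag)
        {
                if (frag->paired_job)
                        kref_put(&frag->paired_job->ref, demo_job_release);
                frag->paired_job = NULL;
        }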
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db961eade225..2016c1e7242f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+ if (bo->base.import_attach)
+ drm_prime_gem_destroy(&bo->base, bo->sg);
+
/*
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9ae2cee1c7c5..67e3c99de73a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
}
- if (gem->import_attach)
- drm_prime_gem_destroy(gem, nvbo->bo.sg);
-
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index 3d1dddb34603..7d531b6f4c09 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -94,6 +94,7 @@ struct rockchip_hdmi_qp {
struct gpio_desc *enable_gpio;
struct delayed_work hpd_work;
int port_id;
+ const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
};
struct rockchip_hdmi_qp_ctrl_ops {
@@ -461,6 +462,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return -ENODEV;
}
+ hdmi->ctrl_ops = cfg->ctrl_ops;
hdmi->dev = &pdev->dev;
hdmi->port_id = -ENODEV;
@@ -600,27 +602,8 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
- u32 val;
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap,
- hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
- val);
-
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
- RK3588_SET_HPD_PATH_MASK);
- regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
-
- if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
- RK3588_HDMI1_GRANT_SEL);
- else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
+ hdmi->ctrl_ops->io_init(hdmi);
dw_hdmi_qp_resume(dev, hdmi->hdmi);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 14958d6b3d2e..0a2840cbe8e2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -1754,9 +1754,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_DP1:
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP1_MUX, vp->id);
dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
break;
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f203ac5514ae..f778a4eee7c9 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -7,8 +7,6 @@ sti-drm-y := \
sti_compositor.o \
sti_crtc.o \
sti_plane.o \
- sti_crtc.o \
- sti_plane.o \
sti_hdmi.o \
sti_hdmi_tx3g4c28phy.o \
sti_dvo.o \
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index 7516f6cb36e4..b2fdb1a774fe 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -95,6 +95,9 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_ASSERT_TRUE(test,
drm_mode_parse_command_line_for_connector(cmdline,
connector,
@@ -129,7 +132,8 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- const struct drm_display_mode *expected_mode, *mode;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *expected_mode;
const char *cmdline = params->cmdline;
int ret;
@@ -149,6 +153,9 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
expected_mode = params->func(drm);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
index 59c8408c453c..1cfcb597b088 100644
--- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -7,6 +7,7 @@
#include <kunit/test.h>
#include <drm/drm_connector.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
static const struct drm_connector no_connector = {};
@@ -955,8 +956,15 @@ struct drm_cmdline_tv_option_test {
static void drm_test_cmdline_tv_options(struct kunit *test)
{
const struct drm_cmdline_tv_option_test *params = test->param_value;
- const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
+ struct drm_display_mode *expected_mode;
struct drm_cmdline_mode mode = { };
+ int ret;
+
+ expected_mode = params->mode_fn(NULL);
+ KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
&no_connector, &mode));
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index a4eb68f0decc..6f6616cf4966 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -279,6 +279,28 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
+ * drm_kunit_add_mode_destroy_action() - Add a drm_destroy_mode kunit action
+ * @test: The test context object
+ * @mode: The drm_display_mode to destroy eventually
+ *
+ * Registers a kunit action that will destroy the drm_display_mode at
+ * the end of the test.
+ *
+ * If the action fails to register, the drm_display_mode is destroyed immediately.
+ *
+ * Returns:
+ * 0 on success, an error code otherwise.
+ */
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode)
+{
+ return kunit_add_action_or_reset(test,
+ kunit_action_drm_mode_destroy,
+ mode);
+}
+EXPORT_SYMBOL_GPL(drm_kunit_add_mode_destroy_action);
+
+/**
* drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
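A converted test using the new helper looks roughly like the callers elsewhere in this series; this sketch assumes the usual drm_kunit device fixture stored in test->priv:

    #include <kunit/test.h>
    #include <drm/drm_edid.h>
    #include <drm/drm_kunit_helpers.h>
    #include <drm/drm_modes.h>

    static void drm_test_example(struct kunit *test)
    {
            struct my_test_priv *priv = test->priv;   /* hypothetical fixture */
            struct drm_display_mode *mode;
            int ret;

            mode = drm_mode_find_dmt(priv->drm, 1024, 768, 60, false);
            KUNIT_ASSERT_NOT_NULL(test, mode);

            /* from here on the mode is freed when the test ends, even
             * if a later assertion aborts the test early
             */
            ret = drm_kunit_add_mode_destroy_action(test, mode);
            KUNIT_ASSERT_EQ(test, ret, 0);

            KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
    }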
diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c
index 6ed51f99e133..f5b20f92df8b 100644
--- a/drivers/gpu/drm/tests/drm_modes_test.c
+++ b/drivers/gpu/drm/tests/drm_modes_test.c
@@ -40,6 +40,7 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@@ -47,6 +48,9 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@@ -70,6 +74,7 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
+ int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@@ -77,9 +82,15 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
mode = drm_mode_analog_ntsc_480i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@@ -87,6 +98,7 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@@ -94,6 +106,9 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@@ -117,6 +132,7 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
+ int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@@ -124,9 +140,15 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
mode = drm_mode_analog_pal_576i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@@ -134,6 +156,7 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
+ int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_MONOCHROME,
@@ -141,6 +164,9 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
+ ret = drm_kunit_add_mode_destroy_action(test, mode);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c
index bc09ff38aca1..db0e4f5df275 100644
--- a/drivers/gpu/drm/tests/drm_probe_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c
@@ -98,7 +98,7 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_display_mode *mode;
- const struct drm_display_mode *expected;
+ struct drm_display_mode *expected;
size_t len;
int ret;
@@ -134,6 +134,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
}
if (params->num_expected_modes >= 2) {
@@ -145,6 +148,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+
+ ret = drm_kunit_add_mode_destroy_action(test, expected);
+ KUNIT_ASSERT_EQ(test, ret, 0);
}
mutex_unlock(&priv->drm->mode_config.mutex);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index dde8fc1a3689..90c99d83c4cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -115,13 +115,14 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_context_init)
virtio_gpu_create_context(obj->dev, file);
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return -ENOMEM;
- virtio_gpu_array_add_obj(objs, obj);
+ if (vfpriv->context_created) {
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
- if (vfpriv->ctx_id)
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+ }
out_notify:
virtio_gpu_notify(vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a6f5a78f436a..87e584add042 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -366,12 +366,6 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
obj = new_state->fb->obj[0];
- if (obj->import_attach) {
- ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
- if (ret)
- return ret;
- }
-
if (bo->dumb || obj->import_attach) {
vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
vgdev->fence_drv.context,
@@ -380,7 +374,21 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return -ENOMEM;
}
+ if (obj->import_attach) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ goto err_fence;
+ }
+
return 0;
+
+err_fence:
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
+ }
+
+ return ret;
}
static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
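The reordering above follows the standard rule for multi-step prepare paths: run the fallible step after the resource it must unwind on failure exists, and unwind through a single error ladder. Sketched generically (types and helpers hypothetical):

    static int prepare(struct state *st)
    {
            int ret;

            st->fence = fence_alloc();
            if (!st->fence)
                    return -ENOMEM;

            ret = import_step(st);         /* may fail */
            if (ret)
                    goto err_put_fence;

            return 0;

    err_put_fence:
            fence_put(st->fence);
            st->fence = NULL;
            return ret;
    }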
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index fe6a0b018571..4de2a63ccd18 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -321,6 +321,7 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
obj = &bo->base.base;
+ obj->resv = buf->resv;
obj->funcs = &virtgpu_gem_dma_buf_funcs;
drm_gem_private_object_init(dev, obj, buf->size);
diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
index a255946b6f77..8cfcd3360896 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
@@ -41,6 +41,7 @@
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE BIT(10) /* gen12 */
#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */
#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 72ef0b6fc425..9f8667ebba85 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -585,6 +585,7 @@ struct xe_device {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 03072e094991..084cbdeba8ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -322,6 +322,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
+
/**
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
@@ -346,6 +353,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
u32 action[MAX_TLB_INVALIDATION_LEN];
+ u64 length = end - start;
int len = 0;
xe_gt_assert(gt, fence);
@@ -358,11 +366,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation) {
+ if (!xe->info.has_range_tlb_invalidation ||
+ length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
} else {
u64 orig_start = start;
- u64 length = end - start;
u64 align;
if (length < SZ_4K)
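roundup_pow_of_two() operates on unsigned long, so the largest length it can round without overflowing to zero is rounddown_pow_of_two(ULONG_MAX); longer ranges must fall back to a full invalidation. A sketch of the clamp, with hypothetical invalidation helpers:

    #include <linux/log2.h>

    #define MAX_RANGE_LEN rounddown_pow_of_two(ULONG_MAX)

    static void invalidate(u64 start, u64 end)
    {
            u64 length = end - start;

            if (length > MAX_RANGE_LEN) {
                    full_invalidation();    /* hypothetical */
            } else {
                    /* guaranteed not to overflow now */
                    range_invalidation(start, roundup_pow_of_two(length));
            }
    }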
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 85215313976c..43b1192ba61c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -1070,6 +1070,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ ret = -EIO;
goto out;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 8c05fd30b7df..93241fd0a4ba 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -389,12 +389,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
blit_cctl_val,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- /* Use Fixed slice CCS mode */
- { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
- XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
- XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
- RCU_MODE_FIXED_SLICE_CCS_MODE))
- },
/* Disable WMTP if HW doesn't support it */
{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@@ -461,6 +455,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ /* Use Fixed slice CCS mode */
+ { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+ XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+ XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+ RCU_MODE_FIXED_SLICE_CCS_MODE))
+ },
};
xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index b53e8d2accdb..a440442b4d72 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max)
return timeout >= min && timeout <= max;
}
-static void kobj_xe_hw_engine_release(struct kobject *kobj)
+static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
+static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->show(kobj, kattr, buf);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->store(kobj, kattr, buf, count);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+ .show = xe_hw_engine_class_sysfs_attr_show,
+ .store = xe_hw_engine_class_sysfs_attr_store,
+};
+
static const struct kobj_type kobj_xe_hw_engine_type = {
- .release = kobj_xe_hw_engine_release,
- .sysfs_ops = &kobj_sysfs_ops
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
+};
+
+static const struct kobj_type kobj_xe_hw_engine_type_def = {
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
};
static ssize_t job_timeout_max_store(struct kobject *kobj,
@@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &kobj_xe_hw_engine_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type_def);
err = kobject_add(kobj, parent, "%s", ".defaults");
if (err)
goto err_object;
@@ -559,57 +606,6 @@ err_object:
return err;
}
-static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
-{
- kfree(kobj);
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->show) {
- xe_pm_runtime_get(xe);
- ret = kattr->show(kobj, kattr, buf);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t count)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->store) {
- xe_pm_runtime_get(xe);
- ret = kattr->store(kobj, kattr, buf, count);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
- .show = xe_hw_engine_class_sysfs_attr_show,
- .store = xe_hw_engine_class_sysfs_attr_store,
-};
-
-static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
- .release = xe_hw_engine_sysfs_kobj_release,
- .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
-};
static void hw_engine_class_sysfs_fini(void *arg)
{
@@ -640,7 +636,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type);
err = kobject_add(kobj, gt->sysfs, "engines");
if (err)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index df4282c71bf0..5a3e89022c38 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1177,7 +1177,7 @@ err:
err_sync:
/* Sync partial copies if any. FIXME: job_mutex? */
if (fence) {
- dma_fence_wait(m->fence, false);
+ dma_fence_wait(fence, false);
dma_fence_put(fence);
}
@@ -1547,7 +1547,7 @@ void xe_migrate_wait(struct xe_migrate *m)
static u32 pte_update_cmd_size(u64 size)
{
u32 num_dword;
- u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
/*
@@ -1558,7 +1558,7 @@ static u32 pte_update_cmd_size(u64 size)
* 2 dword for the page table's physical location
* 2*n dword for value of pte to fill (each pte entry is 2 dwords)
*/
- num_dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff);
+ num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff);
num_dword += entries * 2;
return num_dword;
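DIV_ROUND_UP() expands to a plain C division, which for a u64 numerator emits a 64-bit division libcall that 32-bit kernels do not provide; DIV_U64_ROUND_UP() routes through div_u64() instead. A sketch, assuming 4 KiB pages and an entry count that fits in 32 bits:

    #include <linux/math64.h>

    static u32 num_pte_entries(u64 size_bytes)
    {
            /* div_u64()-backed: links on 32-bit as well as 64-bit */
            return DIV_U64_ROUND_UP(size_bytes, 4096);
    }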
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 917fc16de866..a7582b097ae6 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -137,7 +137,8 @@ emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset,
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
- u32 flags = PIPE_CONTROL_CS_STALL |
+ u32 flags0 = 0;
+ u32 flags1 = PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -148,11 +149,15 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
PIPE_CONTROL_STORE_DATA_INDEX;
if (invalidate_tlb)
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags1 |= PIPE_CONTROL_TLB_INVALIDATE;
- flags &= ~mask_flags;
+ flags1 &= ~mask_flags;
- return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
+ if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
+ flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;
+
+ return emit_pipe_control(dw, i, flags0, flags1,
+ LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 3e829c87d7b4..f8c128524d9f 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -696,11 +696,14 @@ retry:
list_for_each_entry(block, blocks, link)
block->private = vr;
+ xe_bo_get(bo);
err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
&bo->devmem_allocation, ctx);
- xe_bo_unlock(bo);
if (err)
- xe_bo_put(bo); /* Creation ref */
+ xe_svm_devmem_release(&bo->devmem_allocation);
+
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
unlock:
mmap_read_unlock(mm);
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 0c738af24f7c..9b9e176992a8 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -32,8 +32,10 @@
GRAPHICS_VERSION(3001)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019794406 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fedcdb56fb6b..ab31eefa916b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -72,6 +72,8 @@ static const char * const cma_events[] = {
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
enum ib_gid_type gid_type);
+static void cma_netevent_work_handler(struct work_struct *_work);
+
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
size_t index = event;
@@ -1047,6 +1049,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;
+ INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler);
rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
if (parent)
@@ -5241,7 +5244,6 @@ static int cma_netevent_callback(struct notifier_block *self,
if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
neigh->ha, ETH_ALEN))
continue;
- INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
cma_id_get(current_id);
queue_work(cma_wq, &current_id->id.net_work);
}
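The cma change fixes a classic work_struct misuse: calling INIT_WORK() on an item that may already be queued corrupts the pending work, so initialization belongs at object creation and event handlers only queue. The safe pattern, sketched:

    #include <linux/workqueue.h>

    struct conn {
            struct work_struct net_work;
    };

    static void conn_net_work(struct work_struct *work)
    {
            /* ... handle the network event ... */
    }

    static void conn_create(struct conn *c)
    {
            INIT_WORK(&c->net_work, conn_net_work);   /* exactly once */
    }

    static void conn_netevent(struct conn *c)
    {
            /* never re-INIT_WORK() here; queue_work() simply returns
             * false if the item is already pending
             */
            queue_work(system_wq, &c->net_work);
    }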
diff --git a/drivers/infiniband/core/ucaps.c b/drivers/infiniband/core/ucaps.c
index 6853c6d078f9..de5cb8bf0a61 100644
--- a/drivers/infiniband/core/ucaps.c
+++ b/drivers/infiniband/core/ucaps.c
@@ -170,7 +170,7 @@ int ib_create_ucap(enum rdma_user_cap type)
ucap->dev.class = &ucaps_class;
ucap->dev.devt = MKDEV(MAJOR(ucaps_base_dev), type);
ucap->dev.release = ucap_dev_release;
- ret = dev_set_name(&ucap->dev, ucap_names[type]);
+ ret = dev_set_name(&ucap->dev, "%s", ucap_names[type]);
if (ret)
goto err_device;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e9fa22d31c23..c48ef6083020 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -76,12 +76,14 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
npfns = (end - start) >> PAGE_SHIFT;
umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
+ npfns, sizeof(*umem_odp->pfn_list),
+ GFP_KERNEL | __GFP_NOWARN);
if (!umem_odp->pfn_list)
return -ENOMEM;
umem_odp->dma_list = kvcalloc(
- ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
+ ndmas, sizeof(*umem_odp->dma_list),
+ GFP_KERNEL | __GFP_NOWARN);
if (!umem_odp->dma_list) {
ret = -ENOMEM;
goto out_pfn_list;
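Since the registration size comes from userspace, allocation failure here is an expected outcome rather than a kernel bug; adding __GFP_NOWARN suppresses the allocation-failure splat while kvcalloc() still falls back from kmalloc to vmalloc for large arrays. A sketch:

    #include <linux/slab.h>

    static unsigned long *alloc_pfn_list(size_t npfns)
    {
            /* user-sized allocation: fail quietly, caller returns -ENOMEM */
            return kvcalloc(npfns, sizeof(unsigned long),
                            GFP_KERNEL | __GFP_NOWARN);
    }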
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 9082b3fd2b47..063801384b2b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1774,10 +1774,7 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
ib_srq);
struct bnxt_re_dev *rdev = srq->rdev;
struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
- struct bnxt_qplib_nq *nq = NULL;
- if (qplib_srq->cq)
- nq = qplib_srq->cq->nq;
if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
free_page((unsigned long)srq->uctx_srq_page);
hash_del(&srq->hash_entry);
@@ -1785,8 +1782,6 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
ib_umem_release(srq->umem);
atomic_dec(&rdev->stats.res.srq_count);
- if (nq)
- nq->budget--;
return 0;
}
@@ -1827,7 +1822,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct ib_udata *udata)
{
struct bnxt_qplib_dev_attr *dev_attr;
- struct bnxt_qplib_nq *nq = NULL;
struct bnxt_re_ucontext *uctx;
struct bnxt_re_dev *rdev;
struct bnxt_re_srq *srq;
@@ -1873,7 +1867,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
- nq = &rdev->nqr->nq[0];
if (udata) {
rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
@@ -1908,8 +1901,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
goto fail;
}
}
- if (nq)
- nq->budget++;
active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
if (active_srqs > rdev->stats.res.srq_watermark)
rdev->stats.res.srq_watermark = active_srqs;
@@ -3079,7 +3070,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
ib_umem_release(cq->umem);
atomic_dec(&rdev->stats.res.cq_count);
- nq->budget--;
kfree(cq->cql);
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index cf89a8db4f64..8d0b63d4b50a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -763,7 +763,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (ret)
return ret;
}
- dma_set_max_seg_size(dev, UINT_MAX);
+ dma_set_max_seg_size(dev, SZ_2G);
ret = ib_register_device(ib_dev, "hns_%d", dev);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 251246c73b33..0ff9f18a71e8 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -3461,7 +3461,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
&UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
const struct uapi_definition mlx5_ib_flow_defs[] = {
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_FLOW_MATCHER),
UAPI_DEF_CHAIN_OBJ_TREE(
@@ -3472,7 +3471,6 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_STEERING_ANCHOR,
UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
-#endif
{},
};
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 4ddcd5860e0f..11eca39b73a9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -397,7 +397,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
if (!us_ibdev) {
usnic_err("Device %s context alloc failed\n",
netdev_name(pci_get_drvdata(dev)));
- return ERR_PTR(-EFAULT);
+ return NULL;
}
us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
@@ -517,8 +517,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
}
us_ibdev = usnic_ib_device_add(parent_pci);
- if (IS_ERR_OR_NULL(us_ibdev)) {
- us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
+ if (!us_ibdev) {
+ us_ibdev = ERR_PTR(-EFAULT);
goto out;
}
@@ -586,10 +586,10 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
}
pf = usnic_ib_discover_pf(vf->vnic);
- if (IS_ERR_OR_NULL(pf)) {
- usnic_err("Failed to discover pf of vnic %s with err%ld\n",
- pci_name(pdev), PTR_ERR(pf));
- err = pf ? PTR_ERR(pf) : -EFAULT;
+ if (IS_ERR(pf)) {
+ err = PTR_ERR(pf);
+ usnic_err("Failed to discover pf of vnic %s with err%d\n",
+ pci_name(pdev), err);
goto out_clean_vnic;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index feb386d98d1d..0bc3fbb6554f 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -140,6 +140,12 @@ static inline int qp_mtu(struct rxe_qp *qp)
return IB_MTU_4096;
}
+static inline bool is_odp_mr(struct rxe_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_odp;
+}
+
void free_rd_atomic_resource(struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
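Folding the config test into the predicate means the compiler sees a constant false when CONFIG_INFINIBAND_ON_DEMAND_PAGING is off, so every is_odp_mr() branch in the hunks below is eliminated at compile time without #ifdef at the call sites. The idiom in general form (CONFIG_MY_FEATURE and the types are placeholders):

    #include <linux/kconfig.h>

    struct my_obj {
            struct my_priv {
                    bool feature_active;
            } *priv;
    };

    static inline bool obj_uses_feature(const struct my_obj *obj)
    {
            /* constant-folds to false when the option is disabled, so
             * dead-code elimination drops the guarded branches
             */
            return IS_ENABLED(CONFIG_MY_FEATURE) &&
                   obj->priv && obj->priv->feature_active;
    }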
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 868d2f0b74e9..432d864c3ce9 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -323,7 +323,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
return err;
}
- if (mr->umem->is_odp)
+ if (is_odp_mr(mr))
return rxe_odp_mr_copy(mr, iova, addr, length, dir);
else
return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
@@ -536,7 +536,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
u64 *va;
/* ODP is not supported right now. WIP. */
- if (mr->umem->is_odp)
+ if (is_odp_mr(mr))
return RESPST_ERR_UNSUPPORTED_OPCODE;
/* See IBA oA19-28 */
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 54ba9ee1acc5..5d9174e408db 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -650,7 +650,7 @@ static enum resp_states process_flush(struct rxe_qp *qp,
struct resp_res *res = qp->resp.res;
/* ODP is not supported right now. WIP. */
- if (mr->umem->is_odp)
+ if (is_odp_mr(mr))
return RESPST_ERR_UNSUPPORTED_OPCODE;
/* oA19-14, oA19-15 */
@@ -706,7 +706,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
if (!res->replay) {
u64 iova = qp->resp.va + qp->resp.offset;
- if (mr->umem->is_odp)
+ if (is_odp_mr(mr))
err = rxe_odp_atomic_op(mr, iova, pkt->opcode,
atmeth_comp(pkt),
atmeth_swap_add(pkt),
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index d525ab43a4ae..dd7d030d2e89 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -487,17 +487,6 @@ static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
/* VCMDQ Resource Helpers */
-static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
-{
- struct arm_smmu_queue *q = &vcmdq->cmdq.q;
- size_t nents = 1 << q->llq.max_n_shift;
- size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;
-
- if (!q->base)
- return;
- dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
-}
-
static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
@@ -560,7 +549,8 @@ static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
char header[64];
- tegra241_vcmdq_free_smmu_cmdq(vcmdq);
+ /* Note that the lvcmdq queue memory space is managed by devres */
+
tegra241_vintf_deinit_lvcmdq(vintf, lidx);
dev_dbg(vintf->cmdqv->dev,
@@ -768,13 +758,13 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
if (!vintf)
- goto out_fallback;
+ return -ENOMEM;
/* Init VINTF0 for in-kernel use */
ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
if (ret) {
dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
- goto free_vintf;
+ return ret;
}
/* Preallocate logical VCMDQs to VINTF0 */
@@ -783,24 +773,12 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
if (IS_ERR(vcmdq))
- goto free_lvcmdq;
+ return PTR_ERR(vcmdq);
}
/* Now, we are ready to run all the impl ops */
smmu->impl_ops = &tegra241_cmdqv_impl_ops;
return 0;
-
-free_lvcmdq:
- for (lidx--; lidx >= 0; lidx--)
- tegra241_vintf_free_lvcmdq(vintf, lidx);
- tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
-free_vintf:
- kfree(vintf);
-out_fallback:
- dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
- smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
- tegra241_cmdqv_remove(smmu);
- return 0;
}
#ifdef CONFIG_IOMMU_DEBUGFS
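The hand-rolled free helper could be removed because the queue memory comes from the managed DMA API: devres releases anything obtained with dmam_alloc_coherent() when the device unbinds, so teardown paths need no explicit free. The allocation side, sketched with placeholder types:

    #include <linux/dma-mapping.h>

    struct queue {
            void *base;
            dma_addr_t base_dma;
    };

    static int queue_alloc(struct device *dev, struct queue *q, size_t qsz)
    {
            /* managed allocation: freed automatically by devres at unbind */
            q->base = dmam_alloc_coherent(dev, qsz, &q->base_dma, GFP_KERNEL);
            return q->base ? 0 : -ENOMEM;
    }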
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cb7e29dcac15..a775e4dbe06f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1754,7 +1754,7 @@ static size_t cookie_msi_granule(const struct iommu_domain *domain)
return PAGE_SIZE;
default:
BUG();
- };
+ }
}
static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
@@ -1766,7 +1766,7 @@ static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
return &domain->msi_cookie->msi_page_list;
default:
BUG();
- };
+ }
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 69e23e017d9e..317266aca6e2 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -832,7 +832,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (&data->domain->domain != &exynos_identity_domain) {
+ if (data->domain) {
dev_dbg(data->sysmmu, "saving state\n");
__sysmmu_disable(data);
}
@@ -850,7 +850,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
- if (&data->domain->domain != &exynos_identity_domain) {
+ if (data->domain) {
dev_dbg(data->sysmmu, "restoring state\n");
__sysmmu_enable(data);
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6e67cc66a204..b29da2d96d0b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3835,7 +3835,6 @@ static void intel_iommu_release_device(struct device *dev)
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
kfree(info);
- set_dma_ops(dev, NULL);
}
static void intel_iommu_get_resv_regions(struct device *device,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index ea3ca5203919..3bc2a03cceca 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1287,43 +1287,44 @@ static struct irq_chip intel_ir_chip = {
};
/*
- * With posted MSIs, all vectors are multiplexed into a single notification
- * vector. Devices MSIs are then dispatched in a demux loop where
- * EOIs can be coalesced as well.
+ * With posted MSIs, the MSI vectors are multiplexed into a single notification
+ * vector, and only the notification vector is sent to the APIC IRR. Device
+ * MSIs are then dispatched in a demux loop that harvests the MSIs from the
+ * CPU's Posted Interrupt Request bitmap. I.e. Posted MSIs never get sent to
+ * the APIC IRR, and thus do not need an EOI. The notification handler instead
+ * performs a single EOI after processing the PIR.
*
- * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack()
- * function. Instead EOI is performed by the posted interrupt notification
- * handler.
+ * Note! Pending SMP/CPU affinity changes, which are per MSI, must still be
+ * honored; only the APIC EOI is omitted.
*
* For the example below, 3 MSIs are coalesced into one CPU notification. Only
- * one apic_eoi() is needed.
+ * one apic_eoi() is needed, but each MSI needs to process pending changes to
+ * its CPU affinity.
*
* __sysvec_posted_msi_notification()
* irq_enter();
* handle_edge_irq()
* irq_chip_ack_parent()
- * dummy(); // No EOI
+ * irq_move_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * dummy(); // No EOI
+ * irq_move_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* handle_edge_irq()
* irq_chip_ack_parent()
- * dummy(); // No EOI
+ * irq_move_irq(); // No EOI
* handle_irq_event()
* driver_handler()
* apic_eoi()
* irq_exit()
+ *
*/
-
-static void dummy_ack(struct irq_data *d) { }
-
static struct irq_chip intel_ir_chip_post_msi = {
.name = "INTEL-IR-POST",
- .irq_ack = dummy_ack,
+ .irq_ack = irq_move_irq,
.irq_set_affinity = intel_ir_set_affinity,
.irq_compose_msi_msg = intel_ir_compose_msi_msg,
.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c8033ca66377..4f91a740c15f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -538,6 +538,9 @@ static void iommu_deinit_device(struct device *dev)
dev->iommu_group = NULL;
module_put(ops->owner);
dev_iommu_free(dev);
+#ifdef CONFIG_IOMMU_DMA
+ dev->dma_iommu = false;
+#endif
}
static struct iommu_domain *pasid_array_entry_to_domain(void *entry)
@@ -2717,7 +2720,8 @@ int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
* if upper layers showed interest and installed a fault handler,
* invoke it.
*/
- if (domain->handler)
+ if (domain->cookie_type == IOMMU_COOKIE_FAULT_HANDLER &&
+ domain->handler)
ret = domain->handler(domain, dev, iova, flags,
domain->handler_token);
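The added cookie_type test matters because domain->handler shares storage with the other cookie variants in a union; a union member may only be read after checking the discriminant. The rule in miniature:

    enum cookie_type { COOKIE_NONE, COOKIE_FAULT_HANDLER, COOKIE_MSI };

    struct dom {
            enum cookie_type cookie_type;
            union {
                    int (*handler)(void *token);   /* FAULT_HANDLER only */
                    void *msi_cookie;              /* MSI only */
            };
            void *handler_token;
    };

    static int report_fault(struct dom *d)
    {
            /* check the tag before touching the union member */
            if (d->cookie_type == COOKIE_FAULT_HANDLER && d->handler)
                    return d->handler(d->handler_token);
            return -ENOSYS;
    }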
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 074daf1aac4e..e424b279a8cd 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1081,31 +1081,24 @@ static int ipmmu_probe(struct platform_device *pdev)
}
}
+ platform_set_drvdata(pdev, mmu);
/*
* Register the IPMMU to the IOMMU subsystem in the following cases:
* - R-Car Gen2 IPMMU (all devices registered)
* - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
*/
- if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
- dev_name(&pdev->dev));
- if (ret)
- return ret;
-
- ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
- if (ret)
- return ret;
- }
+ if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu))
+ return 0;
- /*
- * We can't create the ARM mapping here as it requires the bus to have
- * an IOMMU, which only happens when bus_set_iommu() is called in
- * ipmmu_init() after the probe function returns.
- */
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev));
+ if (ret)
+ return ret;
- platform_set_drvdata(pdev, mmu);
+ ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
+ if (ret)
+ iommu_device_sysfs_remove(&mmu->iommu);
- return 0;
+ return ret;
}
static void ipmmu_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 034b0e670384..df98d0c65f54 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1372,15 +1372,6 @@ static int mtk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
mutex_init(&data->mutex);
- ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
- "mtk-iommu.%pa", &ioaddr);
- if (ret)
- goto out_link_remove;
-
- ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
- if (ret)
- goto out_sysfs_remove;
-
if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) {
list_add_tail(&data->list, data->plat_data->hw_list);
data->hw_list = data->plat_data->hw_list;
@@ -1390,19 +1381,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->hw_list = &data->hw_list_head;
}
+ ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+ "mtk-iommu.%pa", &ioaddr);
+ if (ret)
+ goto out_list_del;
+
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
+ if (ret)
+ goto out_sysfs_remove;
+
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
if (ret)
- goto out_list_del;
+ goto out_device_unregister;
}
return ret;
-out_list_del:
- list_del(&data->list);
+out_device_unregister:
iommu_device_unregister(&data->iommu);
out_sysfs_remove:
iommu_device_sysfs_remove(&data->iommu);
-out_link_remove:
+out_list_del:
+ list_del(&data->list);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
device_link_remove(data->smicomm_dev, dev);
out_runtime_disable:
diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c
index 49a19db2d1e1..4cce24233f0f 100644
--- a/drivers/irqchip/irq-bcm2712-mip.c
+++ b/drivers/irqchip/irq-bcm2712-mip.c
@@ -163,6 +163,7 @@ static const struct irq_domain_ops mip_middle_domain_ops = {
static const struct msi_parent_ops mip_msi_parent_ops = {
.supported_flags = MIP_MSI_FLAGS_SUPPORTED,
.required_flags = MIP_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PCI_MSI,
.prefix = "MIP-MSI-",
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index ee682e87eb8b..375b55aa0acd 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -151,6 +151,7 @@ static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
static const struct msi_parent_ops sg2042_msi_parent_ops = {
.required_flags = SG2042_MSI_FLAGS_REQUIRED,
.supported_flags = SG2042_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "SG2042-",
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdd36409dd9b..665b9af684fc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -850,8 +850,9 @@ static int bond_check_dev_link(struct bonding *bond,
struct net_device *slave_dev, int reporting)
{
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- struct ifreq ifr;
struct mii_ioctl_data *mii;
+ struct ifreq ifr;
+ int ret;
if (!reporting && !netif_running(slave_dev))
return 0;
@@ -860,9 +861,13 @@ static int bond_check_dev_link(struct bonding *bond,
return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
/* Try to get link status using Ethtool first. */
- if (slave_dev->ethtool_ops->get_link)
- return slave_dev->ethtool_ops->get_link(slave_dev) ?
- BMSR_LSTATUS : 0;
+ if (slave_dev->ethtool_ops->get_link) {
+ netdev_lock_ops(slave_dev);
+ ret = slave_dev->ethtool_ops->get_link(slave_dev);
+ netdev_unlock_ops(slave_dev);
+
+ return ret ? BMSR_LSTATUS : 0;
+ }
/* Ethtool can't be used, fallback to MII ioctls. */
if (slave_ops->ndo_eth_ioctl) {
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index 46201c126703..7107a37da36c 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -902,15 +902,16 @@ static int rkcanfd_probe(struct platform_device *pdev)
priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
- if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.do_set_mode = rkcanfd_set_mode;
priv->can.do_get_berr_counter = rkcanfd_get_berr_counter;
priv->ndev = ndev;
match = device_get_match_data(&pdev->dev);
- if (match)
+ if (match) {
priv->devtype_data = *(struct rkcanfd_devtype_data *)match;
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ }
err = can_rx_offload_add_manual(ndev, &priv->offload,
RKCANFD_NAPI_WEIGHT);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 61d164ffb3ae..e5ba71897906 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -737,6 +737,15 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
+static void b53_enable_stp(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc |= GC_RX_BPDU_EN;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
static u16 b53_default_pvid(struct b53_device *dev)
{
if (is5325(dev) || is5365(dev))
@@ -876,6 +885,7 @@ static int b53_switch_reset(struct b53_device *dev)
}
b53_enable_mib(dev);
+ b53_enable_stp(dev);
return b53_flush_arl(dev, FAST_AGE_STATIC);
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 29a89ab4b789..08db846cda8d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1852,6 +1852,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
if (!chip->info->ops->vtu_getnext)
return -EOPNOTSUPP;
+ memset(entry, 0, sizeof(*entry));
+
entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
entry->valid = false;
@@ -1960,7 +1962,16 @@ static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
struct mv88e6xxx_mst *mst, *tmp;
int err;
- if (!sid)
+ /* If the SID is zero, it is for a VLAN mapped to the default MSTI,
+ * and mv88e6xxx_stu_setup() made sure it is always present, and thus,
+ * should not be removed here.
+ *
+ * If the chip lacks STU support, numerically the "sid" variable will
+ * happen to also be zero, but we don't want to rely on that fact, so
+ * we explicitly test that first. In that case, there is also nothing
+ * to do here.
+ */
+ if (!mv88e6xxx_has_stu(chip) || !sid)
return 0;
list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
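The memset() in mv88e6xxx_vtu_get() is needed because not every per-chip vtu_getnext op writes every field of the entry; zeroing the out-parameter first keeps stale stack data out of the result. The defensive shape, with placeholder types:

    #include <linux/string.h>

    static int vtu_get(struct chip *chip, u16 vid, struct vtu_entry *entry)
    {
            memset(entry, 0, sizeof(*entry));   /* no stale stack contents */
            entry->vid = vid ? vid - 1 : chip->max_vid;
            entry->valid = false;
            return chip->ops->vtu_getnext(chip, entry);
    }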
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 795c8df7b6a7..195460a0a0d4 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -736,7 +736,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
- dsa_devlink_region_destroy(chip->regions[i]);
+ if (chip->regions[i])
+ dsa_devlink_region_destroy(chip->regions[i]);
}
void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index ac37a4e738ae..04c5e3abd8d7 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -154,8 +154,9 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
- intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset),
- GFP_KERNEL);
+ intr_ctrl_regset = devm_kzalloc(pdsc->dev,
+ sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
if (!intr_ctrl_regset)
return;
intr_ctrl_regset->regs = intr_ctrl_regs;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 83608b7447f4..3d9205b9a8c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -787,7 +787,7 @@ tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
if (BNXT_TX_PTP_IS_SET(lflags)) {
- txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
atomic64_inc(&bp->ptp_cfg->stats.ts_err);
if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
/* set SKB to err so PTP worker will clean up */
@@ -795,7 +795,7 @@ tx_kick_pending:
}
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
- txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 7f3f5afa864f..1546c3db08f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -2270,6 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
+ kvfree(eth_filter->port[i].loc_array);
goto free_eth_finfo;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index f8cdab62bf85..7725cb0c5c8a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -108,14 +108,16 @@ struct hbg_irq_info {
bool re_enable;
bool need_print;
bool need_reset;
- u64 count;
- void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
+ void (*irq_handle)(struct hbg_priv *priv,
+ const struct hbg_irq_info *info);
};
struct hbg_vector {
char name[HBG_VECTOR_NUM][32];
- struct hbg_irq_info *info_array;
+
+ u64 *stats_array;
+ const struct hbg_irq_info *info_array;
u32 info_array_len;
};
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
index 5e0ba4d5b08d..01ad82d2f5cc 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -61,7 +61,7 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
{
struct net_device *netdev = dev_get_drvdata(s->private);
struct hbg_priv *priv = netdev_priv(netdev);
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
u32 i;
for (i = 0; i < priv->vectors.info_array_len; i++) {
@@ -73,7 +73,7 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
info->mask)),
str_true_false(info->need_reset),
str_true_false(info->need_print),
- info->count);
+ priv->vectors.stats_array[i]);
}
return 0;
@@ -106,6 +106,7 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
{
struct net_device *netdev = dev_get_drvdata(s->private);
struct hbg_priv *priv = netdev_priv(netdev);
+ bool np_link_fail;
seq_printf(s, "event handling state: %s\n",
state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
@@ -117,8 +118,10 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
reset_type_str[priv->reset_type]);
seq_printf(s, "need reset state: %s\n",
state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
- seq_printf(s, "np_link fail state: %s\n",
- state_str_true_false(priv, HBG_NIC_STATE_NP_LINK_FAIL));
+
+ np_link_fail = !hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+ seq_printf(s, "np_link fail state: %s\n", str_true_false(np_link_fail));
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
index d61c03f34ff0..f23fb5920c3c 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -234,7 +234,7 @@ static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
for (i = 0; i < vectors->info_array_len; i++)
if (vectors->info_array[i].mask == mask)
- return vectors->info_array[i].count;
+ return vectors->stats_array[i];
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 4e8cb66f601c..a0bcfb5a713d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -26,12 +26,15 @@ static void hbg_restore_mac_table(struct hbg_priv *priv)
static void hbg_restore_user_def_settings(struct hbg_priv *priv)
{
+ /* The host MAC address is always at index 0 of the MAC table. */
+ u64 rx_pause_addr = ether_addr_to_u64(priv->filter.mac_table[0].addr);
struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
hbg_restore_mac_table(priv);
hbg_hw_set_mtu(priv, priv->netdev->mtu);
hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
pause_param->rx_pause);
+ hbg_hw_set_rx_pause_mac_addr(priv, rx_pause_addr);
}
int hbg_rebuild(struct hbg_priv *priv)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 74a18033b444..9b65eef62b3f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -234,6 +234,10 @@ void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
{
hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+
+ /* only unicast filtering is supported, so set all multicast mask bits to 1 */
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX);
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX);
}
void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
@@ -242,6 +246,9 @@ void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en);
}
void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index e79e9ab3e530..8af0bc4cca21 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -6,7 +6,7 @@
#include "hbg_hw.h"
static void hbg_irq_handle_err(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
if (irq_info->need_print)
dev_err(&priv->pdev->dev,
@@ -17,30 +17,30 @@ static void hbg_irq_handle_err(struct hbg_priv *priv,
}
static void hbg_irq_handle_tx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->tx_ring.napi);
}
static void hbg_irq_handle_rx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->rx_ring.napi);
}
static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
priv->stats.rx_fifo_less_empty_thrsld_cnt++;
}
#define HBG_IRQ_I(name, handle) \
- {#name, HBG_INT_MSK_##name##_B, false, false, false, 0, handle}
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, handle}
#define HBG_ERR_IRQ_I(name, need_print, ndde_reset) \
{#name, HBG_INT_MSK_##name##_B, true, need_print, \
- ndde_reset, 0, hbg_irq_handle_err}
+ ndde_reset, hbg_irq_handle_err}
-static struct hbg_irq_info hbg_irqs[] = {
+static const struct hbg_irq_info hbg_irqs[] = {
HBG_IRQ_I(RX, hbg_irq_handle_rx),
HBG_IRQ_I(TX, hbg_irq_handle_tx),
HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
@@ -64,7 +64,7 @@ static struct hbg_irq_info hbg_irqs[] = {
static irqreturn_t hbg_irq_handle(int irq_num, void *p)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
struct hbg_priv *priv = p;
u32 status;
u32 i;
@@ -79,7 +79,7 @@ static irqreturn_t hbg_irq_handle(int irq_num, void *p)
hbg_hw_irq_enable(priv, info->mask, false);
hbg_hw_irq_clear(priv, info->mask);
- info->count++;
+ priv->vectors.stats_array[i]++;
if (info->irq_handle)
info->irq_handle(priv, info);
@@ -132,6 +132,12 @@ int hbg_irq_init(struct hbg_priv *priv)
irq_names_map[i]);
}
+ vectors->stats_array = devm_kcalloc(&priv->pdev->dev,
+ ARRAY_SIZE(hbg_irqs),
+ sizeof(u64), GFP_KERNEL);
+ if (!vectors->stats_array)
+ return -ENOMEM;
+
vectors->info_array = hbg_irqs;
vectors->info_array_len = ARRAY_SIZE(hbg_irqs);
return 0;
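Splitting the per-IRQ counter out of hbg_irq_info lets the descriptor table be const and shared by all devices, while the mutable counters become a parallel per-device array indexed the same way. The general structure, sketched:

    #include <linux/bits.h>
    #include <linux/device.h>

    struct irq_desc_info {            /* immutable, lives in rodata */
            const char *name;
            u32 mask;
    };

    static const struct irq_desc_info descs[] = {
            { "RX", BIT(0) },
            { "TX", BIT(1) },
    };

    struct dev_vectors {
            u64 *stats;               /* one counter per desc, per device */
    };

    static int vectors_init(struct device *dev, struct dev_vectors *v)
    {
            v->stats = devm_kcalloc(dev, ARRAY_SIZE(descs),
                                    sizeof(*v->stats), GFP_KERNEL);
            return v->stats ? 0 : -ENOMEM;
    }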
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 2ac5454338e4..2e64dc1ab355 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -21,7 +21,7 @@
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
u32 i;
for (i = 0; i < priv->vectors.info_array_len; i++) {
@@ -203,12 +203,12 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
return -EBUSY;
- hbg_hw_set_mtu(priv, new_mtu);
- WRITE_ONCE(netdev->mtu, new_mtu);
-
dev_dbg(&priv->pdev->dev,
"change mtu from %u to %u\n", netdev->mtu, new_mtu);
+ hbg_hw_set_mtu(priv, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index f29a937ad087..42b0083c9193 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -2,6 +2,7 @@
// Copyright (c) 2024 Hisilicon Limited.
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_mdio.h"
@@ -133,12 +134,17 @@ void hbg_fix_np_link_fail(struct hbg_priv *priv)
{
struct device *dev = &priv->pdev->dev;
+ rtnl_lock();
+
if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
dev_err(dev, "failed to fix the MAC link status\n");
priv->stats.np_link_fail_cnt = 0;
- return;
+ goto unlock;
}
+ if (!priv->mac.phydev->link)
+ goto unlock;
+
priv->stats.np_link_fail_cnt++;
dev_err(dev, "failed to link between MAC and PHY, try to fix...\n");
@@ -147,6 +153,9 @@ void hbg_fix_np_link_fail(struct hbg_priv *priv)
*/
hbg_phy_stop(priv);
hbg_phy_start(priv);
+
+unlock:
+ rtnl_unlock();
}
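
The rewritten hbg_fix_np_link_fail() takes the RTNL once and funnels every exit through a single unlock label, so no early return can leave the lock held. A runnable userspace analogue of that single-exit shape, with a pthread mutex standing in for the RTNL and purely illustrative retry logic:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int fail_cnt;

static void fix_link(int link_up)
{
	pthread_mutex_lock(&lock);

	if (fail_cnt >= 5) {
		fprintf(stderr, "giving up\n");
		fail_cnt = 0;
		goto unlock;		/* not a bare return: lock is held */
	}

	if (!link_up)
		goto unlock;

	fail_cnt++;
	printf("retrying, attempt %d\n", fail_cnt);

unlock:
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	fix_link(1);
	fix_link(0);
	return 0;
}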
static void hbg_phy_adjust_link(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index cc2cc612770d..a6e7f5e62b48 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -68,6 +68,7 @@
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B BIT(4)
#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
@@ -134,6 +135,8 @@
#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
+#define HBG_REG_STATION_ADDR_LOW_MSK_0 (HBG_REG_SGMII_BASE + 0x0230)
+#define HBG_REG_STATION_ADDR_LOW_MSK_1 (HBG_REG_SGMII_BASE + 0x0238)
/* PCU */
#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index c35cc5cb1185..2f265c0959c7 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -319,6 +319,7 @@ struct igc_adapter {
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
struct system_time_snapshot snapshot;
+ struct mutex ptm_lock; /* Only allow one PTM transaction at a time */
char fw_version[32];
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 8e449904aa7d..d19325b0e6e0 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -574,7 +574,10 @@
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2)
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
-#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */
+/* A short cycle time of 1us theoretically should work, but appears to be too
+ * short in practice.
+ */
+#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */
#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */
#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */
@@ -593,6 +596,7 @@
#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */
#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */
#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */
+#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */
/* PCIe PTM Cycle Control */
#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 30c72c343262..ddfa654db1e0 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7234,6 +7234,7 @@ static int igc_probe(struct pci_dev *pdev,
err_register:
igc_release_hw_control(adapter);
+ igc_ptp_stop(adapter);
err_eeprom:
if (!igc_check_reset_block(hw))
igc_reset_phy(hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index b6c60b3d0e3a..cfb589f9da56 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -963,45 +963,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat)
}
}
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */
+static void igc_ptm_trigger(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ /* To "manually" start the PTM cycle we need to set the
+ * trigger (TRIG) bit
+ */
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl |= IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Perform a flush after writing the CTRL register, otherwise
+ * the transaction may not start
+ */
+ wrfl();
+}
+
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */
+static void igc_ptm_reset(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Write to clear all status */
+ wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL);
+}
+
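
IGC_PTM_STAT is a write-1-to-clear (W1C) register, which is why igc_ptm_reset() writes the full IGC_PTM_STAT_ALL mask rather than zero. A small userspace model of W1C semantics; the register contents and names here are hypothetical:

#include <stdio.h>
#include <stdint.h>

#define STAT_ALL 0x3fu	/* GENMASK(5, 0) */

static uint32_t stat_reg = 0x12;	/* pretend hardware latched errors */

/* W1C: writing a 1 clears that bit; writing 0 leaves it alone. */
static void stat_write(uint32_t val)
{
	stat_reg &= ~val;
}

int main(void)
{
	stat_write(0);		/* no-op on a W1C register */
	printf("after writing 0:    %#x\n", stat_reg);
	stat_write(STAT_ALL);	/* clears every status bit */
	printf("after writing mask: %#x\n", stat_reg);
	return 0;
}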
static int igc_phc_get_syncdevicetime(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
{
- u32 stat, t2_curr_h, t2_curr_l, ctrl;
struct igc_adapter *adapter = ctx;
struct igc_hw *hw = &adapter->hw;
+ u32 stat, t2_curr_h, t2_curr_l;
int err, count = 100;
ktime_t t1, t2_curr;
- /* Get a snapshot of system clocks to use as historic value. */
- ktime_get_snapshot(&adapter->snapshot);
-
+ /* Doing this in a loop because in the event of a
+ * badly timed (ha!) system clock adjustment, we may
+ * get PTM errors from the PCI root, but these errors
+ * are transitory. Repeating the process returns valid
+ * data eventually.
+ */
do {
- /* Doing this in a loop because in the event of a
- * badly timed (ha!) system clock adjustment, we may
- * get PTM errors from the PCI root, but these errors
- * are transitory. Repeating the process returns valid
- * data eventually.
- */
+ /* Get a snapshot of system clocks to use as historic value. */
+ ktime_get_snapshot(&adapter->snapshot);
- /* To "manually" start the PTM cycle we need to clear and
- * then set again the TRIG bit.
- */
- ctrl = rd32(IGC_PTM_CTRL);
- ctrl &= ~IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
- ctrl |= IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
-
- /* The cycle only starts "for real" when software notifies
- * that it has read the registers, this is done by setting
- * VALID bit.
- */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
stat, IGC_PTM_STAT_SLEEP,
IGC_PTM_STAT_TIMEOUT);
+ igc_ptm_reset(hw);
+
if (err < 0) {
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
return err;
@@ -1010,15 +1027,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device,
if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID)
break;
- if (stat & ~IGC_PTM_STAT_VALID) {
- /* An error occurred, log it. */
- igc_ptm_log_error(adapter, stat);
- /* The STAT register is write-1-to-clear (W1C),
- * so write the previous error status to clear it.
- */
- wr32(IGC_PTM_STAT, stat);
- continue;
- }
+ igc_ptm_log_error(adapter, stat);
} while (--count);
if (!count) {
@@ -1050,9 +1059,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
{
struct igc_adapter *adapter = container_of(ptp, struct igc_adapter,
ptp_caps);
+ int ret;
+
+ /* This blocks until any in-progress PTM transactions complete */
+ mutex_lock(&adapter->ptm_lock);
- return get_device_system_crosststamp(igc_phc_get_syncdevicetime,
- adapter, &adapter->snapshot, cts);
+ ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime,
+ adapter, &adapter->snapshot, cts);
+ mutex_unlock(&adapter->ptm_lock);
+
+ return ret;
}
static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
@@ -1154,6 +1170,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
spin_lock_init(&adapter->ptp_tx_lock);
spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
+ mutex_init(&adapter->ptm_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -1166,6 +1183,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
netdev_err(netdev, "ptp_clock_register failed\n");
+ mutex_destroy(&adapter->ptm_lock);
} else if (adapter->ptp_clock) {
netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
@@ -1195,10 +1213,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u32 ctrl;
+ mutex_lock(&adapter->ptm_lock);
ctrl = rd32(IGC_PTM_CTRL);
ctrl &= ~IGC_PTM_CTRL_EN;
wr32(IGC_PTM_CTRL, ctrl);
+ mutex_unlock(&adapter->ptm_lock);
}
/**
@@ -1229,13 +1249,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
**/
void igc_ptp_stop(struct igc_adapter *adapter)
{
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
igc_ptp_suspend(adapter);
+ adapter->ptp_flags &= ~IGC_PTP_ENABLED;
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
+ mutex_destroy(&adapter->ptm_lock);
}
/**
@@ -1247,10 +1272,13 @@ void igc_ptp_stop(struct igc_adapter *adapter)
void igc_ptp_reset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- u32 cycle_ctrl, ctrl;
+ u32 cycle_ctrl, ctrl, stat;
unsigned long flags;
u32 timadj;
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
@@ -1272,6 +1300,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
if (!igc_is_crosststamp_supported(adapter))
break;
+ mutex_lock(&adapter->ptm_lock);
wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
@@ -1282,14 +1311,20 @@ void igc_ptp_reset(struct igc_adapter *adapter)
ctrl = IGC_PTM_CTRL_EN |
IGC_PTM_CTRL_START_NOW |
IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
- IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
- IGC_PTM_CTRL_TRIG;
+ IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT);
wr32(IGC_PTM_CTRL, ctrl);
/* Force the first cycle to run. */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
+
+ if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat,
+ stat, IGC_PTM_STAT_SLEEP,
+ IGC_PTM_STAT_TIMEOUT))
+ netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+ igc_ptm_reset(hw);
+ mutex_unlock(&adapter->ptm_lock);
break;
default:
/* No work to do. */
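
The readx_poll_timeout(rd32, IGC_PTM_STAT, stat, stat, ...) calls above spin reading the register until the condition (here, simply a nonzero stat) holds or the timeout expires. A userspace rendering of that loop shape under the same assumptions, with hypothetical helpers:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int polls;

static uint32_t read_stat(void)
{
	/* Pretend the hardware reports VALID after a few reads. */
	return ++polls >= 3 ? 1u : 0u;
}

/* Minimal analogue of readx_poll_timeout(): poll until cond or timeout. */
static int poll_stat(uint32_t *stat, long timeout_us, long sleep_us)
{
	struct timespec ts = { 0, sleep_us * 1000L };
	long waited = 0;

	for (;;) {
		*stat = read_stat();
		if (*stat)		/* the "cond" expression */
			return 0;
		if (waited >= timeout_us)
			return -1;	/* -ETIMEDOUT in the kernel */
		nanosleep(&ts, NULL);
		waited += sleep_us;
	}
}

int main(void)
{
	uint32_t stat;

	if (poll_stat(&stat, 1000, 10))
		fprintf(stderr, "timeout\n");
	else
		printf("stat=%#x after %d polls\n", stat, polls);
	return 0;
}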
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 04e08e06f30f..7153a71dfc86 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -67,6 +67,8 @@ static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ goto exit;
for (ent = 0; ent < rsp->count; ent++)
rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 43197b28b3e7..bdb98c9d8b1c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -734,7 +734,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
@@ -757,13 +757,13 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
val |= MTK_QTX_SCH_MAX_RATE_EN |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
break;
default:
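
The mediatek fix above is worth dwelling on: a stray semicolon after the EXP field ended the statement early, so the WEIGHT FIELD_PREP on the next line became a dead expression statement and never reached val. A minimal reproduction with simplified field macros (RATE_EN, EXP, WEIGHT are invented stand-ins):

#include <stdio.h>
#include <stdint.h>

#define RATE_EN     (1u << 31)
#define EXP(x)      ((uint32_t)(x) << 4)
#define WEIGHT(x)   ((uint32_t)(x) << 0)

int main(void)
{
	uint32_t bad = 0, good = 0;

	bad |= RATE_EN |
	       EXP(3);		/* ';' ends the statement here... */
	       WEIGHT(1);	/* ...so this value is computed and discarded
				 * (compilers with -Wall flag it as having
				 * no effect) */

	good |= RATE_EN |
		EXP(3) |	/* '|' keeps the expression going */
		WEIGHT(1);

	printf("bad  = %#x\ngood = %#x\n", bad, good);
	return 0;
}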
@@ -871,9 +871,25 @@ static const struct phylink_mac_ops mtk_phylink_ops = {
.mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
+static void mtk_mdio_config(struct mtk_eth *eth)
+{
+ u32 val;
+
+ /* Configure MDC Divider */
+ val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
+
+ /* Configure MDC Turbo Mode */
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+ else
+ val |= PPSC_MDC_TURBO;
+
+ mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
+}
+
static int mtk_mdio_init(struct mtk_eth *eth)
{
- unsigned int max_clk = 2500000, divider;
+ unsigned int max_clk = 2500000;
struct device_node *mii_np;
int ret;
u32 val;
@@ -908,20 +924,9 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
max_clk = val;
}
- divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
-
- /* Configure MDC Turbo Mode */
- if (mtk_is_netsys_v3_or_greater(eth))
- mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
-
- /* Configure MDC Divider */
- val = FIELD_PREP(PPSC_MDC_CFG, divider);
- if (!mtk_is_netsys_v3_or_greater(eth))
- val |= PPSC_MDC_TURBO;
- mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
-
- dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
-
+ eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+ mtk_mdio_config(eth);
+ dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
@@ -3315,7 +3320,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
- MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
+ MTK_CHK_DDONE_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
@@ -3974,6 +3979,10 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
+ /* No MT7628/88 support yet */
+ if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ mtk_mdio_config(eth);
+
if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 90a377ab4359..39709649ea8d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1271,6 +1271,7 @@ struct mtk_eth {
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
+ unsigned int mdc_divider;
struct work_struct pending_work;
unsigned long state;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index aedef818800c..988ce9119306 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2666,7 +2666,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- port->slave.port_np = port_np;
+ port->slave.port_np = of_node_get(port_np);
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -2722,6 +2722,17 @@ static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
}
}
+static void am65_cpsw_remove_dt(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ of_node_put(port->slave.port_np);
+ }
+}
+
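
port_np is now taken with of_node_get() because the driver stores the pointer beyond the iteration, and am65_cpsw_remove_dt() supplies the matching of_node_put() on remove and error paths. The get/put discipline, modeled with a plain counter in userspace C (all names here are illustrative):

#include <stdio.h>

struct node {
	const char *name;
	int refcount;
};

static struct node *node_get(struct node *np)	/* of_node_get() analogue */
{
	if (np)
		np->refcount++;
	return np;
}

static void node_put(struct node *np)		/* of_node_put() analogue */
{
	if (np)
		np->refcount--;
}

struct port {
	struct node *np;	/* held reference: stored past the loop */
};

int main(void)
{
	struct node dt = { "port@1", 1 };
	struct port port;

	port.np = node_get(&dt);	/* take a ref when storing */
	/* ... driver lifetime ... */
	node_put(port.np);		/* drop it on remove/error path */
	printf("%s refcount back to %d\n", dt.name, dt.refcount);
	return 0;
}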
static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
@@ -3624,6 +3635,7 @@ err_ndevs_clear:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
+ am65_cpsw_remove_dt(common);
err_of_clear:
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
@@ -3663,6 +3675,7 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
+ am65_cpsw_remove_dt(common);
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index b4a34c57b7b4..2a1c43316f46 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -412,6 +412,22 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
int ret;
u64 cmp;
+ if (!on) {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+
+ return 0;
+ }
+
/* Calculate width of the signal for PPS/PEROUT handling */
ts.tv_sec = req->on.sec;
ts.tv_nsec = req->on.nsec;
@@ -430,64 +446,39 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
if (ret)
return ret;
- if (on) {
- /* Configure CMP */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
- /* Configure SYNC, based on req on width */
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
- div_u64(ns_width, iep->def_inc));
- regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
- div_u64(ns_start, iep->def_inc));
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
- }
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC, based on req on width */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
} else {
- if (on) {
- u64 start_ns;
-
- iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
- req->period.nsec;
- start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
- + req->period.nsec;
- icss_iep_update_to_next_boundary(iep, start_ns);
-
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
- div_u64(ns_width, iep->def_inc));
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
- div_u64(ns_start, iep->def_inc));
- /* Enable Sync in single shot mode */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
- IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear CMP regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
-
- /* Disable sync */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
- }
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
}
return 0;
@@ -498,11 +489,21 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
{
int ret = 0;
+ if (!on)
+ goto disable;
+
/* Reject requests with unsupported flags */
if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
PTP_PEROUT_PHASE))
return -EOPNOTSUPP;
+ /* Set default "on" time (1ms) for the signal if not passed by the app */
+ if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+ req->on.sec = 0;
+ req->on.nsec = NSEC_PER_MSEC;
+ }
+
+disable:
mutex_lock(&iep->ptp_clk_mutex);
if (iep->pps_enabled) {
@@ -513,12 +514,6 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
if (iep->perout_enabled == !!on)
goto exit;
- /* Set default "on" time (1ms) for the signal if not passed by the app */
- if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
- req->on.sec = 0;
- req->on.nsec = NSEC_PER_MSEC;
- }
-
ret = icss_iep_perout_enable_hw(iep, req, on);
if (!ret)
iep->perout_enabled = !!on;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 14002b026452..b4be76e13a2f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -583,7 +583,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (!first_desc) {
netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
- goto drop_free_descs; /* drop */
+ return ICSSG_XDP_CONSUMED; /* drop */
}
if (page) { /* already DMA mapped by page_pool */
@@ -671,8 +671,10 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
q_idx = smp_processor_id() % emac->tx_ch_num;
result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
- if (result == ICSSG_XDP_CONSUMED)
+ if (result == ICSSG_XDP_CONSUMED) {
+ ndev->stats.tx_dropped++;
goto drop;
+ }
dev_sw_netstats_rx_add(ndev, xdpf->len);
return result;
@@ -1215,9 +1217,6 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
prueth_rx_cleanup);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
-
- page_pool_destroy(chn->pg_pool);
- chn->pg_pool = NULL;
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index fd102078f5c9..9c18203c8ce1 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -698,7 +698,7 @@ static int ngbe_probe(struct pci_dev *pdev,
/* setup the private structure */
err = ngbe_sw_init(wx);
if (err)
- goto err_free_mac_table;
+ goto err_pci_release_regions;
/* check if flash load is done after hw power up */
err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST);
@@ -792,6 +792,7 @@ err_register:
err_clear_interrupt_scheme:
wx_clear_interrupt_scheme(wx);
err_free_mac_table:
+ kfree(wx->rss_key);
kfree(wx->mac_table);
err_pci_release_regions:
pci_release_selected_regions(pdev,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index db5166a6db2c..d40db6b42200 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -636,7 +636,7 @@ static int txgbe_probe(struct pci_dev *pdev,
/* setup the private structure */
err = txgbe_sw_init(wx);
if (err)
- goto err_free_mac_table;
+ goto err_pci_release_regions;
/* check if flash load is done after hw power up */
err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
@@ -794,6 +794,7 @@ err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
err_free_mac_table:
+ kfree(wx->rss_key);
kfree(wx->mac_table);
err_pci_release_regions:
pci_release_selected_regions(pdev,
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 4f01189b7c4b..6842c2b02b39 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2552,7 +2552,7 @@ static void at76_disconnect(struct usb_interface *interface)
wiphy_info(priv->hw->wiphy, "disconnecting\n");
at76_delete_device(priv);
- usb_put_dev(priv->udev);
+ usb_put_dev(interface_to_usbdev(interface));
dev_info(&interface->dev, "disconnected\n");
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index cfcf01eb0daa..f26e4679e4ff 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -561,8 +561,10 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
if (!found) {
/* No platform data for this device, try OF and DMI data */
brcmf_dmi_probe(settings, chip, chiprev);
- if (brcmf_of_probe(dev, bus_type, settings) == -EPROBE_DEFER)
+ if (brcmf_of_probe(dev, bus_type, settings) == -EPROBE_DEFER) {
+ kfree(settings);
return ERR_PTR(-EPROBE_DEFER);
+ }
brcmf_acpi_probe(dev, bus_type, settings);
}
return settings;
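
The brcmfmac change plugs a leak: the early ERR_PTR(-EPROBE_DEFER) return skipped the free that the normal lifecycle eventually performs on settings. The general shape, sketched with hypothetical names and EPROBE_DEFER defined locally since it is a kernel-private errno:

#include <stdio.h>
#include <stdlib.h>

#define EPROBE_DEFER 517	/* kernel-private errno value */

struct settings { int dummy; };

static int probe_of(void)
{
	return -EPROBE_DEFER;	/* force the error branch for the demo */
}

static struct settings *get_settings(void)
{
	struct settings *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;

	if (probe_of() == -EPROBE_DEFER) {
		free(s);	/* without this, the early return leaks s */
		return NULL;	/* stands in for ERR_PTR(-EPROBE_DEFER) */
	}
	return s;
}

int main(void)
{
	printf("settings: %p\n", (void *)get_settings());
	return 0;
}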
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index 5a7207accd86..2c6e8ecd93b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -1895,7 +1895,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
int link_id;
int ret;
bool fw_err = false;
- bool keep_connection;
lockdep_assert_wiphy(mld->wiphy);
@@ -1965,7 +1964,7 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
iwl_mld_process_netdetect_res(mld, bss_vif, &resume_data);
mld->netdetect = false;
} else {
- keep_connection =
+ bool keep_connection =
iwl_mld_process_wowlan_status(mld, bss_vif,
resume_data.wowlan_status);
@@ -1973,11 +1972,10 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
if (keep_connection)
iwl_mld_unblock_emlsr(mld, bss_vif,
IWL_MLD_EMLSR_BLOCKED_WOWLAN);
+ else
+ ieee80211_resume_disconnect(bss_vif);
}
- if (!mld->netdetect && !keep_connection)
- ieee80211_resume_disconnect(bss_vif);
-
goto out;
err:
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index 453ce2ba39d1..89d95e9b4f30 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -396,8 +396,8 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct iwl_mld *mld, char *buf,
.data[0] = &cmd,
};
struct iwl_dhc_tas_status_resp *resp = NULL;
+ u32 resp_len = 0;
ssize_t pos = 0;
- u32 resp_len;
u32 status;
int ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index d1d56b081bf6..ec14d0736cee 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -166,7 +166,7 @@ struct iwl_mld_vif {
struct iwl_mld_emlsr emlsr;
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
struct iwl_mld_wowlan_data wowlan_data;
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index 6851064b82da..99e13cfd1e5f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -475,8 +475,8 @@ static
int iwl_mld_mac80211_start(struct ieee80211_hw *hw)
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
- int ret;
bool in_d3 = false;
+ int ret = 0;
lockdep_assert_wiphy(mld->wiphy);
@@ -537,7 +537,8 @@ void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
/* if the suspend flow fails the fw is in error. Stop it here, and it
* will be started upon wakeup
*/
- if (!suspend || iwl_mld_no_wowlan_suspend(mld))
+ if (!suspend ||
+ (IS_ENABLED(CONFIG_PM_SLEEP) && iwl_mld_no_wowlan_suspend(mld)))
iwl_mld_stop_fw(mld);
/* HW is stopped, no more coming RX. OTOH, the worker can't run as the
@@ -1943,6 +1944,7 @@ static void iwl_mld_sta_rc_update(struct ieee80211_hw *hw,
}
}
+#ifdef CONFIG_PM_SLEEP
static void iwl_mld_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
@@ -1994,6 +1996,7 @@ static int iwl_mld_resume(struct ieee80211_hw *hw)
return 0;
}
+#endif
static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld,
struct iwl_mld_sta *mld_sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 3ece34e30d58..472f26f83ba8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -147,8 +147,14 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
return;
if (trans->state >= IWL_TRANS_FW_STARTED &&
- trans_pcie->fw_reset_handshake)
+ trans_pcie->fw_reset_handshake) {
+ /*
+ * Reset handshake can dump firmware on timeout, but that
+ * should assume that the firmware is already dead.
+ */
+ trans->state = IWL_TRANS_NO_FW;
iwl_trans_pcie_fw_reset_handshake(trans);
+ }
trans_pcie->is_down = true;
diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 474b603c121c..adb4840b0489 100644
--- a/drivers/net/wireless/ti/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
@@ -342,8 +342,10 @@ void wl1251_tx_work(struct work_struct *work)
while ((skb = skb_dequeue(&wl->tx_queue))) {
if (!woken_up) {
ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
+ if (ret < 0) {
+ skb_queue_head(&wl->tx_queue, skb);
goto out;
+ }
woken_up = true;
}
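
wl1251 now puts the skb back at the head of the queue when the wakeup fails; previously the dequeued packet was silently lost. A queue-discipline sketch in userspace C, with the failure simulated:

#include <stdio.h>

#define QLEN 4
static int queue[QLEN] = { 1, 2, 3, 0 };
static int head, tail = 3;

static int dequeue(void)           { return head == tail ? 0 : queue[head++]; }
static void requeue_head(int pkt)  { queue[--head] = pkt; }
static int wakeup(void)            { return -1; /* simulate failure */ }

int main(void)
{
	int pkt;

	while ((pkt = dequeue())) {
		if (wakeup() < 0) {
			requeue_head(pkt);	/* don't drop it on the floor */
			break;
		}
		printf("sent %d\n", pkt);
	}
	printf("next in queue: %d\n", queue[head]);
	return 0;
}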
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cc23035148b4..b502ac07483b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4295,6 +4295,15 @@ static void nvme_scan_work(struct work_struct *work)
nvme_scan_ns_sequential(ctrl);
}
mutex_unlock(&ctrl->scan_lock);
+
+ /* Requeue if we have missed AENs */
+ if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
+ nvme_queue_scan(ctrl);
+#ifdef CONFIG_NVME_MULTIPATH
+ else
+ /* Re-read the ANA log page to not miss updates */
+ queue_work(nvme_wq, &ctrl->ana_work);
+#endif
}
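
The nvme change closes a lost-event window: an AEN arriving while scan_work holds scan_lock would be absorbed without triggering a rescan, so the event bit is re-checked after the unlock and the work requeued. Modeled with a flag and a mutex (the racing AEN is simulated inline):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static bool ns_changed;
static int rescans;

static void queue_scan(void) { rescans++; }

static void scan_work(void)
{
	pthread_mutex_lock(&scan_lock);
	ns_changed = false;		/* event consumed */
	/* ... scan runs; a new AEN may set ns_changed meanwhile ... */
	ns_changed = true;		/* simulate a racing AEN */
	pthread_mutex_unlock(&scan_lock);

	/* Requeue if we raced with an event during the scan. */
	if (ns_changed)
		queue_scan();
}

int main(void)
{
	scan_work();
	printf("rescans queued: %d\n", rescans);
	return 0;
}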
/*
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 89be5911b25d..05eccd96d34a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -427,7 +427,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
- return NULL;
+ return false;
list_for_each_entry_srcu(ns, &head->list, siblings,
srcu_read_lock_held(&head->srcu)) {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 26c459f0198d..72d260201d8c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1803,6 +1803,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
ret = PTR_ERR(sock_file);
goto err_destroy_mutex;
}
+
+ sk_net_refcnt_upgrade(queue->sock->sk);
nvme_tcp_reclassify_socket(queue->sock);
/* Single syn retry */
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7318b736d414..7b50130f10f6 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -995,16 +995,6 @@ nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
return kref_get_unless_zero(&hostport->ref);
}
-static void
-nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
-{
- /* if LLDD not implemented, leave as NULL */
- if (!hostport || !hostport->hosthandle)
- return;
-
- nvmet_fc_hostport_put(hostport);
-}
-
static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
@@ -1028,33 +1018,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
struct nvmet_fc_hostport *newhost, *match = NULL;
unsigned long flags;
+ /*
+ * Caller holds a reference on tgtport.
+ */
+
/* if LLDD not implemented, leave as NULL */
if (!hosthandle)
return NULL;
- /*
- * take reference for what will be the newly allocated hostport if
- * we end up using a new allocation
- */
- if (!nvmet_fc_tgtport_get(tgtport))
- return ERR_PTR(-EINVAL);
-
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
spin_unlock_irqrestore(&tgtport->lock, flags);
- if (match) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (match)
return match;
- }
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
- if (!newhost) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (!newhost)
return ERR_PTR(-ENOMEM);
- }
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
@@ -1063,6 +1044,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
kfree(newhost);
newhost = match;
} else {
+ nvmet_fc_tgtport_get(tgtport);
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
@@ -1076,20 +1058,14 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
-nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
-{
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
-}
-
-static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1097,7 +1073,8 @@ static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
nvmet_fc_tgtport_get(assoc->tgtport);
- queue_work(nvmet_wq, &assoc->del_work);
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ nvmet_fc_tgtport_put(assoc->tgtport);
}
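
queue_work() returns false when the work item is already pending, in which case the reference taken on behalf of the work must be dropped immediately or it leaks — exactly what the hunk above adds. The shape of that pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static int refs = 1;
static bool pending;

static void get(void)  { refs++; }
static void put(void)  { refs--; }

/* queue_work() analogue: refuses double-queueing. */
static bool queue(void)
{
	if (pending)
		return false;
	pending = true;
	return true;
}

static void schedule_delete(void)
{
	get();			/* ref for the work item */
	if (!queue())
		put();		/* already queued: drop the extra ref */
}

int main(void)
{
	schedule_delete();
	schedule_delete();	/* second call must not leak a reference */
	printf("refs = %d (expected 2: base + one queued work)\n", refs);
	return 0;
}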
static bool
@@ -1143,6 +1120,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
goto out_ida;
assoc->tgtport = tgtport;
+ nvmet_fc_tgtport_get(tgtport);
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
@@ -1190,7 +1168,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
- nvmet_fc_free_hostport(assoc->hostport);
+ nvmet_fc_hostport_put(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1244,6 +1222,8 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
+
+ nvmet_fc_tgtport_put(tgtport);
}
static struct nvmet_fc_tgt_assoc *
@@ -1455,11 +1435,6 @@ nvmet_fc_free_tgtport(struct kref *ref)
struct nvmet_fc_tgtport *tgtport =
container_of(ref, struct nvmet_fc_tgtport, ref);
struct device *dev = tgtport->dev;
- unsigned long flags;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- list_del(&tgtport->tgt_list);
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_free_ls_iodlist(tgtport);
@@ -1620,6 +1595,11 @@ int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_del(&tgtport->tgt_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_portentry_unbind_tgt(tgtport);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index e1abb27927ff..641201e62c1b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -208,6 +208,7 @@ struct fcloop_lport {
struct nvme_fc_local_port *localport;
struct list_head lport_list;
struct completion unreg_done;
+ refcount_t ref;
};
struct fcloop_lport_priv {
@@ -239,7 +240,7 @@ struct fcloop_nport {
struct fcloop_tport *tport;
struct fcloop_lport *lport;
struct list_head nport_list;
- struct kref ref;
+ refcount_t ref;
u64 node_name;
u64 port_name;
u32 port_role;
@@ -274,7 +275,7 @@ struct fcloop_fcpreq {
u32 inistate;
bool active;
bool aborted;
- struct kref ref;
+ refcount_t ref;
struct work_struct fcp_rcv_work;
struct work_struct abort_rcv_work;
struct work_struct tio_done_work;
@@ -478,7 +479,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
if (targetport) {
tport = targetport->private;
spin_lock(&tport->lock);
- list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
spin_unlock(&tport->lock);
queue_work(nvmet_wq, &tport->ls_work);
}
@@ -534,24 +535,18 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
}
static void
-fcloop_tfcp_req_free(struct kref *ref)
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
- struct fcloop_fcpreq *tfcp_req =
- container_of(ref, struct fcloop_fcpreq, ref);
+ if (!refcount_dec_and_test(&tfcp_req->ref))
+ return;
kfree(tfcp_req);
}
-static void
-fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
-{
- kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
-}
-
static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
- return kref_get_unless_zero(&tfcp_req->ref);
+ return refcount_inc_not_zero(&tfcp_req->ref);
}
static void
@@ -748,7 +743,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
- kref_init(&tfcp_req->ref);
+ refcount_set(&tfcp_req->ref, 1);
queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
@@ -1001,24 +996,39 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
}
static void
-fcloop_nport_free(struct kref *ref)
+fcloop_lport_put(struct fcloop_lport *lport)
{
- struct fcloop_nport *nport =
- container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
- kfree(nport);
+ if (!refcount_dec_and_test(&lport->ref))
+ return;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&lport->lport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(lport);
+}
+
+static int
+fcloop_lport_get(struct fcloop_lport *lport)
+{
+ return refcount_inc_not_zero(&lport->ref);
}
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
- kref_put(&nport->ref, fcloop_nport_free);
+ if (!refcount_dec_and_test(&nport->ref))
+ return;
+
+ kfree(nport);
}
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
- return kref_get_unless_zero(&nport->ref);
+ return refcount_inc_not_zero(&nport->ref);
}
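
fcloop's kref fields become bare refcount_t with open-coded put helpers; refcount_inc_not_zero() refuses to resurrect an object whose count has already hit zero. A C11-atomics model of the two primitives, assuming single-threaded use in the demo:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct nport {
	atomic_int ref;
};

/* refcount_inc_not_zero(): only take a ref if the object is still live. */
static int nport_get(struct nport *n)
{
	int old = atomic_load(&n->ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(&n->ref, &old, old + 1))
			return 1;
	return 0;
}

/* refcount_dec_and_test() folded into the put, freeing on the last drop. */
static void nport_put(struct nport *n)
{
	if (atomic_fetch_sub(&n->ref, 1) == 1)
		free(n);
}

int main(void)
{
	struct nport *n = malloc(sizeof(*n));

	atomic_init(&n->ref, 1);	/* refcount_set(&n->ref, 1) */
	printf("get on live object: %d\n", nport_get(n));
	nport_put(n);
	nport_put(n);			/* last put frees */
	return 0;
}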
static void
@@ -1029,6 +1039,8 @@ fcloop_localport_delete(struct nvme_fc_local_port *localport)
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
+
+ fcloop_lport_put(lport);
}
static void
@@ -1140,6 +1152,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
+ refcount_set(&lport->ref, 1);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
@@ -1156,13 +1169,6 @@ out_free_lport:
return ret ? ret : count;
}
-
-static void
-__unlink_local_port(struct fcloop_lport *lport)
-{
- list_del(&lport->lport_list);
-}
-
static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
@@ -1175,8 +1181,6 @@ __wait_localport_unreg(struct fcloop_lport *lport)
if (!ret)
wait_for_completion(&lport->unreg_done);
- kfree(lport);
-
return ret;
}
@@ -1199,8 +1203,9 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
list_for_each_entry(tlport, &fcloop_lports, lport_list) {
if (tlport->localport->node_name == nodename &&
tlport->localport->port_name == portname) {
+ if (!fcloop_lport_get(tlport))
+ break;
lport = tlport;
- __unlink_local_port(lport);
break;
}
}
@@ -1210,6 +1215,7 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
return -ENOENT;
ret = __wait_localport_unreg(lport);
+ fcloop_lport_put(lport);
return ret ? ret : count;
}
@@ -1249,7 +1255,7 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
newnport->port_role = opts->roles;
if (opts->mask & NVMF_OPT_FCADDR)
newnport->port_id = opts->fcaddr;
- kref_init(&newnport->ref);
+ refcount_set(&newnport->ref, 1);
spin_lock_irqsave(&fcloop_lock, flags);
@@ -1637,17 +1643,17 @@ static void __exit fcloop_exit(void)
for (;;) {
lport = list_first_entry_or_null(&fcloop_lports,
typeof(*lport), lport_list);
- if (!lport)
+ if (!lport || !fcloop_lport_get(lport))
break;
- __unlink_local_port(lport);
-
spin_unlock_irqrestore(&fcloop_lock, flags);
ret = __wait_localport_unreg(lport);
if (ret)
pr_warn("%s: Failed deleting local port\n", __func__);
+ fcloop_lport_put(lport);
+
spin_lock_irqsave(&fcloop_lock, flags);
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8d610c17e0f2..94daca15a096 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1990,12 +1990,12 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
device_create_managed_software_node(&pdev->dev, properties, NULL))
pci_warn(pdev, "could not add stall property");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index b95dcb8d483c..c18a5b96de5c 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -333,9 +333,9 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev,
else
status = valid ? "Invalid" : "Free";
}
- buf_len += sysfs_emit(buf + buf_len, "%d:%s ", key, status);
+ buf_len += sysfs_emit_at(buf, buf_len, "%d:%s ", key, status);
}
- buf_len += sysfs_emit(buf + buf_len, "\n");
+ buf_len += sysfs_emit_at(buf, buf_len, "\n");
return buf_len;
}
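
sysfs_emit() assumes it writes at the start of the sysfs page, so calling it as sysfs_emit(buf + buf_len, ...) trips its alignment check and emits nothing; sysfs_emit_at() takes the offset explicitly, which is what the hunk switches to. A userspace equivalent of the corrected accumulation loop, using snprintf as a stand-in:

#include <stdio.h>
#include <string.h>

#define PAGE 4096

/* snprintf-based stand-in for sysfs_emit_at(buf, at, fmt, ...) */
static int emit_at(char *buf, int at, const char *s)
{
	return snprintf(buf + at, PAGE - at, "%s", s);
}

int main(void)
{
	char buf[PAGE];
	int len = 0;

	len += emit_at(buf, len, "0:Free ");
	len += emit_at(buf, len, "1:Locked ");
	len += emit_at(buf, len, "\n");
	fputs(buf, stdout);
	return len < 0;
}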
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
index 02ff68be10d0..a184922bba8d 100644
--- a/drivers/platform/x86/amd/pmf/auto-mode.c
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -120,9 +120,9 @@ static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
- pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
+ fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL);
amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
- pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);
+ fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index bc8899e15c91..207a0b33d8d3 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -81,10 +81,10 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
- NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
- NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
apmf_update_fan_idx(dev,
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index a2cb2d5544f5..96821101ec77 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -176,6 +176,20 @@ static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
}
+/**
+ * fixp_q88_fromint: Convert integer to Q8.8
+ * @val: input value
+ *
+ * Converts an integer into binary fixed-point format where 8 bits
+ * are used for the integer part and 8 bits for the fractional part.
+ *
+ * Return: unsigned integer converted to Q8.8 format
+ */
+u32 fixp_q88_fromint(u32 val)
+{
+ return val << 8;
+}
+
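
Q8.8 packs a value into 16 bits with the top 8 as the integer part and the bottom 8 counting 1/256 steps, so fixp_q88_fromint(n) is just n << 8; 45 becomes 0x2d00. A userspace check of that conversion, with a reverse helper added purely for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t q88_fromint(uint32_t val)	/* mirrors fixp_q88_fromint() */
{
	return val << 8;
}

static double q88_todouble(uint32_t q)		/* for illustration only */
{
	return q / 256.0;
}

int main(void)
{
	uint32_t q = q88_fromint(45);

	printf("45 -> %#06x (Q8.8), back to %.2f\n", q, q88_todouble(q));
	return 0;
}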
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
int rc;
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index e6bdee68ccf3..45b60238d527 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -777,6 +777,7 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag);
+u32 fixp_q88_fromint(u32 val);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index d3083383f11f..49e14ca94a9e 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -198,9 +198,11 @@ static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
apts_config_store.val[idx].stt_min_limit, NULL);
amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
- apts_config_store.val[idx].stt_skin_temp_limit_apu, NULL);
+ fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu),
+ NULL);
amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
- apts_config_store.val[idx].stt_skin_temp_limit_hs2, NULL);
+ fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2),
+ NULL);
}
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
@@ -217,9 +219,11 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
config_store.prop[src][idx].stt_min, NULL);
amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
- config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
+ fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]),
+ NULL);
amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
- config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
+ fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]),
+ NULL);
} else if (op == SLIDER_OP_GET) {
amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index a1e43873a07b..14b99d8b63d2 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -123,7 +123,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_SKINTEMP_APU:
if (dev->prev_data->stt_skintemp_apu != val) {
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ fixp_q88_fromint(val), NULL);
dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val);
dev->prev_data->stt_skintemp_apu = val;
}
@@ -131,7 +132,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_SKINTEMP_HS2:
if (dev->prev_data->stt_skintemp_hs2 != val) {
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ fixp_q88_fromint(val), NULL);
dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val);
dev->prev_data->stt_skintemp_hs2 = val;
}
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d460dd194f19..a0a411b4f2d6 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -426,11 +426,14 @@ static int asus_pega_lucid_set(struct asus_laptop *asus, int unit, bool enable)
static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method)
{
+ unsigned long long val = (unsigned long long)curr;
+ acpi_status status;
int i, delta;
- unsigned long long val;
- for (i = 0; i < PEGA_ACC_RETRIES; i++) {
- acpi_evaluate_integer(asus->handle, method, NULL, &val);
+ for (i = 0; i < PEGA_ACC_RETRIES; i++) {
+ status = acpi_evaluate_integer(asus->handle, method, NULL, &val);
+ if (ACPI_FAILURE(status))
+ continue;
/* The output is noisy. From reading the ASL
* disassembly, timeout errors are returned with 1's
* in the high word, and the lack of locking around
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 3d3014b5adf0..0c3be03385f8 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -62,11 +62,35 @@ static struct awcc_quirks empty_quirks;
static const struct dmi_system_id awcc_dmi_table[] __initconst = {
{
+ .ident = "Alienware Area-51m R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m R2"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware m16 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
.ident = "Alienware m16 R1 AMD",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
},
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Alienware m16 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R2"),
+ },
.driver_data = &generic_quirks,
},
{
@@ -94,6 +118,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &generic_quirks,
},
{
+ .ident = "Alienware x15 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R2"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
.ident = "Alienware x17 R2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -126,6 +158,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &g_series_quirks,
},
{
+ .ident = "Dell Inc. G16 7630",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G16 7630"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
.ident = "Dell Inc. G3 3500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -149,6 +189,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
},
.driver_data = &g_series_quirks,
},
+ {
+ .ident = "Dell Inc. G5 5505",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G5 5505"),
+ },
+ .driver_data = &g_series_quirks,
+ },
};
enum WMAX_THERMAL_INFORMATION_OPERATIONS {
diff --git a/drivers/platform/x86/msi-wmi-platform.c b/drivers/platform/x86/msi-wmi-platform.c
index 9b5c7f8c79b0..dc5e9878cb68 100644
--- a/drivers/platform/x86/msi-wmi-platform.c
+++ b/drivers/platform/x86/msi-wmi-platform.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/device/driver.h>
@@ -17,6 +18,7 @@
#include <linux/hwmon.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/rwsem.h>
#include <linux/types.h>
@@ -76,8 +78,13 @@ enum msi_wmi_platform_method {
MSI_PLATFORM_GET_WMI = 0x1d,
};
-struct msi_wmi_platform_debugfs_data {
+struct msi_wmi_platform_data {
struct wmi_device *wdev;
+ struct mutex wmi_lock; /* Necessary when calling WMI methods */
+};
+
+struct msi_wmi_platform_debugfs_data {
+ struct msi_wmi_platform_data *data;
enum msi_wmi_platform_method method;
struct rw_semaphore buffer_lock; /* Protects debugfs buffer */
size_t length;
@@ -132,8 +139,9 @@ static int msi_wmi_platform_parse_buffer(union acpi_object *obj, u8 *output, siz
return 0;
}
-static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform_method method,
- u8 *input, size_t input_length, u8 *output, size_t output_length)
+static int msi_wmi_platform_query(struct msi_wmi_platform_data *data,
+ enum msi_wmi_platform_method method, u8 *input,
+ size_t input_length, u8 *output, size_t output_length)
{
struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer in = {
@@ -147,9 +155,15 @@ static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform
if (!input_length || !output_length)
return -EINVAL;
- status = wmidev_evaluate_method(wdev, 0x0, method, &in, &out);
- if (ACPI_FAILURE(status))
- return -EIO;
+ /*
+ * The ACPI control method responsible for handling the WMI method calls
+ * is not thread-safe. Because of this we have to do the locking ourselves.
+ */
+ scoped_guard(mutex, &data->wmi_lock) {
+ status = wmidev_evaluate_method(data->wdev, 0x0, method, &in, &out);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ }
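
scoped_guard(mutex, ...) from linux/cleanup.h drops the lock automatically when the block is left, including through the error return above. GCC/Clang's cleanup attribute gives a runnable userspace approximation of that idiom; the GUARD macro here is a rough sketch, not the kernel's implementation:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wmi_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock(pthread_mutex_t **m) { pthread_mutex_unlock(*m); }

/* Rough analogue of guard(mutex): unlocks when the variable leaves scope. */
#define GUARD(m) \
	pthread_mutex_t *g_ __attribute__((cleanup(unlock))) = \
		(pthread_mutex_lock(m), (m))

static int wmi_query(int fail)
{
	GUARD(&wmi_lock);

	if (fail)
		return -5;	/* -EIO; the lock is released on this path too */
	printf("method evaluated under the lock\n");
	return 0;
}

int main(void)
{
	wmi_query(0);
	return wmi_query(1) != 0 ? 0 : 1;
}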
obj = out.pointer;
if (!obj)
@@ -170,22 +184,22 @@ static umode_t msi_wmi_platform_is_visible(const void *drvdata, enum hwmon_senso
static int msi_wmi_platform_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
- struct wmi_device *wdev = dev_get_drvdata(dev);
+ struct msi_wmi_platform_data *data = dev_get_drvdata(dev);
u8 input[32] = { 0 };
u8 output[32];
- u16 data;
+ u16 value;
int ret;
- ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_FAN, input, sizeof(input), output,
+ ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_FAN, input, sizeof(input), output,
sizeof(output));
if (ret < 0)
return ret;
- data = get_unaligned_be16(&output[channel * 2 + 1]);
- if (!data)
+ value = get_unaligned_be16(&output[channel * 2 + 1]);
+ if (!value)
*val = 0;
else
- *val = 480000 / data;
+ *val = 480000 / value;
return 0;
}
@@ -231,7 +245,7 @@ static ssize_t msi_wmi_platform_write(struct file *fp, const char __user *input,
return ret;
down_write(&data->buffer_lock);
- ret = msi_wmi_platform_query(data->wdev, data->method, payload, data->length, data->buffer,
+ ret = msi_wmi_platform_query(data->data, data->method, payload, data->length, data->buffer,
data->length);
up_write(&data->buffer_lock);
@@ -277,17 +291,17 @@ static void msi_wmi_platform_debugfs_remove(void *data)
debugfs_remove_recursive(dir);
}
-static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry *dir,
+static void msi_wmi_platform_debugfs_add(struct msi_wmi_platform_data *drvdata, struct dentry *dir,
const char *name, enum msi_wmi_platform_method method)
{
struct msi_wmi_platform_debugfs_data *data;
struct dentry *entry;
- data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&drvdata->wdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return;
- data->wdev = wdev;
+ data->data = drvdata;
data->method = method;
init_rwsem(&data->buffer_lock);
@@ -298,82 +312,82 @@ static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry
entry = debugfs_create_file(name, 0600, dir, data, &msi_wmi_platform_debugfs_fops);
if (IS_ERR(entry))
- devm_kfree(&wdev->dev, data);
+ devm_kfree(&drvdata->wdev->dev, data);
}
-static void msi_wmi_platform_debugfs_init(struct wmi_device *wdev)
+static void msi_wmi_platform_debugfs_init(struct msi_wmi_platform_data *data)
{
struct dentry *dir;
char dir_name[64];
int ret, method;
- scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&wdev->dev));
+ scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&data->wdev->dev));
dir = debugfs_create_dir(dir_name, NULL);
if (IS_ERR(dir))
return;
- ret = devm_add_action_or_reset(&wdev->dev, msi_wmi_platform_debugfs_remove, dir);
+ ret = devm_add_action_or_reset(&data->wdev->dev, msi_wmi_platform_debugfs_remove, dir);
if (ret < 0)
return;
for (method = MSI_PLATFORM_GET_PACKAGE; method <= MSI_PLATFORM_GET_WMI; method++)
- msi_wmi_platform_debugfs_add(wdev, dir, msi_wmi_platform_debugfs_names[method - 1],
+ msi_wmi_platform_debugfs_add(data, dir, msi_wmi_platform_debugfs_names[method - 1],
method);
}
-static int msi_wmi_platform_hwmon_init(struct wmi_device *wdev)
+static int msi_wmi_platform_hwmon_init(struct msi_wmi_platform_data *data)
{
struct device *hdev;
- hdev = devm_hwmon_device_register_with_info(&wdev->dev, "msi_wmi_platform", wdev,
+ hdev = devm_hwmon_device_register_with_info(&data->wdev->dev, "msi_wmi_platform", data,
&msi_wmi_platform_chip_info, NULL);
return PTR_ERR_OR_ZERO(hdev);
}
-static int msi_wmi_platform_ec_init(struct wmi_device *wdev)
+static int msi_wmi_platform_ec_init(struct msi_wmi_platform_data *data)
{
u8 input[32] = { 0 };
u8 output[32];
u8 flags;
int ret;
- ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_EC, input, sizeof(input), output,
+ ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_EC, input, sizeof(input), output,
sizeof(output));
if (ret < 0)
return ret;
flags = output[MSI_PLATFORM_EC_FLAGS_OFFSET];
- dev_dbg(&wdev->dev, "EC RAM version %lu.%lu\n",
+ dev_dbg(&data->wdev->dev, "EC RAM version %lu.%lu\n",
FIELD_GET(MSI_PLATFORM_EC_MAJOR_MASK, flags),
FIELD_GET(MSI_PLATFORM_EC_MINOR_MASK, flags));
- dev_dbg(&wdev->dev, "EC firmware version %.28s\n",
+ dev_dbg(&data->wdev->dev, "EC firmware version %.28s\n",
&output[MSI_PLATFORM_EC_VERSION_OFFSET]);
if (!(flags & MSI_PLATFORM_EC_IS_TIGERLAKE)) {
if (!force)
return -ENODEV;
- dev_warn(&wdev->dev, "Loading on a non-Tigerlake platform\n");
+ dev_warn(&data->wdev->dev, "Loading on a non-Tigerlake platform\n");
}
return 0;
}
-static int msi_wmi_platform_init(struct wmi_device *wdev)
+static int msi_wmi_platform_init(struct msi_wmi_platform_data *data)
{
u8 input[32] = { 0 };
u8 output[32];
int ret;
- ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_WMI, input, sizeof(input), output,
+ ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_WMI, input, sizeof(input), output,
sizeof(output));
if (ret < 0)
return ret;
- dev_dbg(&wdev->dev, "WMI interface version %u.%u\n",
+ dev_dbg(&data->wdev->dev, "WMI interface version %u.%u\n",
output[MSI_PLATFORM_WMI_MAJOR_OFFSET],
output[MSI_PLATFORM_WMI_MINOR_OFFSET]);
@@ -381,7 +395,8 @@ static int msi_wmi_platform_init(struct wmi_device *wdev)
if (!force)
return -ENODEV;
- dev_warn(&wdev->dev, "Loading despite unsupported WMI interface version (%u.%u)\n",
+ dev_warn(&data->wdev->dev,
+ "Loading despite unsupported WMI interface version (%u.%u)\n",
output[MSI_PLATFORM_WMI_MAJOR_OFFSET],
output[MSI_PLATFORM_WMI_MINOR_OFFSET]);
}
@@ -391,19 +406,31 @@ static int msi_wmi_platform_init(struct wmi_device *wdev)
static int msi_wmi_platform_probe(struct wmi_device *wdev, const void *context)
{
+ struct msi_wmi_platform_data *data;
int ret;
- ret = msi_wmi_platform_init(wdev);
+ data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->wdev = wdev;
+ dev_set_drvdata(&wdev->dev, data);
+
+ ret = devm_mutex_init(&wdev->dev, &data->wmi_lock);
+ if (ret < 0)
+ return ret;
+
+ ret = msi_wmi_platform_init(data);
if (ret < 0)
return ret;
- ret = msi_wmi_platform_ec_init(wdev);
+ ret = msi_wmi_platform_ec_init(data);
if (ret < 0)
return ret;
- msi_wmi_platform_debugfs_init(wdev);
+ msi_wmi_platform_debugfs_init(data);
- return msi_wmi_platform_hwmon_init(wdev);
+ return msi_wmi_platform_hwmon_init(data);
}
static const struct wmi_device_id msi_wmi_platform_id_table[] = {
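
A minimal sketch of the scoped_guard() idiom the hunk above adopts: the guard from <linux/cleanup.h> holds the mutex only for the duration of the braced block, and an early return inside the block still releases it. The demo_* names and backend_call() are hypothetical, not part of this driver.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_data {
	struct mutex lock;	/* serializes calls into the backend */
};

static int demo_query(struct demo_data *d)
{
	scoped_guard(mutex, &d->lock) {
		/* lock held only inside this block */
		if (backend_call(d) < 0)	/* backend_call() is hypothetical */
			return -EIO;		/* mutex released on this path too */
	}

	return 0;	/* mutex already released here */
}
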
diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
index 3e5fa3b6e2fd..278c6d151dc4 100644
--- a/drivers/platform/x86/x86-android-tablets/dmi.c
+++ b/drivers/platform/x86/x86-android-tablets/dmi.c
@@ -180,6 +180,18 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
.driver_data = (void *)&peaq_c1010_info,
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)&vexia_edu_atla10_5v_info,
+ },
+ {
/* Vexia Edu Atla 10 tablet 9V version */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -187,7 +199,7 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
/* Above strings are too generic, also match on BIOS date */
DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
},
- .driver_data = (void *)&vexia_edu_atla10_info,
+ .driver_data = (void *)&vexia_edu_atla10_9v_info,
},
{
/* Whitelabel (sold as various brands) TM800A550L */
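
For context, a hedged sketch of how such an entry behaves: all DMI_MATCH() fields inside one dmi_system_id element are ANDed together, which is why the generic "To be filled by O.E.M." strings can still identify one board once combined with a BIOS date. The table below is illustrative only.

#include <linux/dmi.h>

static const struct dmi_system_id demo_ids[] = {
	{
		.matches = {
			/* both conditions must match for this entry to fire */
			DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
			DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
		},
	},
	{ }	/* terminating entry */
};
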
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index 1d93d9edb23f..f7bd9f863c85 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -599,62 +599,122 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
};
/*
- * Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet
+ * Vexia EDU ATLA 10 tablet 5V, Android 4.4 + Guadalinex Ubuntu tablet
+ * distributed to schools in the Spanish Andalucía region.
+ */
+static const struct property_entry vexia_edu_atla10_5v_touchscreen_props[] = {
+ PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
+ PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
+ { }
+};
+
+static const struct software_node vexia_edu_atla10_5v_touchscreen_node = {
+ .properties = vexia_edu_atla10_5v_touchscreen_props,
+};
+
+static const struct x86_i2c_client_info vexia_edu_atla10_5v_i2c_clients[] __initconst = {
+ {
+ /* kxcjk1013 accelerometer */
+ .board_info = {
+ .type = "kxcjk1013",
+ .addr = 0x0f,
+ .dev_name = "kxcjk1013",
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ }, {
+ /* touchscreen controller */
+ .board_info = {
+ .type = "hid-over-i2c",
+ .addr = 0x38,
+ .dev_name = "FTSC1000",
+ .swnode = &vexia_edu_atla10_5v_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }
+};
+
+static struct gpiod_lookup_table vexia_edu_atla10_5v_ft5416_gpios = {
+ .dev_id = "i2c-FTSC1000",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const vexia_edu_atla10_5v_gpios[] = {
+ &vexia_edu_atla10_5v_ft5416_gpios,
+ NULL
+};
+
+const struct x86_dev_info vexia_edu_atla10_5v_info __initconst = {
+ .i2c_client_info = vexia_edu_atla10_5v_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_5v_i2c_clients),
+ .gpiod_lookup_tables = vexia_edu_atla10_5v_gpios,
+};
+
+/*
+ * Vexia EDU ATLA 10 tablet 9V, Android 4.2 + Guadalinex Ubuntu tablet
* distributed to schools in the Spanish Andalucía region.
*/
static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
-static const struct property_entry vexia_edu_atla10_ulpmc_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_ulpmc_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy),
{ }
};
-static const struct software_node vexia_edu_atla10_ulpmc_node = {
- .properties = vexia_edu_atla10_ulpmc_props,
+static const struct software_node vexia_edu_atla10_9v_ulpmc_node = {
+ .properties = vexia_edu_atla10_9v_ulpmc_props,
};
-static const char * const vexia_edu_atla10_accel_mount_matrix[] = {
+static const char * const vexia_edu_atla10_9v_accel_mount_matrix[] = {
"0", "-1", "0",
"1", "0", "0",
"0", "0", "1"
};
-static const struct property_entry vexia_edu_atla10_accel_props[] = {
- PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_accel_mount_matrix),
+static const struct property_entry vexia_edu_atla10_9v_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_9v_accel_mount_matrix),
{ }
};
-static const struct software_node vexia_edu_atla10_accel_node = {
- .properties = vexia_edu_atla10_accel_props,
+static const struct software_node vexia_edu_atla10_9v_accel_node = {
+ .properties = vexia_edu_atla10_9v_accel_props,
};
-static const struct property_entry vexia_edu_atla10_touchscreen_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_touchscreen_props[] = {
PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
{ }
};
-static const struct software_node vexia_edu_atla10_touchscreen_node = {
- .properties = vexia_edu_atla10_touchscreen_props,
+static const struct software_node vexia_edu_atla10_9v_touchscreen_node = {
+ .properties = vexia_edu_atla10_9v_touchscreen_props,
};
-static const struct property_entry vexia_edu_atla10_pmic_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_pmic_props[] = {
PROPERTY_ENTRY_BOOL("linux,register-pwrsrc-power_supply"),
{ }
};
-static const struct software_node vexia_edu_atla10_pmic_node = {
- .properties = vexia_edu_atla10_pmic_props,
+static const struct software_node vexia_edu_atla10_9v_pmic_node = {
+ .properties = vexia_edu_atla10_9v_pmic_props,
};
-static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initconst = {
+static const struct x86_i2c_client_info vexia_edu_atla10_9v_i2c_clients[] __initconst = {
{
/* I2C attached embedded controller, used to access fuel-gauge */
.board_info = {
.type = "vexia_atla10_ec",
.addr = 0x76,
.dev_name = "ulpmc",
- .swnode = &vexia_edu_atla10_ulpmc_node,
+ .swnode = &vexia_edu_atla10_9v_ulpmc_node,
},
.adapter_path = "0000:00:18.1",
}, {
@@ -679,7 +739,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "kxtj21009",
.addr = 0x0f,
.dev_name = "kxtj21009",
- .swnode = &vexia_edu_atla10_accel_node,
+ .swnode = &vexia_edu_atla10_9v_accel_node,
},
.adapter_path = "0000:00:18.5",
}, {
@@ -688,7 +748,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "hid-over-i2c",
.addr = 0x38,
.dev_name = "FTSC1000",
- .swnode = &vexia_edu_atla10_touchscreen_node,
+ .swnode = &vexia_edu_atla10_9v_touchscreen_node,
},
.adapter_path = "0000:00:18.6",
.irq_data = {
@@ -703,7 +763,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "intel_soc_pmic_crc",
.addr = 0x6e,
.dev_name = "intel_soc_pmic_crc",
- .swnode = &vexia_edu_atla10_pmic_node,
+ .swnode = &vexia_edu_atla10_9v_pmic_node,
},
.adapter_path = "0000:00:18.7",
.irq_data = {
@@ -715,7 +775,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
}
};
-static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
+static const struct x86_serdev_info vexia_edu_atla10_9v_serdevs[] __initconst = {
{
.ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),
.ctrl_devname = "serial0",
@@ -723,7 +783,7 @@ static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
},
};
-static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
+static struct gpiod_lookup_table vexia_edu_atla10_9v_ft5416_gpios = {
.dev_id = "i2c-FTSC1000",
.table = {
GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_LOW),
@@ -731,12 +791,12 @@ static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
},
};
-static struct gpiod_lookup_table * const vexia_edu_atla10_gpios[] = {
- &vexia_edu_atla10_ft5416_gpios,
+static struct gpiod_lookup_table * const vexia_edu_atla10_9v_gpios[] = {
+ &vexia_edu_atla10_9v_ft5416_gpios,
NULL
};
-static int __init vexia_edu_atla10_init(struct device *dev)
+static int __init vexia_edu_atla10_9v_init(struct device *dev)
{
struct pci_dev *pdev;
int ret;
@@ -760,13 +820,13 @@ static int __init vexia_edu_atla10_init(struct device *dev)
return 0;
}
-const struct x86_dev_info vexia_edu_atla10_info __initconst = {
- .i2c_client_info = vexia_edu_atla10_i2c_clients,
- .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_i2c_clients),
- .serdev_info = vexia_edu_atla10_serdevs,
- .serdev_count = ARRAY_SIZE(vexia_edu_atla10_serdevs),
- .gpiod_lookup_tables = vexia_edu_atla10_gpios,
- .init = vexia_edu_atla10_init,
+const struct x86_dev_info vexia_edu_atla10_9v_info __initconst = {
+ .i2c_client_info = vexia_edu_atla10_9v_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_9v_i2c_clients),
+ .serdev_info = vexia_edu_atla10_9v_serdevs,
+ .serdev_count = ARRAY_SIZE(vexia_edu_atla10_9v_serdevs),
+ .gpiod_lookup_tables = vexia_edu_atla10_9v_gpios,
+ .init = vexia_edu_atla10_9v_init,
.use_pci = true,
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 63a38a0069ba..dcf8d49e3b5f 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -127,7 +127,8 @@ extern const struct x86_dev_info nextbook_ares8_info;
extern const struct x86_dev_info nextbook_ares8a_info;
extern const struct x86_dev_info peaq_c1010_info;
extern const struct x86_dev_info whitelabel_tm800a550l_info;
-extern const struct x86_dev_info vexia_edu_atla10_info;
+extern const struct x86_dev_info vexia_edu_atla10_5v_info;
+extern const struct x86_dev_info vexia_edu_atla10_9v_info;
extern const struct x86_dev_info xiaomi_mipad2_info;
extern const struct dmi_system_id x86_android_tablet_ids[];
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 7945c6be1f7c..faf6e027f89a 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2067,6 +2067,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
if (!s->start) {
/* roundup() does not work on 32-bit systems */
s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
+ s->start *= s->period;
s->start = ktime_add(s->start, s->phase);
}
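
A standalone illustration of the fix above, assuming only standard C: the kernel's roundup() needs a native 64-by-64 division, which 32-bit kernels do not provide, so the patch rounds the quotient up with DIV64_U64_ROUND_UP() and multiplies back to land on the next multiple of the period.

#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_to_multiple(uint64_t value, uint64_t step)
{
	uint64_t n = (value + step - 1) / step;	/* DIV64_U64_ROUND_UP() */

	return n * step;			/* the added "s->start *= s->period" */
}

int main(void)
{
	/* e.g. align a 1000000007 ns start to a 500000 ns period */
	printf("%llu\n",
	       (unsigned long long)round_up_to_multiple(1000000007ULL, 500000ULL));
	return 0;
}
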
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index a40c511e0096..0387bd838487 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -322,7 +322,7 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
const struct pwm_ops *ops = chip->ops;
char wfhw[WFHWSIZE];
struct pwm_waveform wf_rounded;
- int err;
+ int err, ret_tohw;
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
@@ -332,16 +332,16 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
if (!pwm_wf_valid(wf))
return -EINVAL;
- err = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
- if (err)
- return err;
+ ret_tohw = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
+ if (ret_tohw < 0)
+ return ret_tohw;
if ((IS_ENABLED(CONFIG_PWM_DEBUG) || exact) && wf->period_length_ns) {
err = __pwm_round_waveform_fromhw(chip, pwm, &wfhw, &wf_rounded);
if (err)
return err;
- if (IS_ENABLED(CONFIG_PWM_DEBUG) && !pwm_check_rounding(wf, &wf_rounded))
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && ret_tohw == 0 && !pwm_check_rounding(wf, &wf_rounded))
dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu]\n",
wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns);
@@ -382,7 +382,8 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns,
wf_set.duty_length_ns, wf_set.period_length_ns, wf_set.duty_offset_ns);
}
- return 0;
+
+ return ret_tohw;
}
/**
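
A minimal sketch, with hypothetical names, of the return convention this change propagates: a .round_waveform_tohw() implementation returns 0 when the request is representable by rounding down, 1 when the hardware had to round up instead, and a negative errno on failure.

#include <linux/errno.h>

static int demo_round_tohw(unsigned long long period_ns,
			   unsigned long long hw_min_period_ns,
			   unsigned long long *period_out)
{
	if (!period_ns)
		return -EINVAL;

	if (period_ns < hw_min_period_ns) {
		/* too short: round up to the smallest possible period */
		*period_out = hw_min_period_ns;
		return 1;
	}

	*period_out = period_ns;	/* representable as requested */
	return 0;
}
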
diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
index 4259a0db9ff4..4337c8f5acf0 100644
--- a/drivers/pwm/pwm-axi-pwmgen.c
+++ b/drivers/pwm/pwm-axi-pwmgen.c
@@ -75,6 +75,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
{
struct axi_pwmgen_waveform *wfhw = _wfhw;
struct axi_pwmgen_ddata *ddata = axi_pwmgen_ddata_from_chip(chip);
+ int ret = 0;
if (wf->period_length_ns == 0) {
*wfhw = (struct axi_pwmgen_waveform){
@@ -91,12 +92,15 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
if (wfhw->period_cnt == 0) {
/*
* The specified period is too short for the hardware.
- * Let's round .duty_cycle down to 0 to get a (somewhat)
- * valid result.
+ * So round up .period_cnt to 1 (i.e. the smallest
+ * possible period). With .duty_cycle and .duty_offset
+ * being less than or equal to .period, their rounded
+ * value must be 0.
*/
wfhw->period_cnt = 1;
wfhw->duty_cycle_cnt = 0;
wfhw->duty_offset_cnt = 0;
+ ret = 1;
} else {
wfhw->duty_cycle_cnt = min_t(u64,
mul_u64_u32_div(wf->duty_length_ns, ddata->clk_rate_hz, NSEC_PER_SEC),
@@ -111,7 +115,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
ddata->clk_rate_hz, wfhw->period_cnt, wfhw->duty_cycle_cnt, wfhw->duty_offset_cnt);
- return 0;
+ return ret;
}
static int axi_pwmgen_round_waveform_fromhw(struct pwm_chip *chip, struct pwm_device *pwm,
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 2510c10ca473..c45a5fca4cbb 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -118,6 +118,9 @@ static unsigned int fsl_pwm_ticks_to_ns(struct fsl_pwm_chip *fpc,
unsigned long long exval;
rate = clk_get_rate(fpc->clk[fpc->period.clk_select]);
+ if (rate >> fpc->period.clk_ps == 0)
+ return 0;
+
exval = ticks;
exval *= 1000000000UL;
do_div(exval, rate >> fpc->period.clk_ps);
@@ -190,6 +193,9 @@ static unsigned int fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
unsigned int period = fpc->period.mod_period + 1;
unsigned int period_ns = fsl_pwm_ticks_to_ns(fpc, period);
+ if (!period_ns)
+ return 0;
+
duty = (unsigned long long)duty_ns * period;
do_div(duty, period_ns);
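
Both hunks guard the same hazard; here is a sketch of the pattern with illustrative names: do_div() has no defined result for a zero divisor, so the shifted rate is checked before dividing and 0 is returned as the out-of-range answer, which the caller then treats the same way.

#include <asm/div64.h>

static unsigned int demo_ticks_to_ns(unsigned long long ticks,
				     unsigned long rate, unsigned int ps)
{
	unsigned long long exval;

	if (rate >> ps == 0)	/* divisor would be zero */
		return 0;

	exval = ticks;
	exval *= 1000000000UL;
	do_div(exval, rate >> ps);
	return exval;
}
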
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 01dfa0fab80a..7eaab5831499 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -121,21 +121,25 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
reg_thres = PWMTHRES;
+ unsigned long clk_rate;
u64 resolution;
int ret;
ret = pwm_mediatek_clk_enable(chip, pwm);
-
if (ret < 0)
return ret;
+ clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]);
+ if (!clk_rate)
+ return -EINVAL;
+
/* Make sure we use the bus clock and not the 26MHz clock */
if (pc->soc->has_ck_26m_sel)
writel(0, pc->regs + PWM_CK_26M_SEL);
/* Using resolution in picosecond gets accuracy higher */
resolution = (u64)NSEC_PER_SEC * 1000;
- do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm]));
+ do_div(resolution, clk_rate);
cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
while (cnt_period > 8191) {
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 2261789cc27d..578dbdd2d5a7 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -8,6 +8,7 @@
* - The hardware cannot generate a 0% duty cycle.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -102,23 +103,24 @@ static void rcar_pwm_set_clock_control(struct rcar_pwm_chip *rp,
rcar_pwm_write(rp, value, RCAR_PWMCR);
}
-static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
- int period_ns)
+static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, u64 duty_ns,
+ u64 period_ns)
{
- unsigned long long one_cycle, tmp; /* 0.01 nanoseconds */
+ unsigned long long tmp;
unsigned long clk_rate = clk_get_rate(rp->clk);
u32 cyc, ph;
- one_cycle = NSEC_PER_SEC * 100ULL << div;
- do_div(one_cycle, clk_rate);
+ /* div <= 24 == RCAR_PWM_MAX_DIVISION, so the shift doesn't overflow. */
+ tmp = mul_u64_u64_div_u64(period_ns, clk_rate, (u64)NSEC_PER_SEC << div);
+ if (tmp > FIELD_MAX(RCAR_PWMCNT_CYC0_MASK))
+ tmp = FIELD_MAX(RCAR_PWMCNT_CYC0_MASK);
- tmp = period_ns * 100ULL;
- do_div(tmp, one_cycle);
- cyc = (tmp << RCAR_PWMCNT_CYC0_SHIFT) & RCAR_PWMCNT_CYC0_MASK;
+ cyc = FIELD_PREP(RCAR_PWMCNT_CYC0_MASK, tmp);
- tmp = duty_ns * 100ULL;
- do_div(tmp, one_cycle);
- ph = tmp & RCAR_PWMCNT_PH0_MASK;
+ tmp = mul_u64_u64_div_u64(duty_ns, clk_rate, (u64)NSEC_PER_SEC << div);
+ if (tmp > FIELD_MAX(RCAR_PWMCNT_PH0_MASK))
+ tmp = FIELD_MAX(RCAR_PWMCNT_PH0_MASK);
+ ph = FIELD_PREP(RCAR_PWMCNT_PH0_MASK, tmp);
/* Avoid prohibited setting */
if (cyc == 0 || ph == 0)
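
A minimal sketch of the clamp-then-pack pattern the rewrite above uses, against a hypothetical register field: mul_u64_u64_div_u64() avoids intermediate overflow, FIELD_MAX() bounds the result to what the field can hold, and FIELD_PREP() shifts it into place.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/math64.h>
#include <linux/time64.h>
#include <linux/types.h>

#define DEMO_CYC_MASK	GENMASK(31, 16)	/* hypothetical register field */

static u32 demo_ns_to_field(u64 ns, u64 rate, unsigned int div)
{
	u64 cnt = mul_u64_u64_div_u64(ns, rate, (u64)NSEC_PER_SEC << div);

	if (cnt > FIELD_MAX(DEMO_CYC_MASK))
		cnt = FIELD_MAX(DEMO_CYC_MASK);

	return FIELD_PREP(DEMO_CYC_MASK, cnt);
}
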
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index a59de4de18b6..ec2c05c9ee7a 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -103,22 +103,16 @@ static int stm32_pwm_round_waveform_tohw(struct pwm_chip *chip,
if (ret)
goto out;
- /*
- * calculate the best value for ARR for the given PSC, refuse if
- * the resulting period gets bigger than the requested one.
- */
arr = mul_u64_u64_div_u64(wf->period_length_ns, rate,
(u64)NSEC_PER_SEC * (wfhw->psc + 1));
if (arr <= wfhw->arr) {
/*
- * requested period is small than the currently
+ * requested period is smaller than the currently
* configured and unchangeable period, report back the smallest
- * possible period, i.e. the current state; Initialize
- * ccr to anything valid.
+ * possible period, i.e. the current state and return 1
+ * to indicate the wrong rounding direction.
*/
- wfhw->ccr = 0;
ret = 1;
- goto out;
}
} else {
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index f9be26d25348..d096b58cd0ae 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -362,4 +362,7 @@ static inline void atl_debug_on_bad_intlv_mode(struct addr_ctx *ctx)
atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode);
}
+#define MI300_UMC_MCA_COL GENMASK(5, 1)
+#define MI300_UMC_MCA_ROW13 BIT(23)
+
#endif /* __AMD_ATL_INTERNAL_H__ */
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index dc8aa12f63c8..6e072b7667e9 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -229,7 +229,6 @@ int get_umc_info_mi300(void)
* Additionally, the PC and Bank bits may be hashed. This must be accounted for before
* reconstructing the normalized address.
*/
-#define MI300_UMC_MCA_COL GENMASK(5, 1)
#define MI300_UMC_MCA_BANK GENMASK(9, 6)
#define MI300_UMC_MCA_ROW GENMASK(24, 10)
#define MI300_UMC_MCA_PC BIT(25)
@@ -320,7 +319,7 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
* See amd_atl::convert_dram_to_norm_addr_mi300() for MI300 address formats.
*/
#define MI300_NUM_COL BIT(HWEIGHT(MI300_UMC_MCA_COL))
-static void retire_row_mi300(struct atl_err *a_err)
+static void _retire_row_mi300(struct atl_err *a_err)
{
unsigned long addr;
struct page *p;
@@ -351,6 +350,22 @@ static void retire_row_mi300(struct atl_err *a_err)
}
}
+/*
+ * In addition to the column bits, the row[13] bit should also be included when
+ * calculating addresses affected by a physical row.
+ *
+ * Instead of running through another loop over a single bit, just run through
+ * the column bits twice and flip the row[13] bit in-between.
+ *
+ * See MI300_UMC_MCA_ROW for the row bits in MCA_ADDR_UMC value.
+ */
+static void retire_row_mi300(struct atl_err *a_err)
+{
+ _retire_row_mi300(a_err);
+ a_err->addr ^= MI300_UMC_MCA_ROW13;
+ _retire_row_mi300(a_err);
+}
+
void amd_retire_dram_row(struct atl_err *a_err)
{
if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
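
A standalone model of the retire-twice trick above: instead of widening the loop over the column bits to cover row[13] as well, the existing loop runs once, the bit is flipped with XOR, and the loop runs again, covering both halves of the physical row.

#include <stdint.h>
#include <stdio.h>

#define DEMO_ROW13	(1ULL << 23)	/* BIT(23), as MI300_UMC_MCA_ROW13 */

static void retire_half(uint64_t addr)
{
	printf("retire addresses derived from %#llx\n",
	       (unsigned long long)addr);
}

int main(void)
{
	uint64_t addr = 0x123456;

	retire_half(addr);
	addr ^= DEMO_ROW13;	/* flip row[13] in-between */
	retire_half(addr);
	return 0;
}
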
diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c
index 90de737fbc90..8877c6ff64c4 100644
--- a/drivers/ras/amd/fmpm.c
+++ b/drivers/ras/amd/fmpm.c
@@ -250,6 +250,13 @@ static bool rec_has_valid_entries(struct fru_rec *rec)
return true;
}
+/*
+ * Row retirement is done on MI300 systems, and some bits are 'don't
+ * care' for comparing addresses with unique physical rows. This
+ * includes all column bits and the row[13] bit.
+ */
+#define MASK_ADDR(addr) ((addr) & ~(MI300_UMC_MCA_ROW13 | MI300_UMC_MCA_COL))
+
static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_desc *new)
{
/*
@@ -258,7 +265,7 @@ static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_
*
* Also, order the checks from most->least likely to fail to shortcut the code.
*/
- if (old->addr != new->addr)
+ if (MASK_ADDR(old->addr) != MASK_ADDR(new->addr))
return false;
if (old->hw_id != new->hw_id)
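
A self-contained sketch of the masked comparison MASK_ADDR() performs, using the MI300 mask values from the patch: the bits that vary within one physical row (the column bits and row[13]) are cleared from both addresses before comparing, so two errors in the same row compare equal.

#include <stdbool.h>
#include <stdint.h>

#define DEMO_COL	0x3eULL			/* GENMASK(5, 1) */
#define DEMO_ROW13	(1ULL << 23)		/* BIT(23) */
#define DEMO_MASK(a)	((a) & ~(DEMO_ROW13 | DEMO_COL))

static bool same_row(uint64_t a, uint64_t b)
{
	return DEMO_MASK(a) == DEMO_MASK(b);
}
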
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 21fa7ac849e5..4904b831c0a7 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -302,11 +302,17 @@ static struct airq_info *new_airq_info(int index)
static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
- int i, j;
+ int i, j, queue_idx, highest_queue_idx = -1;
struct airq_info *info;
unsigned long *indicator_addr = NULL;
unsigned long bit, flags;
+ /* Array entries without an actual queue pointer must be ignored. */
+ for (i = 0; i < nvqs; i++) {
+ if (vqs[i])
+ highest_queue_idx++;
+ }
+
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
mutex_lock(&airq_areas_lock);
if (!airq_areas[i])
@@ -316,7 +322,7 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
if (!info)
return NULL;
write_lock_irqsave(&info->lock, flags);
- bit = airq_iv_alloc(info->aiv, nvqs);
+ bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1);
if (bit == -1UL) {
/* Not enough vacancies. */
write_unlock_irqrestore(&info->lock, flags);
@@ -325,8 +331,10 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
*first = bit;
*airq_info = info;
indicator_addr = info->aiv->vector;
- for (j = 0; j < nvqs; j++) {
- airq_iv_set_ptr(info->aiv, bit + j,
+ for (j = 0, queue_idx = 0; j < nvqs; j++) {
+ if (!vqs[j])
+ continue;
+ airq_iv_set_ptr(info->aiv, bit + queue_idx++,
(unsigned long)vqs[j]);
}
write_unlock_irqrestore(&info->lock, flags);
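
A small userspace model of the hole-skipping logic above: vqs[] may contain NULL entries, so only the real queues get indicator bits, and those bits are assigned densely in array order.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	void *vqs[] = { "vq0", NULL, "vq2", "vq3", NULL };
	size_t nvqs = sizeof(vqs) / sizeof(vqs[0]);
	size_t i, queue_idx = 0;

	for (i = 0; i < nvqs; i++) {
		if (!vqs[i])
			continue;	/* hole: no indicator bit */
		printf("vqs[%zu] -> bit %zu\n", i, queue_idx++);
	}
	/* queue_idx now equals the number of bits actually needed */
	return 0;
}
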
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5cb1d3db4907..944cf2fb0561 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -935,8 +935,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work,
container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
+ struct domain_device *port_dev;
int phy_no = sas_phy->id;
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
+ sas_port && port && (port->id != phy->port_id)) {
+ dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
+ phy_no, port->id, phy->port_id);
+ port_dev = sas_port->port_dev;
+ if (port_dev && !dev_is_expander(port_dev->dev_type)) {
+ /*
+ * Set the device state to gone to block
+ * sending IO to the device.
+ */
+ set_bit(SAS_DEV_GONE, &port_dev->state);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ return;
+ }
+ }
+
phy->wait_phyup_cnt = 0;
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index a1fc400ab4c3..1e9830940f84 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2501,6 +2501,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
struct sas_ata_task *ata_task = &task->ata_task;
struct sas_tmf_task *tmf = slot->tmf;
+ int phy_id;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw0, dw1 = 0, dw2 = 0;
@@ -2508,10 +2509,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* create header */
/* dw0 */
dw0 = port->id << CMD_HDR_PORT_OFF;
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
dw0 |= 3 << CMD_HDR_CMD_OFF;
- else
+ } else {
+ phy_id = device->phy->identify.phy_identifier;
+ dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF;
+ dw0 |= CMD_HDR_FORCE_PHY_MSK;
dw0 |= 4 << CMD_HDR_CMD_OFF;
+ }
if (tmf && ata_task->force_phy) {
dw0 |= CMD_HDR_FORCE_PHY_MSK;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2684d6482067..08dac9ae2f10 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -359,6 +359,10 @@
#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF 6
#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PHY_ID_OFF 8
+#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF)
+#define CMD_HDR_FORCE_PHY_OFF 17
+#define CMD_HDR_FORCE_PHY_MSK (0x1U << CMD_HDR_FORCE_PHY_OFF)
#define CMD_HDR_PORT_OFF 18
#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF 27
@@ -1429,15 +1433,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ int phy_id;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
- else
+ } else {
+ phy_id = device->phy->identify.phy_identifier;
+ hdr->dw0 |= cpu_to_le32((1U << phy_id)
+ << CMD_HDR_PHY_ID_OFF);
+ hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
+ }
switch (task->data_dir) {
case DMA_TO_DEVICE:
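
An illustrative encoding helper, not driver code, for the force-phy bits both the v2 and v3 hunks set: the target phy is selected one-hot at CMD_HDR_PHY_ID_OFF, and the force bit tells the controller to use that phy rather than pick one from the port. The offsets mirror the v3 register layout in the patch.

#include <stdint.h>

#define CMD_HDR_PHY_ID_OFF	8
#define CMD_HDR_FORCE_PHY_OFF	17

static uint32_t demo_force_phy_dw0(unsigned int phy_id)
{
	uint32_t dw0 = 0;

	dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF;	/* one-hot phy select */
	dw0 |= 1U << CMD_HDR_FORCE_PHY_OFF;		/* force-phy enable */
	return dw0;
}
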
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 088cc40ae866..8ee2bfe47571 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -23,8 +23,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.727.03.00-rc1"
-#define MEGASAS_RELDATE "Oct 03, 2023"
+#define MEGASAS_VERSION "07.734.00.00-rc1"
+#define MEGASAS_RELDATE "Apr 03, 2025"
#define MEGASAS_MSIX_NAME_LEN 32
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c20447b39cb9..5e33d411fa3d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2103,6 +2103,9 @@ static int megasas_sdev_configure(struct scsi_device *sdev,
/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev, lim, is_target_prop);
+ if (!MEGASAS_IS_LOGICAL(sdev))
+ sdev->no_vpd_size = 1;
+
mutex_unlock(&instance->reset_mutex);
return 0;
@@ -3662,8 +3665,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_LD_INIT_IN_PROGRESS:
- cmd->scmd->result =
- (DID_ERROR << 16) | hdr->scsi_status;
+ if (hdr->scsi_status == 0xf0)
+ cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
+ else
+ cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status;
break;
case MFI_STAT_SCSI_DONE_WITH_ERROR:
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 721860cb1ef6..a6794f49e9fa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2043,7 +2043,10 @@ map_cmd_status(struct fusion_context *fusion,
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_LD_INIT_IN_PROGRESS:
- scmd->result = (DID_ERROR << 16) | ext_status;
+ if (ext_status == 0xf0)
+ scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
+ else
+ scmd->result = (DID_ERROR << 16) | ext_status;
break;
case MFI_STAT_SCSI_DONE_WITH_ERROR:
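
A sketch of the translation both MegaRAID hunks apply, with the constants mirroring their kernel values: the firmware reports 0xf0 where the SCSI midlayer expects the SAM check-condition status 0x02, so the value is rewritten before being packed into scmd->result.

#include <stdint.h>

#define DID_ERROR			0x07	/* as in <scsi/scsi.h> */
#define SAM_STAT_CHECK_CONDITION	0x02

static uint32_t demo_map_status(uint8_t fw_status)
{
	uint8_t sam = (fw_status == 0xf0) ? SAM_STAT_CHECK_CONDITION
					  : fw_status;

	return (DID_ERROR << 16) | sam;
}
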
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 183ce00aa671..f7067878b34f 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -766,6 +766,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+ pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9c347c64c315..0b8c91bf793f 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3182,11 +3182,14 @@ iscsi_set_host_param(struct iscsi_transport *transport,
}
/* see similar check in iscsi_if_set_param() */
- if (strlen(data) > ev->u.set_host_param.len)
- return -EINVAL;
+ if (strlen(data) > ev->u.set_host_param.len) {
+ err = -EINVAL;
+ goto out;
+ }
err = transport->set_host_param(shost, ev->u.set_host_param.param,
data, ev->u.set_host_param.len);
+out:
scsi_host_put(shost);
return err;
}
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 64f6b22e8cc0..aeb58a9e6b7f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -388,7 +388,7 @@ static void srp_reconnect_work(struct work_struct *work)
"reconnect attempt %d failed (%d)\n",
++rport->failed_reconnects, res);
delay = rport->reconnect_delay *
- min(100, max(1, rport->failed_reconnects - 10));
+ clamp(rport->failed_reconnects - 10, 1, 100);
if (delay > 0)
queue_delayed_work(system_long_wq,
&rport->reconnect_work, delay * HZ);
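
A standalone check of the equivalence this cleanup relies on: clamp(v, lo, hi) reads as min(max(v, lo), hi), which the original expressed in the less obvious min(100, max(1, v)) argument order.

#include <assert.h>

static int demo_clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	assert(demo_clamp(-5, 1, 100) == 1);	/* below range */
	assert(demo_clamp(42, 1, 100) == 42);	/* in range */
	assert(demo_clamp(500, 1, 100) == 100);	/* above range */
	return 0;
}
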
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 88135fdb8bd1..8a26eca4fdc9 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -19,6 +19,7 @@
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
+#include <linux/crash_dump.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -5246,7 +5247,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
ctrl_info->error_buffer_length =
ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
- if (reset_devices)
+ if (is_kdump_kernel())
max_transfer_size = min(ctrl_info->max_transfer_size,
PQI_MAX_TRANSFER_SIZE_KDUMP);
else
@@ -5275,7 +5276,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
u16 num_elements_per_iq;
u16 num_elements_per_oq;
- if (reset_devices) {
+ if (is_kdump_kernel()) {
num_queue_groups = 1;
} else {
int num_cpus;
@@ -8288,12 +8289,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
u32 product_id;
if (reset_devices) {
- if (pqi_is_fw_triage_supported(ctrl_info)) {
+ if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
rc = sis_wait_for_fw_triage_completion(ctrl_info);
if (rc)
return rc;
}
- if (sis_is_ctrl_logging_supported(ctrl_info)) {
+ if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
sis_notify_kdump(ctrl_info);
rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
if (rc)
@@ -8344,7 +8345,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->product_id = (u8)product_id;
ctrl_info->product_revision = (u8)(product_id >> 8);
- if (reset_devices) {
+ if (is_kdump_kernel()) {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
ctrl_info->max_outstanding_requests =
@@ -8480,7 +8481,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+ if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
}
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 5c59fddb32c1..b5ecffcaf795 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -949,24 +949,20 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
if (ret)
- goto err_destroy_mutex;
+ goto err_put_ctrl;
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
- goto err_destroy_mutex;
+ goto err_put_ctrl;
return 0;
-err_destroy_mutex:
- mutex_destroy(&q->lock);
-
err_disable_clk:
fsl_qspi_clk_disable_unprep(q);
err_put_ctrl:
spi_controller_put(ctlr);
- dev_err(dev, "Freescale QuadSPI probe failed\n");
return ret;
}
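
A hedged sketch, with hypothetical names, of the devres pattern that makes the removed labels redundant: once teardown is registered via devm_add_action_or_reset(), later error paths simply return and devres runs the action on probe failure or removal; destroying the mutex by hand on those paths would be a double cleanup.

#include <linux/device.h>

static void demo_cleanup(void *data)
{
	/* undo whatever the earlier setup did, e.g. mutex_destroy() */
}

static int demo_probe_step(struct device *dev, void *res)
{
	int ret = devm_add_action_or_reset(dev, demo_cleanup, res);

	if (ret)	/* demo_cleanup(res) has already been called */
		return ret;

	/* later failures can simply 'return err;' and devres unwinds */
	return 0;
}
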
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 90b5ab60f5ae..634cf163f4cb 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -466,6 +466,56 @@ static ssize_t critical_health_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hba->critical_health_count);
}
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int value;
+
+ if (kstrtouint(buf, 0, &value))
+ return -EINVAL;
+
+ /* the only supported use case is to reset the dev_lvl_exception_count */
+ if (value)
+ return -EINVAL;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+
+ return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u64 exception_id;
+ int err;
+
+ ufshcd_rpm_get_sync(hba);
+ err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+ ufshcd_rpm_put_sync(hba);
+
+ if (err)
+ return err;
+
+ hba->dev_lvl_exception_id = exception_id;
+ return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -479,6 +529,8 @@ static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -494,6 +546,8 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
&dev_attr_critical_health.attr,
+ &dev_attr_device_lvl_exception_count.attr,
+ &dev_attr_device_lvl_exception_id.attr,
NULL
};
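
A minimal sketch of the write-to-reset idiom device_lvl_exception_count_store() implements, with illustrative names: parse the input, accept only zero, and clear the counter, so writing 0 is the single supported operation and any other value is rejected.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kstrtox.h>

static atomic_t demo_count;

static ssize_t demo_count_store(const char *buf, size_t count)
{
	unsigned int value;

	if (kstrtouint(buf, 0, &value))
		return -EINVAL;
	if (value)		/* only reset-to-zero is supported */
		return -EINVAL;

	atomic_set(&demo_count, 0);
	return count;
}
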
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 10b4a19a70f1..d0a2c963a27d 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0534390c2a35..44156041d88f 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -3176,16 +3176,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
int err;
retry:
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
time_left);
if (likely(time_left)) {
- /*
- * The completion handler called complete() and the caller of
- * this function still owns the @lrbp tag so the code below does
- * not trigger any race conditions.
- */
- hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3199,7 +3193,6 @@ retry:
/* successfully cleared the command, retry if needed */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
err = -EAGAIN;
- hba->dev_cmd.complete = NULL;
return err;
}
@@ -3215,11 +3208,9 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending) {
- hba->dev_cmd.complete = NULL;
+ if (pending)
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- }
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3237,8 +3228,6 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending)
- hba->dev_cmd.complete = NULL;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3272,13 +3261,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
int err;
- hba->dev_cmd.complete = &wait;
-
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -5585,12 +5570,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (hba->dev_cmd.complete) {
+ } else {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
- complete(hba->dev_cmd.complete);
+ complete(&hba->dev_cmd.complete);
}
}
@@ -6013,6 +5998,42 @@ out:
__func__, err);
}
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+ struct utp_upiu_query_v4_0 *upiu_resp;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+
+ ufshcd_init_query(hba, &request, &response,
+ UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+ *exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ return err;
+}
+
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6083,7 +6104,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
u32 cur_buf;
@@ -6165,15 +6186,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
}
/*
- * The ufs device needs the vcc to be ON to flush.
* With user-space reduction enabled, it's enough to enable flush
* by checking only the available buffer. The threshold
* defined here is > 90% full.
* With user-space preserved enabled, the current-buffer
* should be checked too because the wb buffer size can reduce
* when disk tends to be full. This info is provided by current
- * buffer (dCurrentWriteBoosterBufferSize). There's no point in
- * keeping vcc on when current buffer is empty.
+ * buffer (dCurrentWriteBoosterBufferSize).
*/
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6188,7 +6207,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
if (!hba->dev_info.b_presrv_uspc_en)
return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
- return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+ return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6240,6 +6259,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
}
+ if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+ atomic_inc(&hba->dev_lvl_exception_count);
+ sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+ }
+
ufs_debugfs_exception_event(hba, status);
}
@@ -8139,6 +8163,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u32 ext_ufs_feature;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+ return;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+ ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8339,6 +8379,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufs_init_rtc(hba, desc_buf);
+ ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -10490,6 +10532,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ init_completion(&hba->dev_cmd.complete);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
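
A simplified model of the completion rework above, under the assumption (which the driver's dev_cmd.lock provides) that only one issuer waits at a time: the completion is embedded and initialized once, instead of pointing hba->dev_cmd.complete at a fresh on-stack completion per command and NULLing it on every path.

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_dev_cmd {
	struct completion complete;	/* embedded, initialized once */
};

static void demo_init(struct demo_dev_cmd *cmd)
{
	init_completion(&cmd->complete);
}

static int demo_issue_and_wait(struct demo_dev_cmd *cmd, unsigned long timeout)
{
	/* issue the command to the hardware here ... */
	if (!wait_for_completion_timeout(&cmd->complete, timeout))
		return -ETIMEDOUT;

	return 0;
}
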
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index d7539cda97da..3e545af536e5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -34,7 +34,7 @@
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
-#define PRDT_PREFECT_EN BIT(31)
+#define PRDT_PREFETCH_EN BIT(31)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
@@ -92,11 +92,16 @@
UIC_TRANSPORT_NO_CONNECTION_RX |\
UIC_TRANSPORT_BAD_TC)
-/* FSYS UFS Shareability */
-#define UFS_WR_SHARABLE BIT(2)
-#define UFS_RD_SHARABLE BIT(1)
-#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
-#define UFS_SHAREABILITY_OFFSET 0x710
+/* UFS Shareability */
+#define UFS_EXYNOSAUTO_WR_SHARABLE BIT(2)
+#define UFS_EXYNOSAUTO_RD_SHARABLE BIT(1)
+#define UFS_EXYNOSAUTO_SHARABLE (UFS_EXYNOSAUTO_WR_SHARABLE | \
+ UFS_EXYNOSAUTO_RD_SHARABLE)
+#define UFS_GS101_WR_SHARABLE BIT(1)
+#define UFS_GS101_RD_SHARABLE BIT(0)
+#define UFS_GS101_SHARABLE (UFS_GS101_WR_SHARABLE | \
+ UFS_GS101_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
/* Multi-host registers */
#define MHCTRL 0xC4
@@ -209,8 +214,8 @@ static int exynos_ufs_shareability(struct exynos_ufs *ufs)
/* IO Coherency setting */
if (ufs->sysreg) {
return regmap_update_bits(ufs->sysreg,
- ufs->shareability_reg_offset,
- UFS_SHARABLE, UFS_SHARABLE);
+ ufs->iocc_offset,
+ ufs->iocc_mask, ufs->iocc_val);
}
return 0;
@@ -957,6 +962,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
}
phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
+
+ if (generic_phy->power_count) {
+ phy_power_off(generic_phy);
+ phy_exit(generic_phy);
+ }
+
ret = phy_init(generic_phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
@@ -1049,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
exynos_ufs_set_unipro_pclk_div(ufs);
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
/* unipro */
exynos_ufs_config_unipro(ufs);
+ if (ufs->drv_data->pre_link)
+ ufs->drv_data->pre_link(ufs);
+
/* m-phy */
exynos_ufs_phy_init(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1059,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_phy_cap_attr(ufs);
}
- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
- if (ufs->drv_data->pre_link)
- ufs->drv_data->pre_link(ufs);
-
return 0;
}
@@ -1087,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ u32 val = ilog2(DATA_UNIT_SIZE);
exynos_ufs_establish_connt(ufs);
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
- hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ val |= PRDT_PREFETCH_EN;
+ hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
@@ -1168,12 +1184,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
ufs->sysreg = NULL;
else {
if (of_property_read_u32_index(np, "samsung,sysreg", 1,
- &ufs->shareability_reg_offset)) {
+ &ufs->iocc_offset)) {
dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
- ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+ ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
}
}
+ ufs->iocc_mask = ufs->drv_data->iocc_mask;
+ /*
+ * no 'dma-coherent' property means the descriptors are
+ * non-cacheable so iocc shareability should be disabled.
+ */
+ if (of_dma_is_coherent(dev->of_node))
+ ufs->iocc_val = ufs->iocc_mask;
+ else
+ ufs->iocc_val = 0;
+
ufs->pclk_avail_min = PCLK_AVAIL_MIN;
ufs->pclk_avail_max = PCLK_AVAIL_MAX;
@@ -1497,6 +1523,14 @@ out:
return ret;
}
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+}
+
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1667,6 +1701,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
}
}
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+ hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1675,6 +1715,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE)
return 0;
+ if (ufs->drv_data->suspend)
+ ufs->drv_data->suspend(ufs);
+
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
@@ -1952,6 +1995,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
+ .exit = exynos_ufs_exit,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
@@ -1990,13 +2034,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static void exynos_ufs_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
ufshcd_pltfrm_remove(pdev);
-
- phy_power_off(ufs->phy);
- phy_exit(ufs->phy);
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -2035,6 +2073,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .iocc_mask = UFS_EXYNOSAUTO_SHARABLE,
.drv_init = exynosauto_ufs_drv_init,
.post_hce_enable = exynosauto_ufs_post_hce_enable,
.pre_link = exynosauto_ufs_pre_link,
@@ -2136,10 +2175,12 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
.opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_UFSPR_SECURE |
EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
+ .iocc_mask = UFS_GS101_SHARABLE,
.drv_init = gs101_ufs_drv_init,
.pre_link = gs101_ufs_pre_link,
.post_link = gs101_ufs_post_link,
.pre_pwr_change = gs101_ufs_pre_pwr_change,
+ .suspend = gs101_ufs_suspend,
};
static const struct of_device_id exynos_ufs_of_match[] = {
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index aac517276189..abe7e472759e 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -181,6 +181,7 @@ struct exynos_ufs_drv_data {
struct exynos_ufs_uic_attr *uic_attr;
unsigned int quirks;
unsigned int opts;
+ u32 iocc_mask;
/* SoC's specific operations */
int (*drv_init)(struct exynos_ufs *ufs);
int (*pre_link)(struct exynos_ufs *ufs);
@@ -191,6 +192,7 @@ struct exynos_ufs_drv_data {
const struct ufs_pa_layer_attr *pwr);
int (*pre_hce_enable)(struct exynos_ufs *ufs);
int (*post_hce_enable)(struct exynos_ufs *ufs);
+ int (*suspend)(struct exynos_ufs *ufs);
};
struct ufs_phy_time_cfg {
@@ -230,7 +232,9 @@ struct exynos_ufs {
ktime_t entry_hibern8_t;
const struct exynos_ufs_drv_data *drv_data;
struct regmap *sysreg;
- u32 shareability_reg_offset;
+ u32 iocc_offset;
+ u32 iocc_mask;
+ u32 iocc_val;
u32 opts;
#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0)
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 691e0ae607a1..8c6130789fde 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -348,9 +348,9 @@ static int afs_dynroot_readdir(struct file *file, struct dir_context *ctx)
}
if ((unsigned long long)ctx->pos <= AFS_MAX_DYNROOT_CELL_INO) {
- rcu_read_lock();
+ down_read(&net->cells_lock);
ret = afs_dynroot_readdir_cells(net, ctx);
- rcu_read_unlock();
+ up_read(&net->cells_lock);
}
return ret;
}
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index adcf425a9b88..07709b0d7688 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -15,10 +15,9 @@ config BCACHEFS_FS
select ZLIB_INFLATE
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
- select CRYPTO
select CRYPTO_LIB_SHA256
- select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
+ select CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_POLY1305
select KEYS
select RAID6_PQ
select XOR_BLOCKS
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 5d9f208a1bb7..5cb0fc384ac0 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -981,8 +981,8 @@ struct bch_fs {
mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR];
size_t zstd_workspace_size;
- struct crypto_sync_skcipher *chacha20;
- struct crypto_shash *poly1305;
+ struct bch_key chacha20_key;
+ bool chacha20_key_set;
atomic64_t key_version;
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index d1ad1a7613c9..7d6c971db23c 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -644,8 +644,6 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
*/
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
- cond_resched();
-
const struct journal_key *l = _l;
const struct journal_key *r = _r;
@@ -689,7 +687,8 @@ void bch2_journal_keys_put(struct bch_fs *c)
static void __journal_keys_sort(struct journal_keys *keys)
{
- sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);
+ sort_nonatomic(keys->data, keys->nr, sizeof(keys->data[0]),
+ journal_sort_key_cmp, NULL);
cond_resched();
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 8c9fdb7263fe..86acf037590c 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -183,7 +183,7 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
return;
if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) {
- if (!c->chacha20)
+ if (!c->chacha20_key_set)
return;
struct nonce nonce = btree_nonce(&bn->keys, 0);
@@ -398,7 +398,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
bch2_print_string_as_lines(KERN_INFO, buf.buf);
}
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
+ sort_nonatomic(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
dst = 0;
darray_for_each(f->nodes, i) {
@@ -418,7 +418,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
}
f->nodes.nr = dst;
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
+ sort_nonatomic(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
if (0 && c->opts.verbose) {
printbuf_reset(&buf);
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index adbe576ec77e..0941fb2c026d 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -428,10 +428,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
*/
trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
- sort(wb->flushing.keys.data,
- wb->flushing.keys.nr,
- sizeof(wb->flushing.keys.data[0]),
- wb_key_seq_cmp, NULL);
+ sort_nonatomic(wb->flushing.keys.data,
+ wb->flushing.keys.nr,
+ sizeof(wb->flushing.keys.data[0]),
+ wb_key_seq_cmp, NULL);
darray_for_each(wb->flushing.keys, i) {
if (!i->journal_seq)
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 3726689093e3..d0a34a097b80 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -7,17 +7,12 @@
#include "super-io.h"
#include <linux/crc32c.h>
-#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
#include <crypto/chacha.h>
-#include <crypto/hash.h>
#include <crypto/poly1305.h>
-#include <crypto/skcipher.h>
#include <keys/user-type.h>
/*
@@ -96,116 +91,40 @@ static void bch2_checksum_update(struct bch2_checksum_state *state, const void *
}
}
-static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- struct scatterlist *sg, size_t len)
+static void bch2_chacha20_init(u32 state[CHACHA_STATE_WORDS],
+ const struct bch_key *key, struct nonce nonce)
{
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ u32 key_words[CHACHA_KEY_SIZE / sizeof(u32)];
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
+ BUILD_BUG_ON(sizeof(key_words) != sizeof(*key));
+ memcpy(key_words, key, sizeof(key_words));
+ le32_to_cpu_array(key_words, ARRAY_SIZE(key_words));
- int ret = crypto_skcipher_encrypt(req);
- if (ret)
- pr_err("got error %i from crypto_skcipher_encrypt()", ret);
-
- return ret;
-}
-
-static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- void *buf, size_t len)
-{
- if (!is_vmalloc_addr(buf)) {
- struct scatterlist sg = {};
-
- sg_mark_end(&sg);
- sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
- return do_encrypt_sg(tfm, nonce, &sg, len);
- } else {
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
- int ret;
-
- darray_init(&sgl);
-
- while (len) {
- unsigned offset = offset_in_page(buf);
- struct scatterlist sg = {
- .page_link = (unsigned long) vmalloc_to_page(buf),
- .offset = offset,
- .length = min(len, PAGE_SIZE - offset),
- };
+ BUILD_BUG_ON(sizeof(nonce) != CHACHA_IV_SIZE);
+ chacha_init(state, key_words, (const u8 *)nonce.d);
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
- BUG_ON(darray_push(&sgl, sg));
- }
-
- buf += sg.length;
- len -= sg.length;
- sgl_len += sg.length;
- }
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
- return ret;
- }
+ memzero_explicit(key_words, sizeof(key_words));
}
-int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
- void *buf, size_t len)
+static void bch2_chacha20(const struct bch_key *key, struct nonce nonce,
+ void *data, size_t len)
{
- struct crypto_sync_skcipher *chacha20 =
- crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret;
-
- ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
- return ret;
- }
-
- ret = crypto_skcipher_setkey(&chacha20->base,
- (void *) key, sizeof(*key));
- if (ret) {
- pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
- goto err;
- }
+ u32 state[CHACHA_STATE_WORDS];
- ret = do_encrypt(chacha20, nonce, buf, len);
-err:
- crypto_free_sync_skcipher(chacha20);
- return ret;
+ bch2_chacha20_init(state, key, nonce);
+ chacha20_crypt(state, data, data, len);
+ memzero_explicit(state, sizeof(state));
}
-static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
- struct nonce nonce)
+static void bch2_poly1305_init(struct poly1305_desc_ctx *desc,
+ struct bch_fs *c, struct nonce nonce)
{
- u8 key[POLY1305_KEY_SIZE];
- int ret;
+ u8 key[POLY1305_KEY_SIZE] = { 0 };
nonce.d[3] ^= BCH_NONCE_POLY;
- memset(key, 0, sizeof(key));
- ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
- if (ret)
- return ret;
-
- desc->tfm = c->poly1305;
- crypto_shash_init(desc);
- crypto_shash_update(desc, key, sizeof(key));
- return 0;
+ bch2_chacha20(&c->chacha20_key, nonce, key, sizeof(key));
+ poly1305_init(desc, key);
}
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
@@ -230,14 +149,13 @@ struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
case BCH_CSUM_chacha20_poly1305_80:
case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
+ struct poly1305_desc_ctx dctx;
u8 digest[POLY1305_DIGEST_SIZE];
struct bch_csum ret = { 0 };
- gen_poly_key(c, desc, nonce);
-
- crypto_shash_update(desc, data, len);
- crypto_shash_final(desc, digest);
+ bch2_poly1305_init(&dctx, c, nonce);
+ poly1305_update(&dctx, data, len);
+ poly1305_final(&dctx, digest);
memcpy(&ret, digest, bch_crc_bytes[type]);
return ret;
@@ -253,11 +171,12 @@ int bch2_encrypt(struct bch_fs *c, unsigned type,
if (!bch2_csum_type_is_encryption(type))
return 0;
- if (bch2_fs_inconsistent_on(!c->chacha20,
+ if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
- return do_encrypt(c->chacha20, nonce, data, len);
+ bch2_chacha20(&c->chacha20_key, nonce, data, len);
+ return 0;
}
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
@@ -296,26 +215,26 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
case BCH_CSUM_chacha20_poly1305_80:
case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
+ struct poly1305_desc_ctx dctx;
u8 digest[POLY1305_DIGEST_SIZE];
struct bch_csum ret = { 0 };
- gen_poly_key(c, desc, nonce);
+ bch2_poly1305_init(&dctx, c, nonce);
#ifdef CONFIG_HIGHMEM
__bio_for_each_segment(bv, bio, *iter, *iter) {
void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
- crypto_shash_update(desc, p, bv.bv_len);
+ poly1305_update(&dctx, p, bv.bv_len);
kunmap_local(p);
}
#else
__bio_for_each_bvec(bv, bio, *iter, *iter)
- crypto_shash_update(desc,
+ poly1305_update(&dctx,
page_address(bv.bv_page) + bv.bv_offset,
bv.bv_len);
#endif
- crypto_shash_final(desc, digest);
+ poly1305_final(&dctx, digest);
memcpy(&ret, digest, bch_crc_bytes[type]);
return ret;
@@ -338,43 +257,33 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
{
struct bio_vec bv;
struct bvec_iter iter;
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
+ u32 chacha_state[CHACHA_STATE_WORDS];
int ret = 0;
- if (bch2_fs_inconsistent_on(!c->chacha20,
+ if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
- darray_init(&sgl);
+ bch2_chacha20_init(chacha_state, &c->chacha20_key, nonce);
bio_for_each_segment(bv, bio, iter) {
- struct scatterlist sg = {
- .page_link = (unsigned long) bv.bv_page,
- .offset = bv.bv_offset,
- .length = bv.bv_len,
- };
-
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
-
- BUG_ON(darray_push(&sgl, sg));
+ void *p;
+
+ /*
+ * chacha_crypt() assumes that the length is a multiple of
+ * CHACHA_BLOCK_SIZE on any non-final call.
+ */
+ if (!IS_ALIGNED(bv.bv_len, CHACHA_BLOCK_SIZE)) {
+ bch_err_ratelimited(c, "bio not aligned for encryption");
+ ret = -EIO;
+ break;
}
- sgl_len += sg.length;
+ p = bvec_kmap_local(&bv);
+ chacha20_crypt(chacha_state, p, p, bv.bv_len);
+ kunmap_local(p);
}
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
+ memzero_explicit(chacha_state, sizeof(chacha_state));
return ret;
}
@@ -650,10 +559,7 @@ int bch2_decrypt_sb_key(struct bch_fs *c,
}
/* decrypt real key: */
- ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
- &sb_key, sizeof(sb_key));
- if (ret)
- goto err;
+ bch2_chacha20(&user_key, bch2_sb_key_nonce(c), &sb_key, sizeof(sb_key));
if (bch2_key_is_encrypted(&sb_key)) {
bch_err(c, "incorrect encryption key");
@@ -668,31 +574,6 @@ err:
return ret;
}
-static int bch2_alloc_ciphers(struct bch_fs *c)
-{
- if (c->chacha20)
- return 0;
-
- struct crypto_sync_skcipher *chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
- return ret;
- }
-
- struct crypto_shash *poly1305 = crypto_alloc_shash("poly1305", 0, 0);
- ret = PTR_ERR_OR_ZERO(poly1305);
- if (ret) {
- bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
- crypto_free_sync_skcipher(chacha20);
- return ret;
- }
-
- c->chacha20 = chacha20;
- c->poly1305 = poly1305;
- return 0;
-}
-
#if 0
/*
@@ -797,35 +678,21 @@ err:
void bch2_fs_encryption_exit(struct bch_fs *c)
{
- if (c->poly1305)
- crypto_free_shash(c->poly1305);
- if (c->chacha20)
- crypto_free_sync_skcipher(c->chacha20);
+ memzero_explicit(&c->chacha20_key, sizeof(c->chacha20_key));
}
int bch2_fs_encryption_init(struct bch_fs *c)
{
struct bch_sb_field_crypt *crypt;
- struct bch_key key;
- int ret = 0;
+ int ret;
crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
if (!crypt)
- goto out;
+ return 0;
- ret = bch2_alloc_ciphers(c);
+ ret = bch2_decrypt_sb_key(c, crypt, &c->chacha20_key);
if (ret)
- goto out;
-
- ret = bch2_decrypt_sb_key(c, crypt, &key);
- if (ret)
- goto out;
-
- ret = crypto_skcipher_setkey(&c->chacha20->base,
- (void *) &key.key, sizeof(key.key));
- if (ret)
- goto out;
-out:
- memzero_explicit(&key, sizeof(key));
- return ret;
+ return ret;
+ c->chacha20_key_set = true;
+ return 0;
}
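
The whole-file conversion above replaces crypto-API tfm objects (allocation, setkey, scatterlists, request structs) with direct calls into the ChaCha20 and Poly1305 library code. A reduced sketch of the two library call patterns as this patch uses them; the demo_ helpers are not bcachefs code:

#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/string.h>

static void demo_chacha20_xor(const u32 key_words[CHACHA_KEY_SIZE / 4],
			      const u8 iv[CHACHA_IV_SIZE],
			      u8 *data, size_t len)
{
	u32 state[CHACHA_STATE_WORDS];

	chacha_init(state, key_words, iv);
	chacha20_crypt(state, data, data, len);	/* in-place keystream XOR */
	memzero_explicit(state, sizeof(state));	/* wipe key material */
}

static void demo_poly1305_tag(const u8 key[POLY1305_KEY_SIZE],
			      const u8 *msg, size_t len,
			      u8 tag[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);
	poly1305_update(&desc, msg, len);
	poly1305_final(&desc, tag);
}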
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 4ac251c8fcd8..1310782d3ae9 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -69,7 +69,6 @@ static inline void bch2_csum_err_msg(struct printbuf *out,
bch2_csum_to_text(out, type, expected);
}
-int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
int bch2_request_key(struct bch_sb *, struct bch_key *);
#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *);
@@ -156,7 +155,7 @@ static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
if (type >= BCH_CSUM_NR)
return false;
- if (bch2_csum_type_is_encryption(type) && !c->chacha20)
+ if (bch2_csum_type_is_encryption(type) && !c->chacha20_key_set)
return false;
return true;
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index de02ebf847ec..b211c97238ab 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -607,7 +607,7 @@ void bch2_data_update_inflight_to_text(struct printbuf *out, struct data_update
prt_newline(out);
printbuf_indent_add(out, 2);
bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
- prt_printf(out, "read_done:\t\%u\n", m->read_done);
+ prt_printf(out, "read_done:\t%u\n", m->read_done);
bch2_write_op_to_text(out, &m->op);
printbuf_indent_sub(out, 2);
}
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index bf53a029f356..8488a7578115 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -287,8 +287,8 @@ static void dirent_init_casefolded_name(struct bkey_i_dirent *dirent,
EBUG_ON(!dirent->v.d_casefold);
EBUG_ON(!cf_name->len);
- dirent->v.d_cf_name_block.d_name_len = name->len;
- dirent->v.d_cf_name_block.d_cf_name_len = cf_name->len;
+ dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
+ dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[name->len], cf_name->name, cf_name->len);
memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_name->len], 0,
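
The two-line fix above closes a sparse-visible endianness bug: d_name_len and d_cf_name_len are on-disk __le16 fields, so storing a native-endian u16 corrupts them on big-endian hosts. The general rule, as a tiny illustrative sketch:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_disk_rec {
	__le16 d_name_len;	/* on-disk: always little endian */
};

static void demo_set_len(struct demo_disk_rec *rec, u16 len)
{
	rec->d_name_len = cpu_to_le16(len);	/* never a raw assignment */
}

static u16 demo_get_len(const struct demo_disk_rec *rec)
{
	return le16_to_cpu(rec->d_name_len);
}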
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 19d4599918dc..e3a75dcca60c 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -225,11 +225,26 @@ static void bchfs_read(struct btree_trans *trans,
bch2_read_extent(trans, rbio, iter.pos,
data_btree, k, offset_into_extent, flags);
- swap(rbio->bio.bi_iter.bi_size, bytes);
+ /*
+ * Careful there's a landmine here if bch2_read_extent() ever
+	 * Careful: there's a landmine here if bch2_read_extent() ever
+	 * starts returning transaction restarts.
+ * We've changed rbio->bi_iter.bi_size to be "bytes we can read
+ * from this extent" with the swap call, and we restore it
+ * below. That restore needs to come before checking for
+ * errors.
+ *
+ * But unlike __bch2_read(), we use the rbio bvec iter, not one
+ * on the stack, so we can't do the restore right after the
+ * bch2_read_extent() call: we don't own that iterator anymore
+ * if BCH_READ_last_fragment is set, since we may have submitted
+ * that rbio instead of cloning it.
+ */
if (flags & BCH_READ_last_fragment)
break;
+ swap(rbio->bio.bi_iter.bi_size, bytes);
bio_advance(&rbio->bio, bytes);
err:
if (ret &&
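
The comment added above describes a bi_size swap trick; reduced to its bones it looks like the sketch below (hedged, demo_ names are not bcachefs code). The point is that the restore must be skipped once the bio may have been submitted and no longer belongs to us:

#include <linux/bio.h>
#include <linux/minmax.h>

static void demo_read_fragment(struct bio *bio, unsigned int fragment_bytes,
			       bool last_fragment)
{
	/* shrink the iter to just this fragment for the read */
	swap(bio->bi_iter.bi_size, fragment_bytes);

	/* ... issue the read for the fragment here ... */

	if (last_fragment)
		return;		/* bio may belong to the submitter now */

	/* we still own the bio: restore the full size and advance */
	swap(bio->bi_iter.bi_size, fragment_bytes);
	bio_advance(bio, fragment_bytes);
}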
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 417bb0c7bbfa..fd627c8d1053 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -977,7 +977,8 @@ retry_pick:
goto err;
}
- if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) && !c->chacha20) {
+ if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) &&
+ !c->chacha20_key_set) {
struct printbuf buf = PRINTBUF;
bch2_read_err_msg_trans(trans, &buf, orig, read_pos);
prt_printf(&buf, "attempting to read encrypted data without encryption key\n ");
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 1b7961f4f609..2a54ac79189b 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1460,7 +1460,7 @@ fsck_err:
static void journal_advance_devs_to_next_bucket(struct journal *j,
struct dev_alloc_list *devs,
- unsigned sectors, u64 seq)
+ unsigned sectors, __le64 seq)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 79fd18a5a07c..d2b07f602da9 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -389,9 +389,9 @@ int bch2_journal_replay(struct bch_fs *c)
* Now, replay any remaining keys in the order in which they appear in
* the journal, unpinning those journal entries as we go:
*/
- sort(keys_sorted.data, keys_sorted.nr,
- sizeof(keys_sorted.data[0]),
- journal_sort_seq_cmp, NULL);
+ sort_nonatomic(keys_sorted.data, keys_sorted.nr,
+ sizeof(keys_sorted.data[0]),
+ journal_sort_seq_cmp, NULL);
darray_for_each(keys_sorted, kp) {
cond_resched();
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index a58edde43bee..b79e80a435e0 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -70,14 +70,10 @@
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
-#include <crypto/hash.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");
-MODULE_SOFTDEP("pre: chacha20");
-MODULE_SOFTDEP("pre: poly1305");
-MODULE_SOFTDEP("pre: xxhash");
const char * const bch2_fs_flag_strs[] = {
#define x(n) #n,
@@ -1002,12 +998,6 @@ static void print_mount_opts(struct bch_fs *c)
prt_str(&p, "starting version ");
bch2_version_to_text(&p, c->sb.version);
- if (c->opts.read_only) {
- prt_str(&p, " opts=");
- first = false;
- prt_printf(&p, "ro");
- }
-
for (i = 0; i < bch2_opts_nr; i++) {
const struct bch_option *opt = &bch2_opt_table[i];
u64 v = bch2_opt_get_by_id(&c->opts, i);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3dd555db3d32..aa58e0663a5d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3853,7 +3853,6 @@ static int write_dev_supers(struct btrfs_device *device,
atomic_inc(&device->sb_write_errors);
continue;
}
- ASSERT(folio_order(folio) == 0);
offset = offset_in_folio(folio, bytenr);
disk_super = folio_address(folio) + offset;
@@ -3926,7 +3925,6 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
/* If the folio has been removed, then we know it completed. */
if (IS_ERR(folio))
continue;
- ASSERT(folio_order(folio) == 0);
/* Folio will be unlocked once the write completes. */
folio_wait_locked(folio);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a13d81bb56a0..63aeacc54945 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4902,6 +4902,8 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state,
&disk_bytenr, &disk_io_size);
+ if (ret == -EAGAIN)
+ goto out_acct;
if (ret < 0 && ret != -EIOCBQUEUED)
goto out_free;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 40709e2a44fc..7121d8c7a318 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1139,8 +1139,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
subvol_name = btrfs_get_subvol_name_from_objectid(info,
btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
if (!IS_ERR(subvol_name)) {
- seq_puts(seq, ",subvol=");
- seq_escape(seq, subvol_name, " \t\n\\");
+ seq_show_option(seq, "subvol", subvol_name);
kfree(subvol_name);
}
return 0;
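
seq_show_option() is the dedicated helper for emitting ",name=value" mount options with proper escaping; the open-coded seq_puts() + seq_escape() pair removed above covered fewer characters. Usage is a one-liner, sketched here:

#include <linux/seq_file.h>

static void demo_show_subvol(struct seq_file *seq, const char *subvol_name)
{
	/* prints ",subvol=<escaped value>" */
	seq_show_option(seq, "subvol", subvol_name);
}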
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 42e4d6eeb29f..9c20d78e41f6 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -89,12 +89,12 @@ enum {
};
static const struct fs_parameter_spec devpts_param_specs[] = {
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_gid ("gid", Opt_gid),
fsparam_s32 ("max", Opt_max),
fsparam_u32oct ("mode", Opt_mode),
fsparam_flag ("newinstance", Opt_newinstance),
fsparam_u32oct ("ptmxmode", Opt_ptmxmode),
- fsparam_u32 ("uid", Opt_uid),
+ fsparam_uid ("uid", Opt_uid),
{}
};
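
fsparam_uid()/fsparam_gid() let fs_parse() do the idmapping: the result arrives as an already-validated kuid_t/kgid_t in result.uid / result.gid instead of a raw u32 the filesystem has to push through make_kuid() itself. A hedged sketch of a parse hook consuming it (demo_ names are illustrative; the real devpts handler differs):

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

struct demo_fs_ctx {
	kuid_t uid;
};

static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct demo_fs_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, devpts_param_specs, param, &result);
	if (opt < 0)
		return opt;

	if (opt == Opt_uid)
		ctx->uid = result.uid;	/* validated kuid_t, no make_kuid() */
	return 0;
}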
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index 9581e9bf8192..767fb4acdc93 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -56,7 +56,7 @@ struct erofs_super_block {
union {
__le16 rootnid_2b; /* nid of root directory */
__le16 blocks_hi; /* (48BIT on) blocks count MSB */
- } rb;
+ } __packed rb;
__le64 inos; /* total valid ino # (== f_files - f_favail) */
__le64 epoch; /* base seconds used for compact inodes */
__le32 fixed_nsec; /* fixed nanoseconds for compact inodes */
@@ -148,7 +148,7 @@ union erofs_inode_i_nb {
__le16 nlink; /* if EROFS_I_NLINK_1_BIT is unset */
__le16 blocks_hi; /* total blocks count MSB */
__le16 startblk_hi; /* starting block number MSB */
-};
+} __packed;
/* 32-byte reduced form of an ondisk inode */
struct erofs_inode_compact {
@@ -369,9 +369,9 @@ struct z_erofs_map_header {
* bit 7 : pack the whole file into packed inode
*/
__u8 h_clusterbits;
- };
+ } __packed;
__le16 h_extents_hi; /* extent count MSB */
- };
+ } __packed;
};
enum {
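
The added __packed annotations pin the byte layout of the nested unions: an unpacked inner union keeps the natural alignment of its widest member, which can insert padding into the surrounding on-disk struct and shift every later field. An illustrative (non-erofs) demonstration:

#include <linux/types.h>
#include <linux/build_bug.h>

struct demo_ondisk {
	__le16 tag;
	union {
		__le16 small;
		__le32 big;
	} __packed u;	/* without __packed: 2 bytes of padding before it */
	__le16 tail;
};

/* 2 + 4 + 2 bytes, no padding, as an on-disk format requires */
static_assert(sizeof(struct demo_ondisk) == 8);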
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index bec4b56b3826..4fa0a0121288 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -32,6 +32,8 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
ret = 0;
}
if (rq->bio.bi_end_io) {
+ if (ret < 0 && !rq->bio.bi_status)
+ rq->bio.bi_status = errno_to_blk_status(ret);
rq->bio.bi_end_io(&rq->bio);
} else {
bio_for_each_folio_all(fi, &rq->bio) {
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 0671184d9cf1..5c061aaeeb45 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -725,7 +725,6 @@ static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
lockref_init(&pcl->lockref); /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
pcl->pclustersize = map->m_plen;
- pcl->pageofs_in = pageofs_in;
pcl->length = 0;
pcl->partial = true;
pcl->next = fe->head;
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 8de50df05dfe..14ea47f954f5 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -559,7 +559,8 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
pos += sizeof(__le64);
lstart = 0;
} else {
- lstart = map->m_la >> vi->z_lclusterbits;
+ lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
+ pos += (lstart >> vi->z_lclusterbits) * recsz;
pa = EROFS_NULL_ADDR;
}
@@ -614,7 +615,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
map->m_flags |= EROFS_MAP_MAPPED | EROFS_MAP_FRAGMENT;
vi->z_fragmentoff = map->m_plen;
- if (recsz >= offsetof(struct z_erofs_extent, pstart_lo))
+ if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
vi->z_fragmentoff |= map->m_pa << 32;
} else if (map->m_plen) {
map->m_flags |= EROFS_MAP_MAPPED |
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 87ee3a17bd29..e8c5525afc67 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -351,10 +351,9 @@ int ext4_check_blockref(const char *function, unsigned int line,
{
__le32 *bref = p;
unsigned int blk;
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ if (journal && inode == journal->j_inode)
return 0;
while (bref < p+max) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1dc09ed5d403..94c7d2d828a6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -386,10 +386,11 @@ static int __check_block_validity(struct inode *inode, const char *func,
unsigned int line,
struct ext4_map_blocks *map)
{
- if (ext4_has_feature_journal(inode->i_sb) &&
- (inode->i_ino ==
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+
+ if (journal && inode == journal->j_inode)
return 0;
+
if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock %llu "
@@ -4724,22 +4725,43 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
inode_set_iversion_queried(inode, val);
}
-static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
-
+static int check_igot_inode(struct inode *inode, ext4_iget_flags flags,
+ const char *function, unsigned int line)
{
+ const char *err_str;
+
if (flags & EXT4_IGET_EA_INODE) {
- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
- return "missing EA_INODE flag";
+ if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ err_str = "missing EA_INODE flag";
+ goto error;
+ }
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
- EXT4_I(inode)->i_file_acl)
- return "ea_inode with extended attributes";
+ EXT4_I(inode)->i_file_acl) {
+ err_str = "ea_inode with extended attributes";
+ goto error;
+ }
} else {
- if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
- return "unexpected EA_INODE flag";
+ if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+ /*
+ * open_by_handle_at() could provide an old inode number
+ * that has since been reused for an ea_inode; this does
+ * not indicate filesystem corruption
+ */
+ if (flags & EXT4_IGET_HANDLE)
+ return -ESTALE;
+ err_str = "unexpected EA_INODE flag";
+ goto error;
+ }
+ }
+ if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
+ err_str = "unexpected bad inode w/o EXT4_IGET_BAD";
+ goto error;
}
- if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
- return "unexpected bad inode w/o EXT4_IGET_BAD";
- return NULL;
+ return 0;
+
+error:
+ ext4_error_inode(inode, function, line, 0, err_str);
+ return -EFSCORRUPTED;
}
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
@@ -4751,7 +4773,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
struct ext4_inode_info *ei;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct inode *inode;
- const char *err_str;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
loff_t size;
@@ -4780,10 +4801,10 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW)) {
- if ((err_str = check_igot_inode(inode, flags)) != NULL) {
- ext4_error_inode(inode, function, line, 0, err_str);
+ ret = check_igot_inode(inode, flags, function, line);
+ if (ret) {
iput(inode);
- return ERR_PTR(-EFSCORRUPTED);
+ return ERR_PTR(ret);
}
return inode;
}
@@ -5065,13 +5086,21 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED;
goto bad_inode;
}
- if ((err_str = check_igot_inode(inode, flags)) != NULL) {
- ext4_error_inode(inode, function, line, 0, err_str);
- ret = -EFSCORRUPTED;
- goto bad_inode;
+ ret = check_igot_inode(inode, flags, function, line);
+ /*
+ * -ESTALE here means there is nothing inherently wrong with the inode,
+ * it's just not an inode we can return for an fhandle lookup.
+ */
+ if (ret == -ESTALE) {
+ brelse(iloc.bh);
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(-ESTALE);
}
-
+ if (ret)
+ goto bad_inode;
brelse(iloc.bh);
+
unlock_new_inode(inode);
return inode;
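
After the rework, check_igot_inode() reports the problem itself and callers only propagate its return value, with -ESTALE reserved for the benign handle-reuse case. A condensed sketch of the caller side as the hunks above shape it (assumes the surrounding file's context):

static struct inode *demo_igot_check(struct inode *inode,
				     ext4_iget_flags flags,
				     const char *function, unsigned int line)
{
	int ret = check_igot_inode(inode, flags, function, line);

	if (ret) {
		/* -ESTALE (handle reuse) or -EFSCORRUPTED (already logged) */
		iput(inode);
		return ERR_PTR(ret);
	}
	return inode;
}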
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 0d523e9fb3d5..f88424c28194 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3037,10 +3037,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
unsigned char blocksize_bits = min_t(unsigned char,
sb->s_blocksize_bits,
EXT4_MAX_BLOCK_LOG_SIZE);
- struct sg {
- struct ext4_group_info info;
- ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
- } sg;
+ DEFINE_RAW_FLEX(struct ext4_group_info, sg, bb_counters,
+ EXT4_MAX_BLOCK_LOG_SIZE + 2);
group--;
if (group == 0)
@@ -3048,7 +3046,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
" 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
" 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
- i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
+ i = (blocksize_bits + 2) * sizeof(sg->bb_counters[0]) +
sizeof(struct ext4_group_info);
grinfo = ext4_get_group_info(sb, group);
@@ -3068,14 +3066,14 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
* We care only about free space counters in the group info and
* these are safe to access even after the buddy has been unloaded
*/
- memcpy(&sg, grinfo, i);
- seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
- sg.info.bb_fragments, sg.info.bb_first_free);
+ memcpy(sg, grinfo, i);
+ seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg->bb_free,
+ sg->bb_fragments, sg->bb_first_free);
for (i = 0; i <= 13; i++)
seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
- sg.info.bb_counters[i] : 0);
+ sg->bb_counters[i] : 0);
seq_puts(seq, " ]");
- if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
+ if (EXT4_MB_GRP_BBITMAP_CORRUPT(sg))
seq_puts(seq, " Block bitmap corrupted!");
seq_putc(seq, '\n');
return 0;
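
DEFINE_RAW_FLEX() declares on-stack storage for a flexible-array struct with a compile-time worst-case element count and yields a pointer to it, replacing ad-hoc wrappers like the removed struct sg. A minimal sketch of the pattern:

#include <linux/overflow.h>

struct demo_info {
	unsigned int nr;
	unsigned int counters[];	/* flexible array member */
};

static unsigned int demo_sum_counters(void)
{
	/* on-stack demo_info with room for 4 trailing counters */
	DEFINE_RAW_FLEX(struct demo_info, info, counters, 4);
	unsigned int sum = 0, i;

	info->nr = 4;
	for (i = 0; i < info->nr; i++) {
		info->counters[i] = i;
		sum += info->counters[i];
	}
	return sum;
}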
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index cb5cb33b1d91..e9712e64ec8f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1971,7 +1971,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
* split it in half by count; each resulting block will have at least
* half the space free.
*/
- if (i > 0)
+ if (i >= 0)
split = count - move;
else
split = count/2;
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 2c7b24cb67ad..53c2626e90e7 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1669,6 +1669,9 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
unsigned int virtqueue_size;
int err = -EIO;
+ if (!fsc->source)
+ return invalf(fsc, "No source specified");
+
/* This gets a reference on virtio_fs object. This ptr gets installed
* in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
* to drop the reference to this object.
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 6add6ebfef89..cb823a8a6ba9 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
else
key_len = tree->max_key_len + 1;
+ if (key_len > sizeof(hfs_btree_key) || key_len < 1) {
+ memset(key, 0, sizeof(hfs_btree_key));
+ pr_err("hfs: Invalid key length: %d\n", key_len);
+ return;
+ }
+
hfs_bnode_read(node, key, off, key_len);
}
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 87974d5e6791..079ea80534f7 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
else
key_len = tree->max_key_len + 2;
+ if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
+ memset(key, 0, sizeof(hfsplus_btree_key));
+ pr_err("hfsplus: Invalid key length: %d\n", key_len);
+ return;
+ }
+
hfs_bnode_read(node, key, off, key_len);
}
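
Both bnode hunks add the same defensive shape: validate an on-disk length against the destination buffer before copying, and fail loudly rather than overrun the caller's key buffer. A generic sketch with illustrative names:

#include <linux/string.h>
#include <linux/printk.h>
#include <linux/errno.h>

static int demo_read_key(void *dst, size_t dst_size,
			 const void *src, int key_len)
{
	if (key_len < 1 || key_len > dst_size) {
		memset(dst, 0, dst_size);	/* leave no stale bytes */
		pr_err("demo: invalid key length: %d\n", key_len);
		return -EIO;
	}
	memcpy(dst, src, key_len);
	return 0;
}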
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 31553372b33a..5b08bd417b28 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -259,7 +259,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
}
/* truncate len if we find any trailing uptodate block(s) */
- for ( ; i <= last; i++) {
+ while (++i <= last) {
if (ifs_block_is_uptodate(ifs, i)) {
plen -= (last - i + 1) * block_size;
last = i - 1;
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 35768a63fb1d..421d247fae52 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -180,7 +180,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
return NULL;
return isofs_export_iget(sb,
- fh_len > 2 ? ifid->parent_block : 0,
+ fh_len > 3 ? ifid->parent_block : 0,
ifid->parent_offset,
fh_len > 4 ? ifid->parent_generation : 0);
}
diff --git a/fs/namei.c b/fs/namei.c
index 360a86ca1f02..8510ff53f12e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -125,9 +125,9 @@
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
-static inline void initname(struct filename *name)
+static inline void initname(struct filename *name, const char __user *uptr)
{
- name->uptr = NULL;
+ name->uptr = uptr;
name->aname = NULL;
atomic_set(&name->refcnt, 1);
}
@@ -210,7 +210,7 @@ getname_flags(const char __user *filename, int flags)
return ERR_PTR(-ENAMETOOLONG);
}
}
- initname(result);
+ initname(result, filename);
audit_getname(result);
return result;
}
@@ -268,7 +268,7 @@ struct filename *getname_kernel(const char * filename)
return ERR_PTR(-ENAMETOOLONG);
}
memcpy((char *)result->name, filename, len);
- initname(result);
+ initname(result, NULL);
audit_getname(result);
return result;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 14935a0500a2..d9ca80dcc544 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1830,6 +1830,8 @@ static inline void namespace_lock(void)
down_write(&namespace_sem);
}
+DEFINE_GUARD(namespace_lock, struct rw_semaphore *, namespace_lock(), namespace_unlock())
+
enum umount_tree_flags {
UMOUNT_SYNC = 1,
UMOUNT_PROPAGATE = 2,
@@ -2383,7 +2385,7 @@ void dissolve_on_fput(struct vfsmount *mnt)
return;
}
- scoped_guard(rwsem_write, &namespace_sem) {
+ scoped_guard(namespace_lock, &namespace_sem) {
ns = m->mnt_ns;
if (!must_dissolve(ns))
return;
@@ -5189,8 +5191,8 @@ static void finish_mount_kattr(struct mount_kattr *kattr)
mnt_idmap_put(kattr->mnt_idmap);
}
-static int copy_mount_setattr(struct mount_attr __user *uattr, size_t usize,
- struct mount_kattr *kattr)
+static int wants_mount_setattr(struct mount_attr __user *uattr, size_t usize,
+ struct mount_kattr *kattr)
{
int ret;
struct mount_attr attr;
@@ -5213,9 +5215,13 @@ static int copy_mount_setattr(struct mount_attr __user *uattr, size_t usize,
if (attr.attr_set == 0 &&
attr.attr_clr == 0 &&
attr.propagation == 0)
- return 0;
+ return 0; /* Tell caller to not bother. */
+
+ ret = build_mount_kattr(&attr, usize, kattr);
+ if (ret < 0)
+ return ret;
- return build_mount_kattr(&attr, usize, kattr);
+ return 1;
}
SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
@@ -5247,8 +5253,8 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
if (flags & AT_RECURSIVE)
kattr.kflags |= MOUNT_KATTR_RECURSE;
- err = copy_mount_setattr(uattr, usize, &kattr);
- if (err)
+ err = wants_mount_setattr(uattr, usize, &kattr);
+ if (err <= 0)
return err;
err = user_path_at(dfd, path, kattr.lookup_flags, &target);
@@ -5282,15 +5288,17 @@ SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename,
if (flags & AT_RECURSIVE)
kattr.kflags |= MOUNT_KATTR_RECURSE;
- ret = copy_mount_setattr(uattr, usize, &kattr);
- if (ret)
+ ret = wants_mount_setattr(uattr, usize, &kattr);
+ if (ret < 0)
return ret;
- ret = do_mount_setattr(&file->f_path, &kattr);
- if (ret)
- return ret;
+ if (ret) {
+ ret = do_mount_setattr(&file->f_path, &kattr);
+ if (ret)
+ return ret;
- finish_mount_kattr(&kattr);
+ finish_mount_kattr(&kattr);
+ }
}
fd = get_unused_fd_flags(flags & O_CLOEXEC);
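
The rename encodes a tri-state contract: negative for error, 0 when userspace passed an all-zero mount_attr (nothing to do), 1 when kattr was built and must be applied and then released. A hedged sketch of a caller, mirroring the two patched call sites:

static int demo_setattr(struct mount_attr __user *uattr, size_t usize,
			struct path *target)
{
	struct mount_kattr kattr = {};
	int err;

	err = wants_mount_setattr(uattr, usize, &kattr);
	if (err <= 0)
		return err;	/* error, or nothing was requested */

	err = do_mount_setattr(target, &kattr);
	if (!err)
		finish_mount_kattr(&kattr);
	return err;
}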
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 4e3e62040831..70ecc8f5f210 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -127,11 +127,13 @@ static int __init netfs_init(void)
if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
goto error_subreqpool;
+#ifdef CONFIG_PROC_FS
if (!proc_mkdir("fs/netfs", NULL))
goto error_proc;
if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
&netfs_requests_seq_ops))
goto error_procfile;
+#endif
#ifdef CONFIG_FSCACHE_STATS
if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
netfs_stats_show))
@@ -144,9 +146,11 @@ static int __init netfs_init(void)
return 0;
error_fscache:
+#ifdef CONFIG_PROC_FS
error_procfile:
remove_proc_subtree("fs/netfs", NULL);
error_proc:
+#endif
mempool_exit(&netfs_subrequest_pool);
error_subreqpool:
kmem_cache_destroy(netfs_subrequest_slab);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 6f2f8f4cfbbc..aef942a758ce 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -541,8 +541,6 @@ int ovl_set_metacopy_xattr(struct ovl_fs *ofs, struct dentry *d,
bool ovl_is_metacopy_dentry(struct dentry *dentry);
char *ovl_get_redirect_xattr(struct ovl_fs *ofs, const struct path *path, int padding);
int ovl_ensure_verity_loaded(struct path *path);
-int ovl_get_verity_xattr(struct ovl_fs *ofs, const struct path *path,
- u8 *digest_buf, int *buf_length);
int ovl_validate_verity(struct ovl_fs *ofs,
struct path *metapath,
struct path *datapath);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index b63474d1b064..e19940d649ca 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1138,6 +1138,11 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
return ERR_PTR(-EINVAL);
}
+ if (ctx->nr == ctx->nr_data) {
+ pr_err("at least one non-data lowerdir is required\n");
+ return ERR_PTR(-EINVAL);
+ }
+
err = -EINVAL;
for (i = 0; i < ctx->nr; i++) {
l = &ctx->lower[i];
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index e69968e88fe7..35892df7335c 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -704,18 +704,12 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
cifs_free_hash(&server->secmech.md5);
cifs_free_hash(&server->secmech.sha512);
- if (!SERVER_IS_CHAN(server)) {
- if (server->secmech.enc) {
- crypto_free_aead(server->secmech.enc);
- server->secmech.enc = NULL;
- }
-
- if (server->secmech.dec) {
- crypto_free_aead(server->secmech.dec);
- server->secmech.dec = NULL;
- }
- } else {
+ if (server->secmech.enc) {
+ crypto_free_aead(server->secmech.enc);
server->secmech.enc = NULL;
+ }
+ if (server->secmech.dec) {
+ crypto_free_aead(server->secmech.dec);
server->secmech.dec = NULL;
}
}
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 07c4688ec4c9..3b32116b0b49 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -625,10 +625,8 @@ struct smb_version_operations {
bool (*is_status_io_timeout)(char *buf);
/* Check for STATUS_NETWORK_NAME_DELETED */
bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
- int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data);
+ struct reparse_data_buffer * (*get_reparse_point_buffer)(const struct kvec *rsp_iov,
+ u32 *plen);
int (*create_reparse_symlink)(const unsigned int xid,
struct inode *inode,
struct dentry *dentry,
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 48d0d6f439cf..18d67ab113f0 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2256,6 +2256,8 @@ typedef struct {
#define FILE_SUPPORTS_ENCRYPTION 0x00020000
#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
#define FILE_VOLUME_IS_COMPRESSED 0x00008000
+#define FILE_SUPPORTS_POSIX_UNLINK_RENAME 0x00000400
+#define FILE_RETURNS_CLEANUP_RESULT_INFO 0x00000200
#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index f298e86a3c1f..4a0b2d220fe8 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -2556,6 +2556,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
return 0;
if (tcon->nodelete != ctx->nodelete)
return 0;
+ if (tcon->posix_extensions != ctx->linux_ext)
+ return 0;
return 1;
}
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index a00a9d91d0da..75be4b46bc6f 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -1203,18 +1203,17 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
goto out;
}
break;
- case IO_REPARSE_TAG_MOUNT_POINT:
- cifs_create_junction_fattr(fattr, sb);
- rc = 0;
- goto out;
default:
/* Check for cached reparse point data */
if (data->symlink_target || data->reparse.buf) {
rc = 0;
- } else if (iov && server->ops->parse_reparse_point) {
- rc = server->ops->parse_reparse_point(cifs_sb,
- full_path,
- iov, data);
+ } else if (iov && server->ops->get_reparse_point_buffer) {
+ struct reparse_data_buffer *reparse_buf;
+ u32 reparse_len;
+
+ reparse_buf = server->ops->get_reparse_point_buffer(iov, &reparse_len);
+ rc = parse_reparse_point(reparse_buf, reparse_len,
+ cifs_sb, full_path, data);
/*
* If the reparse point was not handled but it is the
* name surrogate which points to directory, then treat
@@ -1228,6 +1227,16 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
cifs_create_junction_fattr(fattr, sb);
goto out;
}
+ /*
+			 * If the reparse point is unsupported by the Linux SMB
+			 * client then let the SMB server process it. So mask the
+			 * -EOPNOTSUPP error code. This allows the Linux SMB client
+			 * to send an SMB OPEN request to the server. If the server
+			 * does not support this reparse point either, it will
+			 * return an error when opening the path.
+ */
+ if (rc == -EOPNOTSUPP)
+ rc = 0;
}
if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK && !rc) {
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 2b9e9885dc42..bb25e77c5540 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -542,12 +542,12 @@ static int wsl_set_reparse_buf(struct reparse_data_buffer **buf,
kfree(symname_utf16);
return -ENOMEM;
}
- /* Flag 0x02000000 is unknown, but all wsl symlinks have this value */
- symlink_buf->Flags = cpu_to_le32(0x02000000);
- /* PathBuffer is in UTF-8 but without trailing null-term byte */
+ /* Version field must be set to 2 (MS-FSCC 2.1.2.7) */
+ symlink_buf->Version = cpu_to_le32(2);
+ /* Target for Version 2 is in UTF-8 but without trailing null-term byte */
symname_utf8_len = utf16s_to_utf8s((wchar_t *)symname_utf16, symname_utf16_len/2,
UTF16_LITTLE_ENDIAN,
- symlink_buf->PathBuffer,
+ symlink_buf->Target,
symname_utf8_maxlen);
*buf = (struct reparse_data_buffer *)symlink_buf;
buf_len = sizeof(struct reparse_wsl_symlink_data_buffer) + symname_utf8_len;
@@ -1016,29 +1016,36 @@ static int parse_reparse_wsl_symlink(struct reparse_wsl_symlink_data_buffer *buf
struct cifs_open_info_data *data)
{
int len = le16_to_cpu(buf->ReparseDataLength);
+ int data_offset = offsetof(typeof(*buf), Target) - offsetof(typeof(*buf), Version);
int symname_utf8_len;
__le16 *symname_utf16;
int symname_utf16_len;
- if (len <= sizeof(buf->Flags)) {
+ if (len <= data_offset) {
cifs_dbg(VFS, "srv returned malformed wsl symlink buffer\n");
return -EIO;
}
- /* PathBuffer is in UTF-8 but without trailing null-term byte */
- symname_utf8_len = len - sizeof(buf->Flags);
+ /* MS-FSCC 2.1.2.7 defines layout of the Target field only for Version 2. */
+ if (le32_to_cpu(buf->Version) != 2) {
+ cifs_dbg(VFS, "srv returned unsupported wsl symlink version %u\n", le32_to_cpu(buf->Version));
+ return -EIO;
+ }
+
+ /* Target for Version 2 is in UTF-8 but without trailing null-term byte */
+ symname_utf8_len = len - data_offset;
/*
* Check that buffer does not contain null byte
* because Linux cannot process symlink with null byte.
*/
- if (strnlen(buf->PathBuffer, symname_utf8_len) != symname_utf8_len) {
+ if (strnlen(buf->Target, symname_utf8_len) != symname_utf8_len) {
cifs_dbg(VFS, "srv returned null byte in wsl symlink target location\n");
return -EIO;
}
symname_utf16 = kzalloc(symname_utf8_len * 2, GFP_KERNEL);
if (!symname_utf16)
return -ENOMEM;
- symname_utf16_len = utf8s_to_utf16s(buf->PathBuffer, symname_utf8_len,
+ symname_utf16_len = utf8s_to_utf16s(buf->Target, symname_utf8_len,
UTF16_LITTLE_ENDIAN,
(wchar_t *) symname_utf16, symname_utf8_len * 2);
if (symname_utf16_len < 0) {
@@ -1062,8 +1069,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
const char *full_path,
struct cifs_open_info_data *data)
{
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
-
data->reparse.buf = buf;
/* See MS-FSCC 2.1.2 */
@@ -1090,24 +1095,17 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
}
return 0;
default:
- cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
- le32_to_cpu(buf->ReparseTag));
return -EOPNOTSUPP;
}
}
-int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data)
+struct reparse_data_buffer *smb2_get_reparse_point_buffer(const struct kvec *rsp_iov,
+ u32 *plen)
{
- struct reparse_data_buffer *buf;
struct smb2_ioctl_rsp *io = rsp_iov->iov_base;
- u32 plen = le32_to_cpu(io->OutputCount);
-
- buf = (struct reparse_data_buffer *)((u8 *)io +
- le32_to_cpu(io->OutputOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
+ *plen = le32_to_cpu(io->OutputCount);
+ return (struct reparse_data_buffer *)((u8 *)io +
+ le32_to_cpu(io->OutputOffset));
}
static bool wsl_to_fattr(struct cifs_open_info_data *data,
@@ -1233,16 +1231,6 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
bool ok;
switch (tag) {
- case IO_REPARSE_TAG_INTERNAL:
- if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
- return false;
- fallthrough;
- case IO_REPARSE_TAG_DFS:
- case IO_REPARSE_TAG_DFSR:
- case IO_REPARSE_TAG_MOUNT_POINT:
- /* See cifs_create_junction_fattr() */
- fattr->cf_mode = S_IFDIR | 0711;
- break;
case IO_REPARSE_TAG_LX_SYMLINK:
case IO_REPARSE_TAG_LX_FIFO:
case IO_REPARSE_TAG_AF_UNIX:
@@ -1262,7 +1250,14 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
fattr->cf_mode |= S_IFLNK;
break;
default:
- return false;
+ if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
+ return false;
+ if (!IS_REPARSE_TAG_NAME_SURROGATE(tag) &&
+ tag != IO_REPARSE_TAG_INTERNAL)
+ return false;
+ /* See cifs_create_junction_fattr() */
+ fattr->cf_mode = S_IFDIR | 0711;
+ break;
}
fattr->cf_dtype = S_DT(fattr->cf_mode);
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index c0be5ab45a78..08de853b36a8 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -135,9 +135,6 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev);
-int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data);
+struct reparse_data_buffer *smb2_get_reparse_point_buffer(const struct kvec *rsp_iov, u32 *len);
#endif /* _CIFS_REPARSE_H */
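
The op refactor across these hunks moves parsing into common code: each dialect now only locates the reparse buffer inside its response frame, and the caller runs parse_reparse_point() once. A sketch of the assumed combined flow (cf. the inode.c hunk earlier):

static int demo_parse_reparse(struct TCP_Server_Info *server,
			      struct kvec *rsp_iov,
			      struct cifs_sb_info *cifs_sb,
			      const char *full_path,
			      struct cifs_open_info_data *data)
{
	struct reparse_data_buffer *buf;
	u32 plen;

	buf = server->ops->get_reparse_point_buffer(rsp_iov, &plen);
	return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
}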
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index f2ca5963cd9d..b3fa9ee26912 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -680,6 +680,22 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
*pbcc_area = bcc_ptr;
}
+static void
+ascii_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
+{
+ char *bcc_ptr = *pbcc_area;
+
+ strcpy(bcc_ptr, "Linux version ");
+ bcc_ptr += strlen("Linux version ");
+ strcpy(bcc_ptr, init_utsname()->release);
+ bcc_ptr += strlen(init_utsname()->release) + 1;
+
+ strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+ bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+
+ *pbcc_area = bcc_ptr;
+}
+
static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
@@ -704,6 +720,25 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
*pbcc_area = bcc_ptr;
}
+static void ascii_domain_string(char **pbcc_area, struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
+{
+ char *bcc_ptr = *pbcc_area;
+ int len;
+
+ /* copy domain */
+ if (ses->domainName != NULL) {
+ len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_DOMAINNAME_LEN - 1;
+ bcc_ptr += len;
+ } /* else we send a null domain name so server will default to its own domain */
+ *bcc_ptr = 0;
+ bcc_ptr++;
+
+ *pbcc_area = bcc_ptr;
+}
+
static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
@@ -749,25 +784,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
- /* copy domain */
- if (ses->domainName != NULL) {
- len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- if (WARN_ON_ONCE(len < 0))
- len = CIFS_MAX_DOMAINNAME_LEN - 1;
- bcc_ptr += len;
- } /* else we send a null domain name so server will default to its own domain */
- *bcc_ptr = 0;
- bcc_ptr++;
-
/* BB check for overflow here */
- strcpy(bcc_ptr, "Linux version ");
- bcc_ptr += strlen("Linux version ");
- strcpy(bcc_ptr, init_utsname()->release);
- bcc_ptr += strlen(init_utsname()->release) + 1;
-
- strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
- bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+ ascii_domain_string(&bcc_ptr, ses, nls_cp);
+ ascii_oslm_strings(&bcc_ptr, nls_cp);
*pbcc_area = bcc_ptr;
}
@@ -1570,7 +1590,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
sess_data->iov[1].iov_len = msg->secblob_len;
pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
- if (ses->capabilities & CAP_UNICODE) {
+ if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
/* unicode strings must be word aligned */
if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
*bcc_ptr = 0;
@@ -1579,8 +1599,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
} else {
- /* BB: is this right? */
- ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+ ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ ascii_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
}
sess_data->iov[2].iov_len = (long) bcc_ptr -
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index 26df807fbe7a..0adeec652dc1 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -568,6 +568,42 @@ static int cifs_query_path_info(const unsigned int xid,
data->reparse_point = le32_to_cpu(fi.Attributes) & ATTR_REPARSE;
}
+#ifdef CONFIG_CIFS_XATTR
+ /*
+ * For WSL CHR and BLK reparse points it is required to fetch
+ * EA $LXDEV which contains major and minor device numbers.
+ */
+ if (!rc && data->reparse_point) {
+ struct smb2_file_full_ea_info *ea;
+
+ ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
+ rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV,
+ &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
+ SMB2_WSL_XATTR_DEV_SIZE, cifs_sb);
+ if (rc == SMB2_WSL_XATTR_DEV_SIZE) {
+ ea->next_entry_offset = cpu_to_le32(0);
+ ea->flags = 0;
+ ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
+ ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE);
+ memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1);
+ data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
+ SMB2_WSL_XATTR_DEV_SIZE;
+ rc = 0;
+ } else if (rc >= 0) {
+ /* It is an error if EA $LXDEV has wrong size. */
+ rc = -EINVAL;
+ } else {
+ /*
+			 * In all other cases ignore the error if fetching
+			 * EA $LXDEV failed. It is needed only for WSL CHR
+			 * and BLK reparse points, and wsl_to_fattr() handles
+			 * the case when the EA is missing.
+ */
+ rc = 0;
+ }
+ }
+#endif
+
return rc;
}
@@ -970,18 +1006,13 @@ static int cifs_query_symlink(const unsigned int xid,
return rc;
}
-static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data)
+static struct reparse_data_buffer *cifs_get_reparse_point_buffer(const struct kvec *rsp_iov,
+ u32 *plen)
{
- struct reparse_data_buffer *buf;
TRANSACT_IOCTL_RSP *io = rsp_iov->iov_base;
- u32 plen = le16_to_cpu(io->ByteCount);
-
- buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
- le32_to_cpu(io->DataOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
+ *plen = le16_to_cpu(io->ByteCount);
+ return (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
+ le32_to_cpu(io->DataOffset));
}
static bool
@@ -1157,7 +1188,7 @@ struct smb_version_operations smb1_operations = {
.rename = CIFSSMBRename,
.create_hardlink = CIFSCreateHardLink,
.query_symlink = cifs_query_symlink,
- .parse_reparse_point = cifs_parse_reparse_point,
+ .get_reparse_point_buffer = cifs_get_reparse_point_buffer,
.open = cifs_open_file,
.set_fid = cifs_set_fid,
.close = cifs_close_file,
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 41d8cd20b25f..2fe8eeb98535 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4555,9 +4555,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
return rc;
}
} else {
- if (unlikely(!server->secmech.dec))
- return -EIO;
-
+ rc = smb3_crypto_aead_allocate(server);
+ if (unlikely(rc))
+ return rc;
tfm = server->secmech.dec;
}
@@ -5303,7 +5303,7 @@ struct smb_version_operations smb20_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5406,7 +5406,7 @@ struct smb_version_operations smb21_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5513,7 +5513,7 @@ struct smb_version_operations smb30_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5629,7 +5629,7 @@ struct smb_version_operations smb311_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 81e05db8e4d5..c4d52bebd37d 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -1252,15 +1252,8 @@ SMB2_negotiate(const unsigned int xid,
cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
}
- if (server->cipher_type && !rc) {
- if (!SERVER_IS_CHAN(server)) {
- rc = smb3_crypto_aead_allocate(server);
- } else {
- /* For channels, just reuse the primary server crypto secmech. */
- server->secmech.enc = server->primary_server->secmech.enc;
- server->secmech.dec = server->primary_server->secmech.dec;
- }
- }
+ if (server->cipher_type && !rc)
+ rc = smb3_crypto_aead_allocate(server);
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
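
With the negotiate-time allocation gone, decrypt_raw_data() above now calls smb3_crypto_aead_allocate() on demand, which only works if the helper is idempotent. A hedged sketch of that shape; the real helper derives the algorithm from server->cipher_type, so "gcm(aes)" below is a stand-in:

#include <crypto/aead.h>

static int demo_crypto_aead_allocate(struct TCP_Server_Info *server)
{
	struct crypto_aead *tfm;

	if (server->secmech.enc)	/* already set up: nothing to do */
		return 0;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	server->secmech.enc = tfm;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		crypto_free_aead(server->secmech.enc);
		server->secmech.enc = NULL;
		return PTR_ERR(tfm);
	}
	server->secmech.dec = tfm;
	return 0;
}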
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 764dca80c15c..f79a5165a7cc 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -1567,13 +1567,13 @@ struct reparse_nfs_data_buffer {
__u8 DataBuffer[];
} __packed;
-/* For IO_REPARSE_TAG_LX_SYMLINK */
+/* For IO_REPARSE_TAG_LX_SYMLINK - see MS-FSCC 2.1.2.7 */
struct reparse_wsl_symlink_data_buffer {
__le32 ReparseTag;
__le16 ReparseDataLength;
__u16 Reserved;
- __le32 Flags;
- __u8 PathBuffer[]; /* Variable Length UTF-8 string without nul-term */
+ __le32 Version; /* Always 2 */
+ __u8 Target[]; /* Variable Length UTF-8 string without nul-term */
} __packed;
struct validate_negotiate_info_req {
diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
index a3d8a905b07e..d742ba754348 100644
--- a/fs/smb/server/smb_common.h
+++ b/fs/smb/server/smb_common.h
@@ -72,6 +72,8 @@
#define FILE_SUPPORTS_ENCRYPTION 0x00020000
#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
#define FILE_VOLUME_IS_COMPRESSED 0x00008000
+#define FILE_SUPPORTS_POSIX_UNLINK_RENAME 0x00000400
+#define FILE_RETURNS_CLEANUP_RESULT_INFO 0x00000200
#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 8e7f1b324b3b..1a2b3f06fa71 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -105,6 +105,7 @@ xfs_buf_free(
{
unsigned int size = BBTOB(bp->b_length);
+ might_sleep();
trace_xfs_buf_free(bp, _RET_IP_);
ASSERT(list_empty(&bp->b_lru));
diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c
index b4ffd80b7cb6..dcbfa274e06d 100644
--- a/fs/xfs/xfs_buf_mem.c
+++ b/fs/xfs/xfs_buf_mem.c
@@ -165,7 +165,7 @@ xmbuf_map_backing_mem(
folio_set_dirty(folio);
folio_unlock(folio);
- bp->b_addr = folio_address(folio);
+ bp->b_addr = folio_address(folio) + offset_in_folio(folio, pos);
return 0;
}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index edbc521870a1..b4e32f0860b7 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1186,9 +1186,8 @@ xfs_qm_dqflush_done(
if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
(lip->li_lsn == qlip->qli_flush_lsn ||
test_bit(XFS_LI_FAILED, &lip->li_flags))) {
-
spin_lock(&ailp->ail_lock);
- xfs_clear_li_failed(lip);
+ clear_bit(XFS_LI_FAILED, &lip->li_flags);
if (lip->li_lsn == qlip->qli_flush_lsn) {
/* xfs_ail_update_finish() drops the AIL lock */
tail_lsn = xfs_ail_delete_one(ailp, lip);
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index a4bc1642fe56..414b27a86458 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -876,6 +876,7 @@ xfs_getfsmap_rtdev_rmapbt(
const struct xfs_fsmap *keys,
struct xfs_getfsmap_info *info)
{
+ struct xfs_fsmap key0 = *keys; /* struct copy */
struct xfs_mount *mp = tp->t_mountp;
struct xfs_rtgroup *rtg = NULL;
struct xfs_btree_cur *bt_cur = NULL;
@@ -887,32 +888,46 @@ xfs_getfsmap_rtdev_rmapbt(
int error = 0;
eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart + mp->m_sb.sb_rblocks);
- if (keys[0].fmr_physical >= eofs)
+ if (key0.fmr_physical >= eofs)
return 0;
+ /*
+ * On zoned filesystems with an internal rt volume, the volume comes
+ * immediately after the end of the data volume. However, the
+ * xfs_rtblock_t address space is relative to the start of the data
+ * device, which means that the first @rtstart fsblocks do not actually
+ * point anywhere. If a fsmap query comes in with the low key starting
+ * below @rtstart, report it as "owned by filesystem".
+ */
rtstart_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart);
- if (keys[0].fmr_physical < rtstart_daddr) {
+ if (xfs_has_zoned(mp) && key0.fmr_physical < rtstart_daddr) {
struct xfs_fsmap_irec frec = {
.owner = XFS_RMAP_OWN_FS,
.len_daddr = rtstart_daddr,
};
- /* Adjust the low key if we are continuing from where we left off. */
- if (keys[0].fmr_length > 0) {
- info->low_daddr = keys[0].fmr_physical + keys[0].fmr_length;
- return 0;
+ /*
+ * Adjust the start of the query range if we're picking up from
+ * a previous round, and only emit the record if we haven't
+ * already gone past.
+ */
+ key0.fmr_physical += key0.fmr_length;
+ if (key0.fmr_physical < rtstart_daddr) {
+ error = xfs_getfsmap_helper(tp, info, &frec);
+ if (error)
+ return error;
+
+ key0.fmr_physical = rtstart_daddr;
}
- /* Fabricate an rmap entry for space occupied by the data dev */
- error = xfs_getfsmap_helper(tp, info, &frec);
- if (error)
- return error;
+ /* Zero the other fields to avoid further adjustments. */
+ key0.fmr_owner = 0;
+ key0.fmr_offset = 0;
+ key0.fmr_length = 0;
}
- start_rtb = xfs_daddr_to_rtb(mp, rtstart_daddr + keys[0].fmr_physical);
- end_rtb = xfs_daddr_to_rtb(mp, rtstart_daddr +
- min(eofs - 1, keys[1].fmr_physical));
-
+ start_rtb = xfs_daddr_to_rtb(mp, key0.fmr_physical);
+ end_rtb = xfs_daddr_to_rtb(mp, min(eofs - 1, keys[1].fmr_physical));
info->missing_owner = XFS_FMR_OWN_FREE;
/*
@@ -920,12 +935,12 @@ xfs_getfsmap_rtdev_rmapbt(
* low to the fsmap low key and max out the high key to the end
* of the rtgroup.
*/
- info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
- error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
+ info->low.rm_offset = XFS_BB_TO_FSBT(mp, key0.fmr_offset);
+ error = xfs_fsmap_owner_to_rmap(&info->low, &key0);
if (error)
return error;
- info->low.rm_blockcount = XFS_BB_TO_FSBT(mp, keys[0].fmr_length);
- xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
+ info->low.rm_blockcount = XFS_BB_TO_FSBT(mp, key0.fmr_length);
+ xfs_getfsmap_set_irec_flags(&info->low, &key0);
/* Adjust the low key if we are continuing from where we left off. */
if (info->low.rm_blockcount == 0) {
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 40fc1bf900af..c6cb0b6b9e46 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -1089,13 +1089,7 @@ xfs_iflush_abort(
* state. Whilst the inode is in the AIL, it should have a valid buffer
* pointer for push operations to access - it is only safe to remove the
* inode from the buffer once it has been removed from the AIL.
- *
- * We also clear the failed bit before removing the item from the AIL
- * as xfs_trans_ail_delete()->xfs_clear_li_failed() will release buffer
- * references the inode item owns and needs to hold until we've fully
- * aborted the inode log item and detached it from the buffer.
*/
- clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
xfs_trans_ail_delete(&iip->ili_item, 0);
/*
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6493bdb57351..980aabc49512 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2888,7 +2888,7 @@ xlog_force_and_check_iclog(
*
* 1. the current iclog is active and has no data; the previous iclog
* is in the active or dirty state.
- * 2. the current iclog is drity, and the previous iclog is in the
+ * 2. the current iclog is dirty, and the previous iclog is in the
* active or dirty state.
*
* We may sleep if:
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 799b84220ebb..e5192c12e7ac 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -229,6 +229,7 @@ typedef struct xfs_mount {
bool m_finobt_nores; /* no per-AG finobt resv. */
bool m_update_sb; /* sb needs update in mount */
unsigned int m_max_open_zones;
+ unsigned int m_zonegc_low_space;
/*
* Bitsets of per-fs metadata that have been checked and/or are sick.
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index b7e82d85f043..7a5c5ef2db92 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -718,8 +718,40 @@ max_open_zones_show(
}
XFS_SYSFS_ATTR_RO(max_open_zones);
+static ssize_t
+zonegc_low_space_store(
+ struct kobject *kobj,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned int val;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > 100)
+ return -EINVAL;
+
+ zoned_to_mp(kobj)->m_zonegc_low_space = val;
+
+ return count;
+}
+
+static ssize_t
+zonegc_low_space_show(
+ struct kobject *kobj,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n",
+ zoned_to_mp(kobj)->m_zonegc_low_space);
+}
+XFS_SYSFS_ATTR_RW(zonegc_low_space);
+
static struct attribute *xfs_zoned_attrs[] = {
ATTR_LIST(max_open_zones),
+ ATTR_LIST(zonegc_low_space),
NULL,
};
ATTRIBUTE_GROUPS(xfs_zoned);
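
The new knob is a plain 0-100 percentage and out-of-range writes are rejected with -EINVAL. A hedged userspace sketch of setting it (the sysfs path is illustrative; the actual location depends on the mounted device name under /sys/fs/xfs/):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device name; adjust for the actual mount */
	const char *path = "/sys/fs/xfs/sda1/zoned/zonegc_low_space";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask GC to keep 25% of unused blocks writable */
	if (write(fd, "25", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
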
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 0fcb1828e598..85a649fec6ac 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -909,10 +909,9 @@ xfs_trans_ail_delete(
return;
}
- /* xfs_ail_update_finish() drops the AIL lock */
- xfs_clear_li_failed(lip);
+ clear_bit(XFS_LI_FAILED, &lip->li_flags);
tail_lsn = xfs_ail_delete_one(ailp, lip);
- xfs_ail_update_finish(ailp, tail_lsn);
+ xfs_ail_update_finish(ailp, tail_lsn); /* drops the AIL lock */
}
int
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index bd841df93021..f945f0450b16 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -167,32 +167,4 @@ xfs_trans_ail_copy_lsn(
}
#endif
-static inline void
-xfs_clear_li_failed(
- struct xfs_log_item *lip)
-{
- struct xfs_buf *bp = lip->li_buf;
-
- ASSERT(test_bit(XFS_LI_IN_AIL, &lip->li_flags));
- lockdep_assert_held(&lip->li_ailp->ail_lock);
-
- if (test_and_clear_bit(XFS_LI_FAILED, &lip->li_flags)) {
- lip->li_buf = NULL;
- xfs_buf_rele(bp);
- }
-}
-
-static inline void
-xfs_set_li_failed(
- struct xfs_log_item *lip,
- struct xfs_buf *bp)
-{
- lockdep_assert_held(&lip->li_ailp->ail_lock);
-
- if (!test_and_set_bit(XFS_LI_FAILED, &lip->li_flags)) {
- xfs_buf_hold(bp);
- lip->li_buf = bp;
- }
-}
-
#endif /* __XFS_TRANS_PRIV_H__ */
diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
index 52af234936a2..d509e49b2aaa 100644
--- a/fs/xfs/xfs_zone_alloc.c
+++ b/fs/xfs/xfs_zone_alloc.c
@@ -1201,6 +1201,13 @@ xfs_mount_zones(
xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
iz.available + iz.reclaimable);
+ /*
+ * The user may configure GC to free up a percentage of unused blocks.
+ * By default this is 0. GC will always trigger at the minimum level
+ * for keeping max_open_zones available for data placement.
+ */
+ mp->m_zonegc_low_space = 0;
+
error = xfs_zone_gc_mount(mp);
if (error)
goto out_free_zone_info;
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index c5136ea9bb1d..8c541ca71872 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -162,18 +162,30 @@ struct xfs_zone_gc_data {
/*
* We aim to keep enough zones free in stock to fully use the open zone limit
- * for data placement purposes.
+ * for data placement purposes. Additionally, the m_zonegc_low_space tunable
+ * can be set to make sure a fraction of the unused blocks are available for
+ * writing.
*/
bool
xfs_zoned_need_gc(
struct xfs_mount *mp)
{
+ s64 available, free;
+
if (!xfs_group_marked(mp, XG_TYPE_RTG, XFS_RTG_RECLAIMABLE))
return false;
- if (xfs_estimate_freecounter(mp, XC_FREE_RTAVAILABLE) <
+
+ available = xfs_estimate_freecounter(mp, XC_FREE_RTAVAILABLE);
+
+ if (available <
mp->m_groups[XG_TYPE_RTG].blocks *
(mp->m_max_open_zones - XFS_OPEN_GC_ZONES))
return true;
+
+ free = xfs_estimate_freecounter(mp, XC_FREE_RTEXTENTS);
+ if (available < mult_frac(free, mp->m_zonegc_low_space, 100))
+ return true;
+
return false;
}
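
A stand-alone sketch of the two triggers the new code checks, with invented values; mult_frac() here is a simplified equivalent of the kernel macro (computes x * n / d without intermediate overflow):

#include <stdbool.h>
#include <stdint.h>

#define mult_frac(x, n, d) ((x) / (d) * (n) + ((x) % (d)) * (n) / (d))

/* Toy stand-in for xfs_zoned_need_gc(); all parameters are invented */
static bool need_gc(int64_t available, int64_t free_blocks,
		    int64_t blocks_per_zone, unsigned int max_open_zones,
		    unsigned int gc_zones, unsigned int low_space_pct)
{
	/* Trigger 1: keep (max_open_zones - gc_zones) zones writable */
	if (available < blocks_per_zone * (int64_t)(max_open_zones - gc_zones))
		return true;
	/* Trigger 2: keep low_space_pct percent of free space available */
	return available < mult_frac(free_blocks, low_space_pct, 100);
}
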
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2aa83ee0ec98..a67988316d06 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -10,6 +10,7 @@
#include <linux/atomic.h>
#include <linux/crypto.h>
+#include <linux/slab.h>
#include <linux/string.h>
/* Set this bit for virtual address instead of SG list. */
@@ -581,7 +582,10 @@ static inline struct ahash_request *ahash_request_alloc_noprof(
* ahash_request_free() - zeroize and free the request data structure
* @req: request data structure cipher handle to be freed
*/
-void ahash_request_free(struct ahash_request *req);
+static inline void ahash_request_free(struct ahash_request *req)
+{
+ kfree_sensitive(req);
+}
static inline struct ahash_request *ahash_request_cast(
struct crypto_async_request *req)
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 485e22cf517e..052ac7924af3 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -249,7 +249,7 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
static inline bool ahash_request_chained(struct ahash_request *req)
{
- return crypto_request_chained(&req->base);
+ return false;
}
static inline bool ahash_request_isvirt(struct ahash_request *req)
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index 11d59ce0bac0..1c62d1d4458c 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -118,6 +118,9 @@ drm_kunit_helper_create_crtc(struct kunit *test,
const struct drm_crtc_funcs *funcs,
const struct drm_crtc_helper_funcs *helper_funcs);
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode);
+
struct drm_display_mode *
drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
u8 video_code);
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 4736ea525048..d212848d07f3 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -850,6 +850,7 @@
MACRO__(0xE20C, ## __VA_ARGS__), \
MACRO__(0xE20D, ## __VA_ARGS__), \
MACRO__(0xE210, ## __VA_ARGS__), \
+ MACRO__(0xE211, ## __VA_ARGS__), \
MACRO__(0xE212, ## __VA_ARGS__), \
MACRO__(0xE215, ## __VA_ARGS__), \
MACRO__(0xE216, ## __VA_ARGS__)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 8d1395f945bf..e9f07e37dd6f 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -173,65 +173,59 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_OP_HASH BIT(0)
-#define DCACHE_OP_COMPARE BIT(1)
-#define DCACHE_OP_REVALIDATE BIT(2)
-#define DCACHE_OP_DELETE BIT(3)
-#define DCACHE_OP_PRUNE BIT(4)
-
-#define DCACHE_DISCONNECTED BIT(5)
- /* This dentry is possibly not currently connected to the dcache tree, in
- * which case its parent will either be itself, or will have this flag as
- * well. nfsd will not use a dentry with this bit set, but will first
- * endeavour to clear the bit either by discovering that it is connected,
- * or by performing lookup operations. Any filesystem which supports
- * nfsd_operations MUST have a lookup function which, if it finds a
- * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
- * dentry into place and return that dentry rather than the passed one,
- * typically using d_splice_alias. */
-
-#define DCACHE_REFERENCED BIT(6) /* Recently used, don't discard. */
-
-#define DCACHE_DONTCACHE BIT(7) /* Purge from memory on final dput() */
-
-#define DCACHE_CANT_MOUNT BIT(8)
-#define DCACHE_GENOCIDE BIT(9)
-#define DCACHE_SHRINK_LIST BIT(10)
-
-#define DCACHE_OP_WEAK_REVALIDATE BIT(11)
-
-#define DCACHE_NFSFS_RENAMED BIT(12)
- /* this dentry has been "silly renamed" and has to be deleted on the last
- * dput() */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(13)
- /* Parent inode is watched by some fsnotify listener */
-
-#define DCACHE_DENTRY_KILLED BIT(14)
-
-#define DCACHE_MOUNTED BIT(15) /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT BIT(16) /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT BIT(17) /* manage transit from this dirent */
+enum dentry_flags {
+ DCACHE_OP_HASH = BIT(0),
+ DCACHE_OP_COMPARE = BIT(1),
+ DCACHE_OP_REVALIDATE = BIT(2),
+ DCACHE_OP_DELETE = BIT(3),
+ DCACHE_OP_PRUNE = BIT(4),
+ /*
+ * This dentry is possibly not currently connected to the dcache tree,
+ * in which case its parent will either be itself, or will have this
+ * flag as well. nfsd will not use a dentry with this bit set, but will
+ * first endeavour to clear the bit either by discovering that it is
+ * connected, or by performing lookup operations. Any filesystem which
+ * supports nfsd_operations MUST have a lookup function which, if it
+ * finds a directory inode with a DCACHE_DISCONNECTED dentry, will
+ * d_move that dentry into place and return that dentry rather than the
+ * passed one, typically using d_splice_alias.
+ */
+ DCACHE_DISCONNECTED = BIT(5),
+ DCACHE_REFERENCED = BIT(6), /* Recently used, don't discard. */
+ DCACHE_DONTCACHE = BIT(7), /* Purge from memory on final dput() */
+ DCACHE_CANT_MOUNT = BIT(8),
+ DCACHE_GENOCIDE = BIT(9),
+ DCACHE_SHRINK_LIST = BIT(10),
+ DCACHE_OP_WEAK_REVALIDATE = BIT(11),
+ /*
+ * this dentry has been "silly renamed" and has to be deleted on the
+ * last dput()
+ */
+ DCACHE_NFSFS_RENAMED = BIT(12),
+ DCACHE_FSNOTIFY_PARENT_WATCHED = BIT(13), /* Parent inode is watched by some fsnotify listener */
+ DCACHE_DENTRY_KILLED = BIT(14),
+ DCACHE_MOUNTED = BIT(15), /* is a mountpoint */
+ DCACHE_NEED_AUTOMOUNT = BIT(16), /* handle automount on this dir */
+ DCACHE_MANAGE_TRANSIT = BIT(17), /* manage transit from this dirent */
+ DCACHE_LRU_LIST = BIT(18),
+ DCACHE_ENTRY_TYPE = (7 << 19), /* bits 19..21 are for storing type: */
+ DCACHE_MISS_TYPE = (0 << 19), /* Negative dentry */
+ DCACHE_WHITEOUT_TYPE = (1 << 19), /* Whiteout dentry (stop pathwalk) */
+ DCACHE_DIRECTORY_TYPE = (2 << 19), /* Normal directory */
+ DCACHE_AUTODIR_TYPE = (3 << 19), /* Lookupless directory (presumed automount) */
+ DCACHE_REGULAR_TYPE = (4 << 19), /* Regular file type */
+ DCACHE_SPECIAL_TYPE = (5 << 19), /* Other file type */
+ DCACHE_SYMLINK_TYPE = (6 << 19), /* Symlink */
+ DCACHE_NOKEY_NAME = BIT(22), /* Encrypted name encoded without key */
+ DCACHE_OP_REAL = BIT(23),
+ DCACHE_PAR_LOOKUP = BIT(24), /* being looked up (with parent locked shared) */
+ DCACHE_DENTRY_CURSOR = BIT(25),
+ DCACHE_NORCU = BIT(26), /* No RCU delay for freeing */
+};
+
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_LRU_LIST BIT(18)
-
-#define DCACHE_ENTRY_TYPE (7 << 19) /* bits 19..21 are for storing type: */
-#define DCACHE_MISS_TYPE (0 << 19) /* Negative dentry */
-#define DCACHE_WHITEOUT_TYPE (1 << 19) /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE (2 << 19) /* Normal directory */
-#define DCACHE_AUTODIR_TYPE (3 << 19) /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE (4 << 19) /* Regular file type */
-#define DCACHE_SPECIAL_TYPE (5 << 19) /* Other file type */
-#define DCACHE_SYMLINK_TYPE (6 << 19) /* Symlink */
-
-#define DCACHE_NOKEY_NAME BIT(22) /* Encrypted name encoded without key */
-#define DCACHE_OP_REAL BIT(23)
-
-#define DCACHE_PAR_LOOKUP BIT(24) /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR BIT(25)
-#define DCACHE_NORCU BIT(26) /* No RCU delay for freeing */
-
extern seqlock_t rename_lock;
/*
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
index 4f87a908ab4f..ecd821ed8064 100644
--- a/include/linux/firmware/cirrus/cs_dsp_test_utils.h
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -104,7 +104,6 @@ unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_w
unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
unsigned int alg_id,
int mem_type);
-unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv);
unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header);
void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv);
int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1adcba3ddd76..1ef867bb8c44 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -345,7 +345,7 @@ static inline void hrtimer_update_function(struct hrtimer *timer,
if (WARN_ON_ONCE(!function))
return;
#endif
- timer->function = function;
+ ACCESS_PRIVATE(timer, function) = function;
}
/* Forward a hrtimer so it expires after now: */
diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h
deleted file mode 100644
index ea4e087fac98..000000000000
--- a/include/linux/irqchip/irq-davinci-aintc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019 Texas Instruments
- */
-
-#ifndef _LINUX_IRQ_DAVINCI_AINTC_
-#define _LINUX_IRQ_DAVINCI_AINTC_
-
-#include <linux/ioport.h>
-
-/**
- * struct davinci_aintc_config - configuration data for davinci-aintc driver.
- *
- * @reg: register range to map
- * @num_irqs: number of HW interrupts supported by the controller
- * @prios: an array of size num_irqs containing priority settings for
- * each interrupt
- */
-struct davinci_aintc_config {
- struct resource reg;
- unsigned int num_irqs;
- u8 *prios;
-};
-
-void davinci_aintc_init(const struct davinci_aintc_config *config);
-
-#endif /* _LINUX_IRQ_DAVINCI_AINTC_ */
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 1a0bc35839e3..16a2ee4f8310 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -52,44 +52,23 @@
__local_unlock_irqrestore(lock, flags)
/**
- * localtry_lock_init - Runtime initialize a lock instance
- */
-#define localtry_lock_init(lock) __localtry_lock_init(lock)
-
-/**
- * localtry_lock - Acquire a per CPU local lock
- * @lock: The lock variable
- */
-#define localtry_lock(lock) __localtry_lock(lock)
-
-/**
- * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
- * @lock: The lock variable
- */
-#define localtry_lock_irq(lock) __localtry_lock_irq(lock)
-
-/**
- * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
- * interrupts
- * @lock: The lock variable
- * @flags: Storage for interrupt flags
+ * local_trylock_init - Runtime initialize a lock instance

*/
-#define localtry_lock_irqsave(lock, flags) \
- __localtry_lock_irqsave(lock, flags)
+#define local_trylock_init(lock) __local_trylock_init(lock)
/**
- * localtry_trylock - Try to acquire a per CPU local lock.
+ * local_trylock - Try to acquire a per CPU local lock
* @lock: The lock variable
*
* The function can be used in any context such as NMI or HARDIRQ. Due to
 * locking constraints it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define localtry_trylock(lock) __localtry_trylock(lock)
+#define local_trylock(lock) __local_trylock(lock)
/**
- * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
- * interrupts if acquired
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
* @lock: The lock variable
* @flags: Storage for interrupt flags
*
@@ -97,29 +76,8 @@
 * locking constraints it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define localtry_trylock_irqsave(lock, flags) \
- __localtry_trylock_irqsave(lock, flags)
-
-/**
- * local_unlock - Release a per CPU local lock
- * @lock: The lock variable
- */
-#define localtry_unlock(lock) __localtry_unlock(lock)
-
-/**
- * local_unlock_irq - Release a per CPU local lock and enable interrupts
- * @lock: The lock variable
- */
-#define localtry_unlock_irq(lock) __localtry_unlock_irq(lock)
-
-/**
- * localtry_unlock_irqrestore - Release a per CPU local lock and restore
- * interrupt flags
- * @lock: The lock variable
- * @flags: Interrupt flags to restore
- */
-#define localtry_unlock_irqrestore(lock, flags) \
- __localtry_unlock_irqrestore(lock, flags)
+#define local_trylock_irqsave(lock, flags) \
+ __local_trylock_irqsave(lock, flags)
DEFINE_GUARD(local_lock, local_lock_t __percpu*,
local_lock(_T),
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 67bd13d142fa..bf2bf40d7b18 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,10 +15,11 @@ typedef struct {
#endif
} local_lock_t;
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
typedef struct {
local_lock_t llock;
- unsigned int acquired;
-} localtry_lock_t;
+ u8 acquired;
+} local_trylock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
@@ -29,6 +30,9 @@ typedef struct {
}, \
.owner = NULL,
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
+ .llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },
+
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
@@ -56,6 +60,7 @@ static inline void local_lock_debug_init(local_lock_t *l)
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
@@ -63,7 +68,7 @@ static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
-#define INIT_LOCALTRY_LOCK(lockname) { .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
+#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
#define __local_lock_init(lock) \
do { \
@@ -76,6 +81,8 @@ do { \
local_lock_debug_init(lock); \
} while (0)
+#define __local_trylock_init(lock) __local_lock_init(lock.llock)
+
#define __spinlock_nested_bh_init(lock) \
do { \
static struct lock_class_key __key; \
@@ -87,149 +94,117 @@ do { \
local_lock_debug_init(lock); \
} while (0)
+#define __local_lock_acquire(lock) \
+ do { \
+ local_trylock_t *tl; \
+ local_lock_t *l; \
+ \
+ l = (local_lock_t *)this_cpu_ptr(lock); \
+ tl = (local_trylock_t *)l; \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(tl->acquired == 0); \
+ WRITE_ONCE(tl->acquired, 1); \
+ }), \
+ default:(void)0); \
+ local_lock_acquire(l); \
+ } while (0)
+
#define __local_lock(lock) \
do { \
preempt_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
-#define __local_unlock(lock) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
- preempt_enable(); \
+ __local_lock_acquire(lock); \
} while (0)
-#define __local_unlock_irq(lock) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
- local_irq_enable(); \
- } while (0)
-
-#define __local_unlock_irqrestore(lock, flags) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
- local_irq_restore(flags); \
- } while (0)
-
-#define __local_lock_nested_bh(lock) \
- do { \
- lockdep_assert_in_softirq(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
-#define __local_unlock_nested_bh(lock) \
- local_lock_release(this_cpu_ptr(lock))
-
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock) \
-do { \
- __local_lock_init(&(lock)->llock); \
- WRITE_ONCE((lock)->acquired, 0); \
-} while (0)
-
-#define __localtry_lock(lock) \
- do { \
- localtry_lock_t *lt; \
- preempt_disable(); \
- lt = this_cpu_ptr(lock); \
- local_lock_acquire(&lt->llock); \
- WRITE_ONCE(lt->acquired, 1); \
- } while (0)
-
-#define __localtry_lock_irq(lock) \
- do { \
- localtry_lock_t *lt; \
- local_irq_disable(); \
- lt = this_cpu_ptr(lock); \
- local_lock_acquire(&lt->llock); \
- WRITE_ONCE(lt->acquired, 1); \
- } while (0)
-
-#define __localtry_lock_irqsave(lock, flags) \
- do { \
- localtry_lock_t *lt; \
- local_irq_save(flags); \
- lt = this_cpu_ptr(lock); \
- local_lock_acquire(&lt->llock); \
- WRITE_ONCE(lt->acquired, 1); \
- } while (0)
-
-#define __localtry_trylock(lock) \
+#define __local_trylock(lock) \
({ \
- localtry_lock_t *lt; \
- bool _ret; \
+ local_trylock_t *tl; \
\
preempt_disable(); \
- lt = this_cpu_ptr(lock); \
- if (!READ_ONCE(lt->acquired)) { \
- WRITE_ONCE(lt->acquired, 1); \
- local_trylock_acquire(&lt->llock); \
- _ret = true; \
- } else { \
- _ret = false; \
+ tl = this_cpu_ptr(lock); \
+ if (READ_ONCE(tl->acquired)) { \
preempt_enable(); \
+ tl = NULL; \
+ } else { \
+ WRITE_ONCE(tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)tl); \
} \
- _ret; \
+ !!tl; \
})
-#define __localtry_trylock_irqsave(lock, flags) \
+#define __local_trylock_irqsave(lock, flags) \
({ \
- localtry_lock_t *lt; \
- bool _ret; \
+ local_trylock_t *tl; \
\
local_irq_save(flags); \
- lt = this_cpu_ptr(lock); \
- if (!READ_ONCE(lt->acquired)) { \
- WRITE_ONCE(lt->acquired, 1); \
- local_trylock_acquire(&lt->llock); \
- _ret = true; \
- } else { \
- _ret = false; \
+ tl = this_cpu_ptr(lock); \
+ if (READ_ONCE(tl->acquired)) { \
local_irq_restore(flags); \
+ tl = NULL; \
+ } else { \
+ WRITE_ONCE(tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)tl); \
} \
- _ret; \
+ !!tl; \
})
-#define __localtry_unlock(lock) \
+#define __local_lock_release(lock) \
+ do { \
+ local_trylock_t *tl; \
+ local_lock_t *l; \
+ \
+ l = (local_lock_t *)this_cpu_ptr(lock); \
+ tl = (local_trylock_t *)l; \
+ local_lock_release(l); \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(tl->acquired == 1); \
+ WRITE_ONCE(tl->acquired, 0); \
+ }), \
+ default:(void)0); \
+ } while (0)
+
+#define __local_unlock(lock) \
do { \
- localtry_lock_t *lt; \
- lt = this_cpu_ptr(lock); \
- WRITE_ONCE(lt->acquired, 0); \
- local_lock_release(&lt->llock); \
+ __local_lock_release(lock); \
preempt_enable(); \
} while (0)
-#define __localtry_unlock_irq(lock) \
+#define __local_unlock_irq(lock) \
do { \
- localtry_lock_t *lt; \
- lt = this_cpu_ptr(lock); \
- WRITE_ONCE(lt->acquired, 0); \
- local_lock_release(&lt->llock); \
+ __local_lock_release(lock); \
local_irq_enable(); \
} while (0)
-#define __localtry_unlock_irqrestore(lock, flags) \
+#define __local_unlock_irqrestore(lock, flags) \
do { \
- localtry_lock_t *lt; \
- lt = this_cpu_ptr(lock); \
- WRITE_ONCE(lt->acquired, 0); \
- local_lock_release(&lt->llock); \
+ __local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
+#define __local_lock_nested_bh(lock) \
+ do { \
+ lockdep_assert_in_softirq(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_unlock_nested_bh(lock) \
+ local_lock_release(this_cpu_ptr(lock))
+
#else /* !CONFIG_PREEMPT_RT */
/*
@@ -237,16 +212,18 @@ do { \
* critical section while staying preemptible.
*/
typedef spinlock_t local_lock_t;
-typedef spinlock_t localtry_lock_t;
+typedef spinlock_t local_trylock_t;
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)
+#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define __local_lock_init(l) \
do { \
local_spin_lock_init((l)); \
} while (0)
+#define __local_trylock_init(l) __local_lock_init(l)
+
#define __local_lock(__lock) \
do { \
migrate_disable(); \
@@ -283,17 +260,7 @@ do { \
spin_unlock(this_cpu_ptr((lock))); \
} while (0)
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock) __local_lock_init(lock)
-#define __localtry_lock(lock) __local_lock(lock)
-#define __localtry_lock_irq(lock) __local_lock(lock)
-#define __localtry_lock_irqsave(lock, flags) __local_lock_irqsave(lock, flags)
-#define __localtry_unlock(lock) __local_unlock(lock)
-#define __localtry_unlock_irq(lock) __local_unlock(lock)
-#define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)
-
-#define __localtry_trylock(lock) \
+#define __local_trylock(lock) \
({ \
int __locked; \
\
@@ -308,11 +275,11 @@ do { \
__locked; \
})
-#define __localtry_trylock_irqsave(lock, flags) \
+#define __local_trylock_irqsave(lock, flags) \
({ \
typecheck(unsigned long, flags); \
flags = 0; \
- __localtry_trylock(lock); \
+ __local_trylock(lock); \
})
#endif /* CONFIG_PREEMPT_RT */
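
The new __local_lock_acquire()/__local_lock_release() helpers rely on _Generic so one macro body serves both lock types while the ->acquired bookkeeping only happens for local_trylock_t. A stand-alone toy showing just the dispatch (types and assertions are simplified stand-ins; GCC/clang statement expressions assumed):

#include <assert.h>
#include <stdio.h>

typedef struct { int dep; } local_lock_t;
typedef struct { local_lock_t llock; unsigned char acquired; } local_trylock_t;

#define lock_acquire(lock)                                       \
	do {                                                     \
		local_trylock_t *tl = (local_trylock_t *)(lock); \
		_Generic((lock),                                 \
			local_trylock_t *: ({                    \
				assert(tl->acquired == 0);       \
				tl->acquired = 1;                \
			}),                                      \
			default: (void)0);                       \
	} while (0)

int main(void)
{
	local_lock_t l = { 0 };
	local_trylock_t tl = { { 0 }, 0 };

	lock_acquire(&l);	/* default branch: no ->acquired update */
	lock_acquire(&tl);	/* trylock branch: marks the lock taken */
	printf("tl.acquired = %d\n", tl.acquired);
	return 0;
}
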
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5a9bf15d4461..0069ba6866a4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -823,7 +823,6 @@ struct perf_event {
struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
- struct rcuwait pending_work_wait;
atomic_t event_limit;
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e2b705c14945..b50447ef1c92 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1511,8 +1511,9 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
/*
* track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page
- * tables copied during copy_page_range(). On success, stores the pfn to be
- * passed to untrack_pfn_copy().
+ * tables copied during copy_page_range(). Will store the pfn to be
+ * passed to untrack_pfn_copy() only if there is something to be untracked.
+ * Callers should initialize the pfn to 0.
*/
static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, unsigned long *pfn)
@@ -1522,7 +1523,9 @@ static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
/*
* untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during
- * copy_page_range(), but after track_pfn_copy() was already called.
+ * copy_page_range(), but after track_pfn_copy() was already called. Can
+ * be called even if track_pfn_copy() did not actually track anything:
+ * handled internally.
*/
static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma,
unsigned long pfn)
diff --git a/include/linux/platform_data/x86/intel_pmc_ipc.h b/include/linux/platform_data/x86/intel_pmc_ipc.h
index 6e603a8c075f..1d34435b7001 100644
--- a/include/linux/platform_data/x86/intel_pmc_ipc.h
+++ b/include/linux/platform_data/x86/intel_pmc_ipc.h
@@ -36,6 +36,7 @@ struct pmc_ipc_rbuf {
*/
static inline int intel_pmc_ipc(struct pmc_ipc_cmd *ipc_cmd, struct pmc_ipc_rbuf *rbuf)
{
+#ifdef CONFIG_ACPI
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object params[PMC_IPCS_PARAM_COUNT] = {
{.type = ACPI_TYPE_INTEGER,},
@@ -89,6 +90,9 @@ static inline int intel_pmc_ipc(struct pmc_ipc_cmd *ipc_cmd, struct pmc_ipc_rbuf
}
return 0;
+#else
+ return -ENODEV;
+#endif /* CONFIG_ACPI */
}
#endif /* INTEL_PMC_IPC_H */
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 5927910ec06e..6e68e359ad18 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -45,6 +45,8 @@ struct fib_rule {
struct fib_rule_port_range dport_range;
u16 sport_mask;
u16 dport_mask;
+ u8 iif_is_l3_master;
+ u8 oif_is_l3_master;
struct rcu_head rcu;
};
diff --git a/include/net/flow.h b/include/net/flow.h
index 335bbc52171c..2a3f0c42f092 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -38,6 +38,7 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
+#define FLOWI_FLAG_L3MDEV_OIF 0x04
__u32 flowic_secid;
kuid_t flowic_uid;
__u32 flowic_multipath_hash;
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index f7fe796e8429..1eb8dad18f7e 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -59,6 +59,20 @@ int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
+static inline
+bool l3mdev_fib_rule_iif_match(const struct flowi *fl, int iifindex)
+{
+ return !(fl->flowi_flags & FLOWI_FLAG_L3MDEV_OIF) &&
+ fl->flowi_l3mdev == iifindex;
+}
+
+static inline
+bool l3mdev_fib_rule_oif_match(const struct flowi *fl, int oifindex)
+{
+ return fl->flowi_flags & FLOWI_FLAG_L3MDEV_OIF &&
+ fl->flowi_l3mdev == oifindex;
+}
+
void l3mdev_update_flow(struct net *net, struct flowi *fl);
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
@@ -327,6 +341,19 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
{
return 1;
}
+
+static inline
+bool l3mdev_fib_rule_iif_match(const struct flowi *fl, int iifindex)
+{
+ return false;
+}
+
+static inline
+bool l3mdev_fib_rule_oif_match(const struct flowi *fl, int oifindex)
+{
+ return false;
+}
+
static inline
void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index d42eae69d9a8..901353796fbb 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4790,7 +4790,14 @@ void roce_del_all_netdev_gids(struct ib_device *ib_dev,
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+#else
+static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
+{
+ return 0;
+}
+#endif
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
diff --git a/include/uapi/cxl/features.h b/include/uapi/cxl/features.h
index d6db8984889f..490606d7694b 100644
--- a/include/uapi/cxl/features.h
+++ b/include/uapi/cxl/features.h
@@ -8,10 +8,19 @@
#define _UAPI_CXL_FEATURES_H_
#include <linux/types.h>
-#ifndef __KERNEL__
-#include <uuid/uuid.h>
-#else
+
+typedef unsigned char __uapi_uuid_t[16];
+
+#ifdef __KERNEL__
#include <linux/uuid.h>
+/*
+ * Note, __uapi_uuid_t is 1-byte aligned on modern compilers and 4-byte
+ * aligned on others. Ensure that __uapi_uuid_t in a struct is placed at
+ * a 4-byte aligned offset, or the structure is packed, to ensure
+ * consistent padding.
+ */
+static_assert(sizeof(__uapi_uuid_t) == sizeof(uuid_t));
+#define __uapi_uuid_t uuid_t
#endif
/*
@@ -60,7 +69,7 @@ struct cxl_mbox_get_sup_feats_in {
* Get Supported Features Supported Feature Entry
*/
struct cxl_feat_entry {
- uuid_t uuid;
+ __uapi_uuid_t uuid;
__le16 id;
__le16 get_feat_size;
__le16 set_feat_size;
@@ -110,7 +119,7 @@ struct cxl_mbox_get_sup_feats_out {
* CXL spec r3.2 section 8.2.9.6.2 Table 8-99
*/
struct cxl_mbox_get_feat_in {
- uuid_t uuid;
+ __uapi_uuid_t uuid;
__le16 offset;
__le16 count;
__u8 selection;
@@ -143,7 +152,7 @@ enum cxl_get_feat_selection {
*/
struct cxl_mbox_set_feat_in {
__struct_group(cxl_mbox_set_feat_hdr, hdr, /* no attrs */,
- uuid_t uuid;
+ __uapi_uuid_t uuid;
__le32 flags;
__le16 offset;
__u8 version;
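
The pattern above exports a plain byte array to userspace and, on the kernel side, asserts it is layout-compatible before aliasing the name back to uuid_t. A minimal stand-alone sketch of the same trick (toy uuid_t and struct, for illustration only):

#include <assert.h>

typedef unsigned char __uapi_uuid_t[16];

/* "Kernel" side: verify compatibility, then alias the UAPI name */
typedef struct { unsigned char b[16]; } uuid_t;
static_assert(sizeof(__uapi_uuid_t) == sizeof(uuid_t),
	      "UAPI and internal UUID types must have identical size");
#define __uapi_uuid_t uuid_t

struct feat_entry {
	__uapi_uuid_t uuid;	/* expands to uuid_t after the #define */
};
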
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 8a24ed59ec46..1c47136d8715 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -180,7 +180,8 @@ enum attr_idn {
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
- QUERY_ATTR_IDN_TIMESTAMP = 0x30
+ QUERY_ATTR_IDN_TIMESTAMP = 0x30,
+ QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID = 0x34,
};
/* Descriptor idn for Query requests */
@@ -390,6 +391,7 @@ enum {
UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+ UFS_DEV_LVL_EXCEPTION_SUP = BIT(12),
};
#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
@@ -419,6 +421,7 @@ enum {
MASK_EE_TOO_LOW_TEMP = BIT(4),
MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
+ MASK_EE_DEV_LVL_EXCEPTION = BIT(7),
MASK_EE_HEALTH_CRITICAL = BIT(9),
};
#define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index e3909cc691b2..e928ed0265ff 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -246,7 +246,7 @@ struct ufs_query {
struct ufs_dev_cmd {
enum dev_cmd_type type;
struct mutex lock;
- struct completion *complete;
+ struct completion complete;
struct ufs_query query;
};
@@ -968,6 +968,9 @@ enum ufshcd_mcq_opr {
* @pm_qos_req: PM QoS request handle
* @pm_qos_enabled: flag to check if pm qos is enabled
* @critical_health_count: count of critical health exceptions
+ * @dev_lvl_exception_count: count of device level exceptions since last reset
+ * @dev_lvl_exception_id: vendor specific information about the
+ * device level exception event.
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -1138,6 +1141,8 @@ struct ufs_hba {
bool pm_qos_enabled;
int critical_health_count;
+ atomic_t dev_lvl_exception_count;
+ u64 dev_lvl_exception_id;
};
/**
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
index eee3d2a4dbe4..ff0c06b6513e 100644
--- a/include/vdso/unaligned.h
+++ b/include/vdso/unaligned.h
@@ -2,14 +2,14 @@
#ifndef __VDSO_UNALIGNED_H
#define __VDSO_UNALIGNED_H
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr); \
+ __get_pptr->x; \
})
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr); \
+ __put_pptr->x = (val); \
} while (0)
#endif /* __VDSO_UNALIGNED_H */
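
The renamed variables aside, the underlying technique is unchanged: wrapping the access in a __packed single-member struct makes the compiler emit an alignment-safe (possibly byte-wise) load or store. A runnable toy version (GCC/clang extensions assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define get_unaligned_u32(ptr) ({                                        \
	const struct { uint32_t x; } __attribute__((packed)) *__pptr =  \
		(const void *)(ptr);                                     \
	__pptr->x;                                                       \
})

int main(void)
{
	unsigned char buf[8] = { 0 };
	uint32_t v = 0x12345678;

	memcpy(buf + 1, &v, sizeof(v));	/* deliberately misaligned */
	printf("0x%x\n", get_unaligned_u32(buf + 1));
	return 0;
}
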
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index d869f51ea93a..9a5f94371e50 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -9,13 +9,14 @@
#include <linux/slab.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
+#include <asm/rqspinlock.h>
#define QUEUE_STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
struct bpf_queue_stack {
struct bpf_map map;
- raw_spinlock_t lock;
+ rqspinlock_t lock;
u32 head, tail;
u32 size; /* max_entries + 1 */
@@ -78,7 +79,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
qs->size = size;
- raw_spin_lock_init(&qs->lock);
+ raw_res_spin_lock_init(&qs->lock);
return &qs->map;
}
@@ -98,12 +99,8 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
int err = 0;
void *ptr;
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, flags))
- return -EBUSY;
- } else {
- raw_spin_lock_irqsave(&qs->lock, flags);
- }
+ if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+ return -EBUSY;
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
@@ -120,7 +117,7 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
}
out:
- raw_spin_unlock_irqrestore(&qs->lock, flags);
+ raw_res_spin_unlock_irqrestore(&qs->lock, flags);
return err;
}
@@ -133,12 +130,8 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
void *ptr;
u32 index;
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, flags))
- return -EBUSY;
- } else {
- raw_spin_lock_irqsave(&qs->lock, flags);
- }
+ if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+ return -EBUSY;
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
@@ -157,7 +150,7 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
qs->head = index;
out:
- raw_spin_unlock_irqrestore(&qs->lock, flags);
+ raw_res_spin_unlock_irqrestore(&qs->lock, flags);
return err;
}
@@ -203,12 +196,8 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
if (flags & BPF_NOEXIST || flags > BPF_EXIST)
return -EINVAL;
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
- return -EBUSY;
- } else {
- raw_spin_lock_irqsave(&qs->lock, irq_flags);
- }
+ if (raw_res_spin_lock_irqsave(&qs->lock, irq_flags))
+ return -EBUSY;
if (queue_stack_map_is_full(qs)) {
if (!replace) {
@@ -227,7 +216,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
qs->head = 0;
out:
- raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+ raw_res_spin_unlock_irqrestore(&qs->lock, irq_flags);
return err;
}
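
The same conversion repeats in ringbuf.c below: instead of special-casing NMI with a trylock, the resilient lock either succeeds or fails with a nonzero return that callers turn into -EBUSY. A toy stand-in for that calling convention (this is not the rqspinlock algorithm, just the shape of its API):

#include <errno.h>
#include <stdatomic.h>

typedef struct { atomic_flag taken; } toy_rqspinlock_t;

static int toy_res_lock(toy_rqspinlock_t *lock)
{
	int spins = 1000;	/* bounded wait stands in for deadlock/timeout detection */

	while (atomic_flag_test_and_set_explicit(&lock->taken,
						 memory_order_acquire)) {
		if (--spins == 0)
			return -EBUSY;	/* caller must bail out */
	}
	return 0;
}

static void toy_res_unlock(toy_rqspinlock_t *lock)
{
	atomic_flag_clear_explicit(&lock->taken, memory_order_release);
}
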
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 1499d8caa9a3..719d73299397 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -11,6 +11,7 @@
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
+#include <asm/rqspinlock.h>
#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
@@ -29,7 +30,7 @@ struct bpf_ringbuf {
u64 mask;
struct page **pages;
int nr_pages;
- raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
+ rqspinlock_t spinlock ____cacheline_aligned_in_smp;
/* For user-space producer ring buffers, an atomic_t busy bit is used
* to synchronize access to the ring buffers in the kernel, rather than
* the spinlock that is used for kernel-producer ring buffers. This is
@@ -173,7 +174,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
if (!rb)
return NULL;
- raw_spin_lock_init(&rb->spinlock);
+ raw_res_spin_lock_init(&rb->spinlock);
atomic_set(&rb->busy, 0);
init_waitqueue_head(&rb->waitq);
init_irq_work(&rb->work, bpf_ringbuf_notify);
@@ -416,12 +417,8 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
cons_pos = smp_load_acquire(&rb->consumer_pos);
- if (in_nmi()) {
- if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
- return NULL;
- } else {
- raw_spin_lock_irqsave(&rb->spinlock, flags);
- }
+ if (raw_res_spin_lock_irqsave(&rb->spinlock, flags))
+ return NULL;
pend_pos = rb->pending_pos;
prod_pos = rb->producer_pos;
@@ -446,7 +443,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
*/
if (new_prod_pos - cons_pos > rb->mask ||
new_prod_pos - pend_pos > rb->mask) {
- raw_spin_unlock_irqrestore(&rb->spinlock, flags);
+ raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
return NULL;
}
@@ -458,7 +455,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
/* pairs with consumer's smp_load_acquire() */
smp_store_release(&rb->producer_pos, new_prod_pos);
- raw_spin_unlock_irqrestore(&rb->spinlock, flags);
+ raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index b896c4a75a5c..338305c8852c 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -253,7 +253,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
})
#else
#define RES_CHECK_TIMEOUT(ts, ret, mask) \
- ({ (ret) = check_timeout(&(ts)); })
+ ({ (ret) = check_timeout((lock), (mask), &(ts)); })
#endif
/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 128db74e9eab..e93c19565914 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5518,30 +5518,6 @@ static bool exclusive_event_installable(struct perf_event *event,
static void perf_free_addr_filters(struct perf_event *event);
-static void perf_pending_task_sync(struct perf_event *event)
-{
- struct callback_head *head = &event->pending_task;
-
- if (!event->pending_work)
- return;
- /*
- * If the task is queued to the current task's queue, we
- * obviously can't wait for it to complete. Simply cancel it.
- */
- if (task_work_cancel(current, head)) {
- event->pending_work = 0;
- local_dec(&event->ctx->nr_no_switch_fast);
- return;
- }
-
- /*
- * All accesses related to the event are within the same RCU section in
- * perf_pending_task(). The RCU grace period before the event is freed
- * will make sure all those accesses are complete by then.
- */
- rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
-}
-
/* vs perf_event_alloc() error */
static void __free_event(struct perf_event *event)
{
@@ -5599,7 +5575,6 @@ static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending_irq);
irq_work_sync(&event->pending_disable_irq);
- perf_pending_task_sync(event);
unaccount_event(event);
@@ -5692,10 +5667,17 @@ static void perf_remove_from_owner(struct perf_event *event)
static void put_event(struct perf_event *event)
{
+ struct perf_event *parent;
+
if (!atomic_long_dec_and_test(&event->refcount))
return;
+ parent = event->parent;
_free_event(event);
+
+ /* Matches the refcount bump in inherit_event() */
+ if (parent)
+ put_event(parent);
}
/*
@@ -5779,11 +5761,6 @@ again:
if (tmp == child) {
perf_remove_from_context(child, DETACH_GROUP);
list_move(&child->child_list, &free_list);
- /*
- * This matches the refcount bump in inherit_event();
- * this can't be the last reference.
- */
- put_event(event);
} else {
var = &ctx->refcount;
}
@@ -5809,7 +5786,8 @@ again:
void *var = &child->ctx->refcount;
list_del(&child->child_list);
- free_event(child);
+ /* Last reference unless ->pending_task work is pending */
+ put_event(child);
/*
* Wake any perf_event_free_task() waiting for this event to be
@@ -5820,7 +5798,11 @@ again:
}
no_ctx:
- put_event(event); /* Must be the 'last' reference */
+ /*
+ * Last reference unless ->pending_task work is pending on this event
+ * or any of its children.
+ */
+ put_event(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -7236,12 +7218,6 @@ static void perf_pending_task(struct callback_head *head)
int rctx;
/*
- * All accesses to the event must belong to the same implicit RCU read-side
- * critical section as the ->pending_work reset. See comment in
- * perf_pending_task_sync().
- */
- rcu_read_lock();
- /*
* If we 'fail' here, that's OK, it means recursion is already disabled
* and we won't recurse 'further'.
*/
@@ -7251,9 +7227,8 @@ static void perf_pending_task(struct callback_head *head)
event->pending_work = 0;
perf_sigtrap(event);
local_dec(&event->ctx->nr_no_switch_fast);
- rcuwait_wake_up(&event->pending_work_wait);
}
- rcu_read_unlock();
+ put_event(event);
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
@@ -10248,6 +10223,7 @@ static int __perf_event_overflow(struct perf_event *event,
!task_work_add(current, &event->pending_task, notify_mode)) {
event->pending_work = pending_id;
local_inc(&event->ctx->nr_no_switch_fast);
+ WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
event->pending_addr = 0;
if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
@@ -12610,7 +12586,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
init_irq_work(&event->pending_irq, perf_pending_irq);
event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
init_task_work(&event->pending_task, perf_pending_task);
- rcuwait_init(&event->pending_work_wait);
mutex_init(&event->mmap_mutex);
raw_spin_lock_init(&event->addr_filters.lock);
@@ -13747,8 +13722,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
* Kick perf_poll() for is_event_hup();
*/
perf_event_wakeup(parent_event);
- free_event(event);
- put_event(parent_event);
+ put_event(event);
return;
}
@@ -13872,13 +13846,11 @@ static void perf_free_event(struct perf_event *event,
list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
- put_event(parent);
-
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
- free_event(event);
+ put_event(event);
}
/*
@@ -14016,6 +13988,9 @@ inherit_event(struct perf_event *parent_event,
if (IS_ERR(child_event))
return child_event;
+ get_ctx(child_ctx);
+ child_event->ctx = child_ctx;
+
pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
if (IS_ERR(pmu_ctx)) {
free_event(child_event);
@@ -14037,8 +14012,6 @@ inherit_event(struct perf_event *parent_event,
return NULL;
}
- get_ctx(child_ctx);
-
/*
* Make the child state follow the state of the parent event,
* not its attr.disabled bit. We hold the parent's mutex,
@@ -14059,7 +14032,6 @@ inherit_event(struct perf_event *parent_event,
local64_set(&hwc->period_left, sample_period);
}
- child_event->ctx = child_ctx;
child_event->overflow_handler = parent_event->overflow_handler;
child_event->overflow_handler_context
= parent_event->overflow_handler_context;
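
The reworked lifetime rule is easiest to see in isolation: each inherited child holds a reference on its parent, and dropping the last child reference is what finally releases the parent. A toy model of just the refcount flow (all perf specifics omitted; plain int stands in for the atomic refcount):

#include <stdlib.h>

struct toy_event {
	int refcount;
	struct toy_event *parent;
};

static void toy_put_event(struct toy_event *event)
{
	struct toy_event *parent;

	if (--event->refcount)
		return;
	parent = event->parent;
	free(event);
	/* Matches the refcount bump taken when the child was inherited */
	if (parent)
		toy_put_event(parent);
}
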
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 615b4e6d22c7..8d783b5882b6 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1956,6 +1956,9 @@ static void free_ret_instance(struct uprobe_task *utask,
* to-be-reused return instances for future uretprobes. If ri_timer()
* happens to be running right now, though, we fallback to safety and
 * just perform RCU-delayed freeing of ri.
+ * Admittedly, this is a rather simple use of seqcount, but it nicely
+ * abstracts away all the necessary memory barriers, so we use
+ * a well-supported kernel primitive here.
*/
if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
/* immediate reuse of ri without RCU GP is OK */
@@ -2016,12 +2019,20 @@ static void ri_timer(struct timer_list *timer)
/* RCU protects return_instance from freeing. */
guard(rcu)();
- write_seqcount_begin(&utask->ri_seqcount);
+ /*
+ * See free_ret_instance() for notes on seqcount use.
+ * We also employ raw API variants to avoid lockdep false-positive
+ * warning complaining about enabled preemption. The timer can only be
+ * invoked once for a uprobe_task. Therefore there can only be one
+ * writer. The reader does not require an even sequence count to make
+ * progress, so it is OK to remain preemptible on PREEMPT_RT.
+ */
+ raw_write_seqcount_begin(&utask->ri_seqcount);
for_each_ret_instance_rcu(ri, utask->return_instances)
hprobe_expire(&ri->hprobe, false);
- write_seqcount_end(&utask->ri_seqcount);
+ raw_write_seqcount_end(&utask->ri_seqcount);
}
static struct uprobe_task *alloc_utask(void)
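
A toy model of the single-writer seqcount use described in the comments above: the timer (sole writer) bumps the sequence around its walk, and the free path reuses memory immediately only when its snapshot proves no walk ran. C11 atomics stand in for the kernel's raw_seqcount API; ordering is deliberately simplified to seq_cst:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int ri_seq;

/* Writer side (the timer callback): bump to odd, walk, bump to even */
static void writer_walk(void (*walk)(void))
{
	atomic_fetch_add(&ri_seq, 1);	/* odd: walk in progress */
	walk();
	atomic_fetch_add(&ri_seq, 1);	/* even: walk finished */
}

/* Reader side: immediate reuse is safe only if no walk ran since @snapshot */
static bool can_reuse_immediately(unsigned int snapshot)
{
	return !(snapshot & 1) && atomic_load(&ri_seq) == snapshot;
}
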
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 517ee2590a29..30899a8cc52c 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -366,7 +366,7 @@ static const struct debug_obj_descr hrtimer_debug_descr;
static void *hrtimer_debug_hint(void *addr)
{
- return ((struct hrtimer *) addr)->function;
+ return ACCESS_PRIVATE((struct hrtimer *)addr, function);
}
/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index a47bcf71defc..9a3859443c04 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -509,6 +509,7 @@ void tick_resume(void)
#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
+static DEFINE_WAIT_OVERRIDE_MAP(tick_freeze_map, LD_WAIT_SLEEP);
static unsigned int tick_freeze_depth;
/**
@@ -528,9 +529,22 @@ void tick_freeze(void)
if (tick_freeze_depth == num_online_cpus()) {
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), true);
+ /*
+ * All other CPUs have their interrupts disabled and are
+ * suspended to idle. Other tasks have been frozen so there
+ * is no scheduling happening. This means that there is no
+ * concurrency in the system at this point. Therefore it is
+ * okay to acquire a sleeping lock on PREEMPT_RT, such as a
+ * spinlock, because the lock cannot be held by other CPUs
+ * or threads and acquiring it cannot block.
+ *
+ * Inform lockdep about the situation.
+ */
+ lock_map_acquire_try(&tick_freeze_map);
system_state = SYSTEM_SUSPEND;
sched_clock_suspend();
timekeeping_suspend();
+ lock_map_release(&tick_freeze_map);
} else {
tick_suspend_local();
}
@@ -552,8 +566,16 @@ void tick_unfreeze(void)
raw_spin_lock(&tick_freeze_lock);
if (tick_freeze_depth == num_online_cpus()) {
+ /*
+ * Similar to tick_freeze(). On resumption the first CPU may
+ * acquire uncontended sleeping locks while other CPUs block on
+ * tick_freeze_lock.
+ */
+ lock_map_acquire_try(&tick_freeze_map);
timekeeping_resume();
sched_clock_resume();
+ lock_map_release(&tick_freeze_map);
+
system_state = SYSTEM_RUNNING;
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
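
The annotation generalizes: any context that is provably single-threaded may take otherwise-sleeping locks on PREEMPT_RT if it tells lockdep so. A kernel-context sketch of the shape (not buildable standalone; it reuses only the APIs from the hunk above, and the helper names are hypothetical):

static DEFINE_WAIT_OVERRIDE_MAP(my_freeze_map, LD_WAIT_SLEEP);

static void my_single_threaded_section(void)
{
	/* Tell lockdep: sleeping locks acquired here cannot block */
	lock_map_acquire_try(&my_freeze_map);
	do_work_that_takes_sleeping_locks();	/* hypothetical */
	lock_map_release(&my_freeze_map);
}
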
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1a48aedb5255..a8a02868b435 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3256,6 +3256,31 @@ static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash,
}
/*
+ * Remove functions from @hash that are in @notrace_hash
+ */
+static void remove_hash(struct ftrace_hash *hash, struct ftrace_hash *notrace_hash)
+{
+ struct ftrace_func_entry *entry;
+ struct hlist_node *tmp;
+ int size;
+ int i;
+
+ /* If the notrace hash is empty, there's nothing to do */
+ if (ftrace_hash_empty(notrace_hash))
+ return;
+
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
+ if (!__ftrace_lookup_ip(notrace_hash, entry->ip))
+ continue;
+ remove_hash_entry(hash, entry);
+ kfree(entry);
+ }
+ }
+}
+
+/*
* Add to @hash only those that are in both @new_hash1 and @new_hash2
*
 * The notrace_hash update uses just the intersect_hash() function
@@ -3295,67 +3320,6 @@ static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_has
return 0;
}
-/* Return a new hash that has a union of all @ops->filter_hash entries */
-static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
-{
- struct ftrace_hash *new_hash = NULL;
- struct ftrace_ops *subops;
- int size_bits;
- int ret;
-
- if (ops->func_hash->filter_hash)
- size_bits = ops->func_hash->filter_hash->size_bits;
- else
- size_bits = FTRACE_HASH_DEFAULT_BITS;
-
- list_for_each_entry(subops, &ops->subop_list, list) {
- ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits);
- if (ret < 0) {
- free_ftrace_hash(new_hash);
- return NULL;
- }
- /* Nothing more to do if new_hash is empty */
- if (ftrace_hash_empty(new_hash))
- break;
- }
- /* Can't return NULL as that means this failed */
- return new_hash ? : EMPTY_HASH;
-}
-
-/* Make @ops trace everything except what all its subops do not trace */
-static struct ftrace_hash *intersect_hashes(struct ftrace_ops *ops)
-{
- struct ftrace_hash *new_hash = NULL;
- struct ftrace_ops *subops;
- int size_bits;
- int ret;
-
- list_for_each_entry(subops, &ops->subop_list, list) {
- struct ftrace_hash *next_hash;
-
- if (!new_hash) {
- size_bits = subops->func_hash->notrace_hash->size_bits;
- new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash);
- if (!new_hash)
- return NULL;
- continue;
- }
- size_bits = new_hash->size_bits;
- next_hash = new_hash;
- new_hash = alloc_ftrace_hash(size_bits);
- ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash);
- free_ftrace_hash(next_hash);
- if (ret < 0) {
- free_ftrace_hash(new_hash);
- return NULL;
- }
- /* Nothing more to do if new_hash is empty */
- if (ftrace_hash_empty(new_hash))
- break;
- }
- return new_hash;
-}
-
static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B)
{
struct ftrace_func_entry *entry;
@@ -3427,6 +3391,93 @@ static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_
return 0;
}
+static int add_first_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
+ struct ftrace_ops_hash *func_hash)
+{
+ /* If the filter hash is not empty, simply remove the nohash from it */
+ if (!ftrace_hash_empty(func_hash->filter_hash)) {
+ *filter_hash = copy_hash(func_hash->filter_hash);
+ if (!*filter_hash)
+ return -ENOMEM;
+ remove_hash(*filter_hash, func_hash->notrace_hash);
+ *notrace_hash = EMPTY_HASH;
+
+ } else {
+ *notrace_hash = copy_hash(func_hash->notrace_hash);
+ if (!*notrace_hash)
+ return -ENOMEM;
+ *filter_hash = EMPTY_HASH;
+ }
+ return 0;
+}
+
+static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
+ struct ftrace_ops_hash *ops_hash, struct ftrace_ops_hash *subops_hash)
+{
+ int size_bits;
+ int ret;
+
+ /* If the subops trace all functions, so must the main ops */
+ if (ftrace_hash_empty(ops_hash->filter_hash) ||
+ ftrace_hash_empty(subops_hash->filter_hash)) {
+ *filter_hash = EMPTY_HASH;
+ } else {
+ /*
+ * The main ops filter hash is not empty, so its
+ * notrace_hash had better be, as the notrace hash
+ * is only used for empty main filter hashes.
+ */
+ WARN_ON_ONCE(!ftrace_hash_empty(ops_hash->notrace_hash));
+
+ size_bits = max(ops_hash->filter_hash->size_bits,
+ subops_hash->filter_hash->size_bits);
+
+ /* Copy the subops hash */
+ *filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash);
+ if (!*filter_hash)
+ return -ENOMEM;
+ /* Remove any notrace functions from the copy */
+ remove_hash(*filter_hash, subops_hash->notrace_hash);
+
+ ret = append_hash(filter_hash, ops_hash->filter_hash,
+ size_bits);
+ if (ret < 0) {
+ free_ftrace_hash(*filter_hash);
+ return ret;
+ }
+ }
+
+ /*
+ * Only process notrace hashes if the main filter hash is empty
+ * (tracing all functions), otherwise the filter hash will just
+ * remove the notrace hash functions, and the notrace hash is
+ * not needed.
+ */
+ if (ftrace_hash_empty(*filter_hash)) {
+ /*
+ * Intersect the notrace functions. That is, if two
+ * subops are not tracing a set of functions, the
+		 * main ops may only skip the functions that are
+		 * notrace in both subops, but still has to trace
+		 * the functions that are notrace in only one of
+		 * them, so that the other subops can trace them.
+ */
+ size_bits = max(ops_hash->notrace_hash->size_bits,
+ subops_hash->notrace_hash->size_bits);
+ *notrace_hash = alloc_ftrace_hash(size_bits);
+ if (!*notrace_hash)
+ return -ENOMEM;
+
+ ret = intersect_hash(notrace_hash, ops_hash->notrace_hash,
+ subops_hash->notrace_hash);
+ if (ret < 0) {
+ free_ftrace_hash(*notrace_hash);
+ return ret;
+ }
+ }
+ return 0;
+}
+
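The intersection rule above is easiest to see with concrete sets: if subops A has notrace {f1, f2} and subops B has notrace {f2, f3}, the manager may only skip f2; it must still be called for f1 (B traces it) and f3 (A traces it). A toy sketch, again with integers standing in for hash entries:

	#include <stdio.h>

	int main(void)
	{
		int a[] = { 1, 2 };	/* subops A: never trace fn 1, 2 */
		int b[] = { 2, 3 };	/* subops B: never trace fn 2, 3 */

		/* manager notrace = intersection of A and B = { 2 } */
		for (int i = 0; i < 2; i++)
			for (int j = 0; j < 2; j++)
				if (a[i] == b[j])
					printf("manager skips fn %d\n", a[i]);
		return 0;
	}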
/**
* ftrace_startup_subops - enable tracing for subops of an ops
* @ops: Manager ops (used to pick all the functions of its subops)
@@ -3443,7 +3494,6 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
struct ftrace_hash *notrace_hash;
struct ftrace_hash *save_filter_hash;
struct ftrace_hash *save_notrace_hash;
- int size_bits;
int ret;
if (unlikely(ftrace_disabled))
@@ -3467,14 +3517,14 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
/* For the first subops to ops just enable it normally */
if (list_empty(&ops->subop_list)) {
- /* Just use the subops hashes */
- filter_hash = copy_hash(subops->func_hash->filter_hash);
- notrace_hash = copy_hash(subops->func_hash->notrace_hash);
- if (!filter_hash || !notrace_hash) {
- free_ftrace_hash(filter_hash);
- free_ftrace_hash(notrace_hash);
- return -ENOMEM;
- }
+
+		/* The ops had no subops, so its hashes should be empty */
+ WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->filter_hash));
+ WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->notrace_hash));
+
+ ret = add_first_hash(&filter_hash, &notrace_hash, subops->func_hash);
+ if (ret < 0)
+ return ret;
save_filter_hash = ops->func_hash->filter_hash;
save_notrace_hash = ops->func_hash->notrace_hash;
@@ -3500,48 +3550,16 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
/*
* Here there's already something attached. Here are the rules:
- * o If either filter_hash is empty then the final stays empty
- * o Otherwise, the final is a superset of both hashes
- * o If either notrace_hash is empty then the final stays empty
- * o Otherwise, the final is an intersection between the hashes
+ * If the new subops and main ops filter hashes are not empty:
+ * o Make a copy of the subops filter hash
+	 *  o Remove all functions in the subops notrace hash from it.
+ * o Add in the main hash filter functions
+ * o Remove any of these functions from the main notrace hash
*/
- if (ftrace_hash_empty(ops->func_hash->filter_hash) ||
- ftrace_hash_empty(subops->func_hash->filter_hash)) {
- filter_hash = EMPTY_HASH;
- } else {
- size_bits = max(ops->func_hash->filter_hash->size_bits,
- subops->func_hash->filter_hash->size_bits);
- filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
- if (!filter_hash)
- return -ENOMEM;
- ret = append_hash(&filter_hash, subops->func_hash->filter_hash,
- size_bits);
- if (ret < 0) {
- free_ftrace_hash(filter_hash);
- return ret;
- }
- }
-
- if (ftrace_hash_empty(ops->func_hash->notrace_hash) ||
- ftrace_hash_empty(subops->func_hash->notrace_hash)) {
- notrace_hash = EMPTY_HASH;
- } else {
- size_bits = max(ops->func_hash->filter_hash->size_bits,
- subops->func_hash->filter_hash->size_bits);
- notrace_hash = alloc_ftrace_hash(size_bits);
- if (!notrace_hash) {
- free_ftrace_hash(filter_hash);
- return -ENOMEM;
- }
- ret = intersect_hash(&notrace_hash, ops->func_hash->filter_hash,
- subops->func_hash->filter_hash);
- if (ret < 0) {
- free_ftrace_hash(filter_hash);
- free_ftrace_hash(notrace_hash);
- return ret;
- }
- }
+ ret = add_next_hash(&filter_hash, &notrace_hash, ops->func_hash, subops->func_hash);
+ if (ret < 0)
+ return ret;
list_add(&subops->list, &ops->subop_list);
@@ -3557,6 +3575,42 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
return ret;
}
+static int rebuild_hashes(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
+ struct ftrace_ops *ops)
+{
+ struct ftrace_ops_hash temp_hash;
+ struct ftrace_ops *subops;
+ bool first = true;
+ int ret;
+
+ temp_hash.filter_hash = EMPTY_HASH;
+ temp_hash.notrace_hash = EMPTY_HASH;
+
+ list_for_each_entry(subops, &ops->subop_list, list) {
+ *filter_hash = EMPTY_HASH;
+ *notrace_hash = EMPTY_HASH;
+
+ if (first) {
+ ret = add_first_hash(filter_hash, notrace_hash, subops->func_hash);
+ if (ret < 0)
+ return ret;
+ first = false;
+ } else {
+ ret = add_next_hash(filter_hash, notrace_hash,
+ &temp_hash, subops->func_hash);
+ if (ret < 0) {
+ free_ftrace_hash(temp_hash.filter_hash);
+ free_ftrace_hash(temp_hash.notrace_hash);
+ return ret;
+ }
+ }
+
+ temp_hash.filter_hash = *filter_hash;
+ temp_hash.notrace_hash = *notrace_hash;
+ }
+ return 0;
+}
+
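rebuild_hashes() is then just a left fold over the subops list: the first iteration seeds the filter/notrace pair via add_first_hash(), and each later one folds the previous accumulation (kept in temp_hash) together with the next subops via add_next_hash(). Shape-wise, with a hypothetical stand-in combiner rather than real hash operations:

	#include <stdio.h>

	static int combine(int acc, int v)
	{
		return acc & v;		/* stand-in for add_next_hash() */
	}

	int main(void)
	{
		int subops[] = { 0xff, 0x0f, 0x3c };
		int acc = subops[0];	/* stand-in for add_first_hash() */

		for (int i = 1; i < 3; i++)
			acc = combine(acc, subops[i]);
		printf("accumulated: 0x%x\n", acc);
		return 0;
	}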
/**
* ftrace_shutdown_subops - Remove a subops from a manager ops
* @ops: A manager ops to remove @subops from
@@ -3605,14 +3659,9 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
}
/* Rebuild the hashes without subops */
- filter_hash = append_hashes(ops);
- notrace_hash = intersect_hashes(ops);
- if (!filter_hash || !notrace_hash) {
- free_ftrace_hash(filter_hash);
- free_ftrace_hash(notrace_hash);
- list_add(&subops->list, &ops->subop_list);
- return -ENOMEM;
- }
+ ret = rebuild_hashes(&filter_hash, &notrace_hash, ops);
+ if (ret < 0)
+ return ret;
ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
if (ret < 0) {
@@ -3628,11 +3677,11 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
struct ftrace_hash **orig_subhash,
- struct ftrace_hash *hash,
- int enable)
+ struct ftrace_hash *hash)
{
struct ftrace_ops *ops = subops->managed;
- struct ftrace_hash **orig_hash;
+ struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash;
struct ftrace_hash *save_hash;
struct ftrace_hash *new_hash;
int ret;
@@ -3649,24 +3698,15 @@ static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
return -ENOMEM;
}
- /* Create a new_hash to hold the ops new functions */
- if (enable) {
- orig_hash = &ops->func_hash->filter_hash;
- new_hash = append_hashes(ops);
- } else {
- orig_hash = &ops->func_hash->notrace_hash;
- new_hash = intersect_hashes(ops);
- }
-
- /* Move the hash over to the new hash */
- ret = __ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
-
- free_ftrace_hash(new_hash);
+ ret = rebuild_hashes(&filter_hash, &notrace_hash, ops);
+ if (!ret)
+ ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
if (ret) {
/* Put back the original hash */
- free_ftrace_hash_rcu(*orig_subhash);
+ new_hash = *orig_subhash;
*orig_subhash = save_hash;
+ free_ftrace_hash_rcu(new_hash);
} else {
free_ftrace_hash_rcu(save_hash);
}
@@ -4890,7 +4930,7 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
int enable)
{
if (ops->flags & FTRACE_OPS_FL_SUBOP)
- return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
+ return ftrace_hash_move_and_update_subops(ops, orig_hash, hash);
/*
* If this ops is not enabled, it could be sharing its filters
@@ -4909,7 +4949,7 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
list_for_each_entry(subops, &op->subop_list, list) {
if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
subops->func_hash == ops->func_hash) {
- return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
+ return ftrace_hash_move_and_update_subops(subops, orig_hash, hash);
}
}
} while_for_each_ftrace_op(op);
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 968c5c3b0246..e4077500a91d 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -225,7 +225,12 @@ bool rv_is_nested_monitor(struct rv_monitor_def *mdef)
*/
bool rv_is_container_monitor(struct rv_monitor_def *mdef)
{
- struct rv_monitor_def *next = list_next_entry(mdef, list);
+ struct rv_monitor_def *next;
+
+ if (list_is_last(&mdef->list, &rv_monitors_list))
+ return false;
+
+ next = list_next_entry(mdef, list);
return next->parent == mdef->monitor || !mdef->monitor->enable;
}
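Kernel lists are circular, so calling list_next_entry() on the final monitor lands on the list head itself, reinterpreted as a struct rv_monitor_def — a type-confused read. A self-contained userspace model of the bug and the guard (the types here are simplified stand-ins):

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct monitor { int id; struct list_head list; };

	int main(void)
	{
		struct list_head head;
		struct monitor a = { .id = 1 };

		/* circular list with a single element, like rv_monitors_list */
		head.next = head.prev = &a.list;
		a.list.next = a.list.prev = &head;

		/* an unconditional "next entry" from the last node would
		 * reinterpret the head sentinel as a struct monitor */
		if (a.list.next == &head) {
			printf("last entry, no successor\n");
		} else {
			struct monitor *next =
				container_of(a.list.next, struct monitor, list);
			printf("next id %d\n", next->id);
		}
		return 0;
	}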
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b581e388a9d9..8ddf6b17215c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9806,6 +9806,7 @@ static int instance_mkdir(const char *name)
return ret;
}
+#ifdef CONFIG_MMU
static u64 map_pages(unsigned long start, unsigned long size)
{
unsigned long vmap_start, vmap_end;
@@ -9828,6 +9829,12 @@ static u64 map_pages(unsigned long start, unsigned long size)
return (u64)vmap_start;
}
+#else
+static inline u64 map_pages(unsigned long start, unsigned long size)
+{
+ return 0;
+}
+#endif
/**
* trace_array_get_by_name - Create/Lookup a trace array, given its name.
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 969f48742d72..33cfbd4ed76d 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -370,7 +370,6 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
union trace_synth_field *data = &entry->fields[n_u64];
trace_seq_printf(s, print_fmt, se->fields[i]->name,
- STR_VAR_LEN_MAX,
(char *)entry + data->as_dynamic.offset,
i == se->n_fields - 1 ? "" : " ");
n_u64++;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 2f077d4158e5..0c357a89c58e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -880,8 +880,6 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
if (print_retval || print_retaddr)
trace_seq_puts(s, " /*");
- else
- trace_seq_putc(s, '\n');
} else {
print_retaddr = false;
trace_seq_printf(s, "} /* %ps", func);
@@ -899,7 +897,7 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
}
if (!entry || print_retval || print_retaddr)
- trace_seq_puts(s, " */\n");
+ trace_seq_puts(s, " */");
}
#else
@@ -975,7 +973,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
} else
trace_seq_puts(s, "();");
}
- trace_seq_printf(s, "\n");
+ trace_seq_putc(s, '\n');
print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
cpu, iter->ent->pid, flags);
@@ -1313,10 +1311,11 @@ print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
* that if the funcgraph-tail option is enabled.
*/
if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
- trace_seq_puts(s, "}\n");
+ trace_seq_puts(s, "}");
else
- trace_seq_printf(s, "} /* %ps */\n", (void *)func);
+ trace_seq_printf(s, "} /* %ps */", (void *)func);
}
+ trace_seq_putc(s, '\n');
/* Overrun */
if (flags & TRACE_GRAPH_PRINT_OVERRUN)
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 1d893e313614..25ecc1334b67 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -422,11 +422,20 @@ static int vm_module_tags_populate(void)
unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
unsigned long more_pages;
- unsigned long nr;
+ unsigned long nr = 0;
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
- nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
- NUMA_NO_NODE, more_pages, next_page);
+ while (nr < more_pages) {
+ unsigned long allocated;
+
+ allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+ NUMA_NO_NODE, more_pages - nr, next_page + nr);
+
+ if (!allocated)
+ break;
+ nr += allocated;
+ }
+
if (nr < more_pages ||
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
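The change above accounts for alloc_pages_bulk_node() being allowed to return fewer pages than requested: rather than treating a short return as failure, the loop keeps asking for the remainder until the request is satisfied or the allocator makes no progress at all. The retry shape, modeled in userspace with a mock allocator (mock_bulk() is hypothetical):

	#include <stdio.h>

	static unsigned long mock_bulk(unsigned long want)
	{
		return want > 3 ? 3 : want;	/* at most 3 "pages" per call */
	}

	int main(void)
	{
		unsigned long more_pages = 10, nr = 0;

		while (nr < more_pages) {
			unsigned long allocated = mock_bulk(more_pages - nr);

			if (!allocated)
				break;		/* no progress: give up */
			nr += allocated;
		}
		printf("got %lu of %lu\n", nr, more_pages);
		return 0;
	}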
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 13da529e2e72..5738ae286b41 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -518,4 +518,5 @@ error:
}
EXPORT_SYMBOL_GPL(asn1_ber_decoder);
+MODULE_DESCRIPTION("Decoder for ASN.1 BER/DER/CER encoded bytestream");
MODULE_LICENSE("GPL");
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8c7fdb7d8c8f..bc9391e55d57 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1191,7 +1191,7 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
return -ENOMEM;
p = *pages;
for (int k = 0; k < n; k++) {
- struct folio *folio = page_folio(page);
+ struct folio *folio = page_folio(page + k);
p[k] = page + k;
if (!folio_test_slab(folio))
folio_get(folio);
diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c
index d47c472b0520..848b682a2d70 100644
--- a/lib/tests/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -325,4 +325,5 @@ static struct kunit_suite test_suite = {
};
kunit_test_suite(test_suite);
+MODULE_DESCRIPTION("Kunit tests for slub allocator");
MODULE_LICENSE("GPL");
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 9308bcfb2ad5..dfb4f2358cab 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -165,4 +165,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
}
EXPORT_SYMBOL(ucs2_as_utf8);
+MODULE_DESCRIPTION("UCS2 string handling");
MODULE_LICENSE("GPL v2");
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index 9720114c0672..b8996d90e8bc 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -18,4 +18,5 @@ EXPORT_SYMBOL(zlib_inflateEnd);
EXPORT_SYMBOL(zlib_inflateReset);
EXPORT_SYMBOL(zlib_inflateIncomp);
EXPORT_SYMBOL(zlib_inflate_blob);
+MODULE_DESCRIPTION("Data decompression using the deflation algorithm");
MODULE_LICENSE("GPL");
diff --git a/mm/cma.c b/mm/cma.c
index b06d5fe73399..15632939f20a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -35,7 +35,7 @@
struct cma cma_areas[MAX_CMA_AREAS];
unsigned int cma_area_count;
-static int __init __cma_declare_contiguous_nid(phys_addr_t base,
+static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, const char *name, struct cma **res_cma,
@@ -370,7 +370,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
phys_addr_t align, unsigned int order_per_bit,
const char *name, struct cma **res_cma, int nid)
{
- phys_addr_t start, end;
+ phys_addr_t start = 0, end;
phys_addr_t size, sizesum, sizeleft;
struct cma_init_memrange *mrp, *mlp, *failed;
struct cma_memrange *cmrp;
@@ -384,7 +384,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
/*
* First, try it the normal way, producing just one range.
*/
- ret = __cma_declare_contiguous_nid(0, total_size, 0, align,
+ ret = __cma_declare_contiguous_nid(&start, total_size, 0, align,
order_per_bit, false, name, res_cma, nid);
if (ret != -ENOMEM)
goto out;
@@ -580,7 +580,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
{
int ret;
- ret = __cma_declare_contiguous_nid(base, size, limit, alignment,
+ ret = __cma_declare_contiguous_nid(&base, size, limit, alignment,
order_per_bit, fixed, name, res_cma, nid);
if (ret != 0)
pr_err("Failed to reserve %ld MiB\n",
@@ -592,14 +592,14 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
return ret;
}
-static int __init __cma_declare_contiguous_nid(phys_addr_t base,
+static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, const char *name, struct cma **res_cma,
int nid)
{
phys_addr_t memblock_end = memblock_end_of_DRAM();
- phys_addr_t highmem_start;
+ phys_addr_t highmem_start, base = *basep;
int ret;
/*
@@ -722,12 +722,15 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t base,
}
ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
- if (ret)
+ if (ret) {
memblock_phys_free(base, size);
+ return ret;
+ }
(*res_cma)->nid = nid;
+ *basep = base;
- return ret;
+ return 0;
}
static void cma_debug_show_areas(struct cma *cma)
diff --git a/mm/compaction.c b/mm/compaction.c
index 139f00c0308a..ca71fd3c3181 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -981,13 +981,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
if (PageHuge(page)) {
+ const unsigned int order = compound_order(page);
/*
* skip hugetlbfs if we are not compacting for pages
* bigger than its order. THPs and other compound pages
* are handled below.
*/
if (!cc->alloc_contig) {
- const unsigned int order = compound_order(page);
if (order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << order) - 1;
@@ -1011,8 +1011,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* Do not report -EBUSY down the chain */
if (ret == -EBUSY)
ret = 0;
- low_pfn += compound_nr(page) - 1;
- nr_scanned += compound_nr(page) - 1;
+ low_pfn += (1UL << order) - 1;
+ nr_scanned += (1UL << order) - 1;
goto isolate_fail;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index b5e784f34d98..7b90cbeb4a1a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2244,6 +2244,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
*start = folio->index + nr;
goto out;
}
+ xas_advance(&xas, folio_next_index(folio) - 1);
continue;
put_folio:
folio_put(folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 39f92aad7bd1..e3e6ac991b9c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2271,7 +2271,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
* as surplus_pages, otherwise it might confuse
* persistent_huge_pages() momentarily.
*/
- __prep_account_new_huge_page(h, nid);
+ __prep_account_new_huge_page(h, folio_nid(folio));
/*
* We could have raced with the pool size change.
@@ -3825,6 +3825,7 @@ found:
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
nodemask_t *nodes_allowed)
{
+ unsigned long persistent_free_count;
unsigned long min_count;
unsigned long allocated;
struct folio *folio;
@@ -3959,8 +3960,24 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
+ *
+ * min_count is the expected number of persistent pages, we
+ * shouldn't calculate min_count by using
+ * resv_huge_pages + persistent_huge_pages() - free_huge_pages,
+	 * because free_huge_pages may include free surplus huge pages, which
+	 * would then be subtracted twice. Free surplus huge pages come from HVO
+ * failing to restore vmemmap, see comments in the callers of
+ * hugetlb_vmemmap_restore_folio(). Thus, we should calculate
+ * persistent free count first.
*/
- min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
+ persistent_free_count = h->free_huge_pages;
+ if (h->free_huge_pages > persistent_huge_pages(h)) {
+ if (h->free_huge_pages > h->surplus_huge_pages)
+ persistent_free_count -= h->surplus_huge_pages;
+ else
+ persistent_free_count = 0;
+ }
+ min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
min_count = max(count, min_count);
try_to_free_low(h, min_count, nodes_allowed);
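To see the miscounting the comment warns about, take a hypothetical state: nr_huge_pages = 10 with surplus_huge_pages = 8, so persistent_huge_pages() = 2; free_huge_pages = 6, all of them free surplus left behind by failed HVO restores; resv_huge_pages = 0. The old formula gives 0 + 10 - 6 = 4, demanding four persistent pages although only two exist. The new code sees that free (6) exceeds persistent (2) but not surplus (8), clamps persistent_free_count to 0, and yields min_count = 0 + 2 - 0 = 2, matching the two actually-persistent pages.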
@@ -4630,7 +4647,7 @@ static void __init hugetlb_sysfs_init(void)
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
hstate_kobjs, &hstate_attr_group);
if (err)
- pr_err("HugeTLB: Unable to add hstate %s", h->name);
+ pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
}
#ifdef CONFIG_NUMA
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index 3ea317837c2d..f24e3bef72a4 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -2127,4 +2127,5 @@ static struct kunit_suite kasan_kunit_test_suite = {
kunit_test_suite(kasan_kunit_test_suite);
+MODULE_DESCRIPTION("KUnit tests for checking KASAN bug-detection capabilities");
MODULE_LICENSE("GPL");
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 421740f1bcdc..c96c1f2b9cf5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1759,7 +1759,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
}
struct memcg_stock_pcp {
- localtry_lock_t stock_lock;
+ local_trylock_t stock_lock;
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
@@ -1774,7 +1774,7 @@ struct memcg_stock_pcp {
#define FLUSHING_CACHED_CHARGE 0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
- .stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
+ .stock_lock = INIT_LOCAL_TRYLOCK(stock_lock),
};
static DEFINE_MUTEX(percpu_charge_mutex);
@@ -1805,11 +1805,10 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
- if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
- if (!gfpflags_allow_spinning(gfp_mask))
- return ret;
- localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
- }
+ if (gfpflags_allow_spinning(gfp_mask))
+ local_lock_irqsave(&memcg_stock.stock_lock, flags);
+ else if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags))
+ return ret;
stock = this_cpu_ptr(&memcg_stock);
stock_pages = READ_ONCE(stock->nr_pages);
@@ -1818,7 +1817,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
ret = true;
}
- localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
return ret;
}
@@ -1857,14 +1856,14 @@ static void drain_local_stock(struct work_struct *dummy)
* drain_stock races is that we always operate on local CPU stock
* here with IRQ disabled
*/
- localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+ local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
old = drain_obj_stock(stock);
drain_stock(stock);
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
- localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
obj_cgroup_put(old);
}
@@ -1894,7 +1893,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
unsigned long flags;
- if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+ if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
/*
* In case of unlikely failure to lock percpu stock_lock
* uncharge memcg directly.
@@ -1907,7 +1906,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
return;
}
__refill_stock(memcg, nr_pages);
- localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
}
/*
@@ -1964,9 +1963,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
stock = &per_cpu(memcg_stock, cpu);
/* drain_obj_stock requires stock_lock */
- localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+ local_lock_irqsave(&memcg_stock.stock_lock, flags);
old = drain_obj_stock(stock);
- localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
drain_stock(stock);
obj_cgroup_put(old);
@@ -2787,7 +2786,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
unsigned long flags;
int *bytes;
- localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+ local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
/*
@@ -2836,7 +2835,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
if (nr)
__mod_objcg_mlstate(objcg, pgdat, idx, nr);
- localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
obj_cgroup_put(old);
}
@@ -2846,7 +2845,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
unsigned long flags;
bool ret = false;
- localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+ local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2854,7 +2853,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
ret = true;
}
- localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
return ret;
}
@@ -2946,7 +2945,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
unsigned long flags;
unsigned int nr_pages = 0;
- localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+ local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -2960,7 +2959,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
stock->nr_bytes &= (PAGE_SIZE - 1);
}
- localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+ local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
obj_cgroup_put(old);
if (nr_pages)
diff --git a/mm/memory.c b/mm/memory.c
index 2d8c265fc7d6..44481fe7c629 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1361,7 +1361,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct mm_struct *src_mm = src_vma->vm_mm;
struct mmu_notifier_range range;
- unsigned long next, pfn;
+ unsigned long next, pfn = 0;
bool is_cow;
int ret;
@@ -2938,11 +2938,11 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
if (fn) {
do {
if (create || !pte_none(ptep_get(pte))) {
- err = fn(pte++, addr, data);
+ err = fn(pte, addr, data);
if (err)
break;
}
- } while (addr += PAGE_SIZE, addr != end);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
}
*mask |= PGTBL_PTE_MODIFIED;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a18340b32218..244111c88183 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1396,11 +1396,12 @@ static void free_one_page(struct zone *zone, struct page *page,
struct llist_head *llhead;
unsigned long flags;
- if (!spin_trylock_irqsave(&zone->lock, flags)) {
- if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+ if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+ if (!spin_trylock_irqsave(&zone->lock, flags)) {
add_page_to_zone_llist(zone, page, order);
return;
}
+ } else {
spin_lock_irqsave(&zone->lock, flags);
}
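This reordering (mirrored again in rmqueue_bulk() and rmqueue_buddy() below) changes which caller pays for contention: only opportunistic FPI_TRYLOCK/ALLOC_TRYLOCK callers use the trylock-and-defer path, while ordinary callers take the lock unconditionally instead of going through a failed trylock first. A userspace sketch of the pattern, with a pthread mutex standing in for zone->lock:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static bool free_one(bool trylock_only)
	{
		if (trylock_only) {
			if (pthread_mutex_trylock(&lock) != 0)
				return false;	/* defer, e.g. to a llist */
		} else {
			pthread_mutex_lock(&lock);
		}
		/* ... critical section ... */
		pthread_mutex_unlock(&lock);
		return true;
	}

	int main(void)
	{
		printf("%d %d\n", free_one(false), free_one(true));
		return 0;
	}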
@@ -2178,23 +2179,15 @@ try_to_claim_block(struct zone *zone, struct page *page,
}
/*
- * Try finding a free buddy page on the fallback list.
- *
- * This will attempt to claim a whole pageblock for the requested type
- * to ensure grouping of such requests in the future.
- *
- * If a whole block cannot be claimed, steal an individual page, regressing to
- * __rmqueue_smallest() logic to at least break up as little contiguity as
- * possible.
+ * Try to allocate from some fallback migratetype by claiming the entire block,
+ * i.e. converting it to the allocation's start migratetype.
*
* The use of signed ints for order and current_order is a deliberate
* deviation from the rest of this file, to make the for loop
* condition simpler.
- *
- * Return the stolen page, or NULL if none can be found.
*/
static __always_inline struct page *
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
+__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
unsigned int alloc_flags)
{
struct free_area *area;
@@ -2232,14 +2225,29 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
page = try_to_claim_block(zone, page, current_order, order,
start_migratetype, fallback_mt,
alloc_flags);
- if (page)
- goto got_one;
+ if (page) {
+ trace_mm_page_alloc_extfrag(page, order, current_order,
+ start_migratetype, fallback_mt);
+ return page;
+ }
}
- if (alloc_flags & ALLOC_NOFRAGMENT)
- return NULL;
+ return NULL;
+}
+
+/*
+ * Try to steal a single page from some fallback migratetype. Leave the rest of
+ * the block as its current migratetype, potentially causing fragmentation.
+ */
+static __always_inline struct page *
+__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
+{
+ struct free_area *area;
+ int current_order;
+ struct page *page;
+ int fallback_mt;
+ bool claim_block;
- /* No luck claiming pageblock. Find the smallest fallback page */
for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
@@ -2249,25 +2257,28 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
page = get_page_from_free_area(area, fallback_mt);
page_del_and_expand(zone, page, order, current_order, fallback_mt);
- goto got_one;
+ trace_mm_page_alloc_extfrag(page, order, current_order,
+ start_migratetype, fallback_mt);
+ return page;
}
return NULL;
-
-got_one:
- trace_mm_page_alloc_extfrag(page, order, current_order,
- start_migratetype, fallback_mt);
-
- return page;
}
+enum rmqueue_mode {
+ RMQUEUE_NORMAL,
+ RMQUEUE_CMA,
+ RMQUEUE_CLAIM,
+ RMQUEUE_STEAL,
+};
+
/*
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
*/
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
- unsigned int alloc_flags)
+ unsigned int alloc_flags, enum rmqueue_mode *mode)
{
struct page *page;
@@ -2286,16 +2297,48 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
}
}
- page = __rmqueue_smallest(zone, order, migratetype);
- if (unlikely(!page)) {
- if (alloc_flags & ALLOC_CMA)
+ /*
+ * First try the freelists of the requested migratetype, then try
+ * fallbacks modes with increasing levels of fragmentation risk.
+ *
+	 * The fallback logic is expensive and rmqueue_bulk() calls this in
+ * a loop with the zone->lock held, meaning the freelists are
+ * not subject to any outside changes. Remember in *mode where
+ * we found pay dirt, to save us the search on the next call.
+ */
+ switch (*mode) {
+ case RMQUEUE_NORMAL:
+ page = __rmqueue_smallest(zone, order, migratetype);
+ if (page)
+ return page;
+ fallthrough;
+ case RMQUEUE_CMA:
+ if (alloc_flags & ALLOC_CMA) {
page = __rmqueue_cma_fallback(zone, order);
-
- if (!page)
- page = __rmqueue_fallback(zone, order, migratetype,
- alloc_flags);
+ if (page) {
+ *mode = RMQUEUE_CMA;
+ return page;
+ }
+ }
+ fallthrough;
+ case RMQUEUE_CLAIM:
+ page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
+ if (page) {
+ /* Replenished preferred freelist, back to normal mode. */
+ *mode = RMQUEUE_NORMAL;
+ return page;
+ }
+ fallthrough;
+ case RMQUEUE_STEAL:
+ if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
+ page = __rmqueue_steal(zone, order, migratetype);
+ if (page) {
+ *mode = RMQUEUE_STEAL;
+ return page;
+ }
+ }
}
- return page;
+ return NULL;
}
/*
@@ -2307,17 +2350,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, unsigned int alloc_flags)
{
+ enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
unsigned long flags;
int i;
- if (!spin_trylock_irqsave(&zone->lock, flags)) {
- if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+ if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+ if (!spin_trylock_irqsave(&zone->lock, flags))
return 0;
+ } else {
spin_lock_irqsave(&zone->lock, flags);
}
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype,
- alloc_flags);
+ alloc_flags, &rmqm);
if (unlikely(page == NULL))
break;
@@ -2933,15 +2978,18 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
do {
page = NULL;
- if (!spin_trylock_irqsave(&zone->lock, flags)) {
- if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+ if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+ if (!spin_trylock_irqsave(&zone->lock, flags))
return NULL;
+ } else {
spin_lock_irqsave(&zone->lock, flags);
}
if (alloc_flags & ALLOC_HIGHATOMIC)
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (!page) {
- page = __rmqueue(zone, order, migratetype, alloc_flags);
+ enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
+
+ page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
/*
* If the allocation fails, allow OOM handling and
diff --git a/mm/slub.c b/mm/slub.c
index b46f87662e71..dc9e729e1d26 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1973,6 +1973,11 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
__GFP_ACCOUNT | __GFP_NOFAIL)
+static inline void init_slab_obj_exts(struct slab *slab)
+{
+ slab->obj_exts = 0;
+}
+
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
gfp_t gfp, bool new_slab)
{
@@ -2058,6 +2063,10 @@ static inline bool need_slab_obj_ext(void)
#else /* CONFIG_SLAB_OBJ_EXT */
+static inline void init_slab_obj_exts(struct slab *slab)
+{
+}
+
static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
gfp_t gfp, bool new_slab)
{
@@ -2637,6 +2646,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
slab->objects = oo_objects(oo);
slab->inuse = 0;
slab->frozen = 0;
+ init_slab_obj_exts(slab);
account_slab(slab, oo_order(oo), s, flags);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index fbf2cf62ab9f..7d5d709cc838 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1902,6 +1902,14 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
unsigned long end)
{
struct vm_area_struct *ret;
+ bool give_up_on_oom = false;
+
+ /*
+ * If we are modifying only and not splitting, just give up on the merge
+ * if OOM prevents us from merging successfully.
+ */
+ if (start == vma->vm_start && end == vma->vm_end)
+ give_up_on_oom = true;
/* Reset ptes for the whole vma range if wr-protected */
if (userfaultfd_wp(vma))
@@ -1909,7 +1917,7 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
vma->vm_flags & ~__VM_UFFD_FLAGS,
- NULL_VM_UFFD_CTX);
+ NULL_VM_UFFD_CTX, give_up_on_oom);
/*
* In the vma_merge() successful mprotect-like case 8:
@@ -1960,7 +1968,8 @@ int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
new_flags,
- (struct vm_userfaultfd_ctx){ctx});
+ (struct vm_userfaultfd_ctx){ctx},
+ /* give_up_on_oom = */false);
if (IS_ERR(vma))
return PTR_ERR(vma);
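The asymmetry between the two call sites is deliberate: clearing state over exactly [vma->vm_start, vma->vm_end) needs no split, so a failed merge can be silently abandoned and the VMA simply left unmerged; a registration over, say, only the back half of a VMA must split first, and a split failure still has to be reported. Hence give_up_on_oom is computed in the clear path but hard-coded false in the register path.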
diff --git a/mm/vma.c b/mm/vma.c
index 5cdc5612bfc1..839d12f02c88 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -666,6 +666,9 @@ static void vmg_adjust_set_range(struct vma_merge_struct *vmg)
/*
* Actually perform the VMA merge operation.
*
+ * IMPORTANT: We guarantee that, should vmg->give_up_on_oom be set, we do not
+ * modify any VMAs or cause inconsistent state should an OOM condition arise.
+ *
* Returns 0 on success, or an error value on failure.
*/
static int commit_merge(struct vma_merge_struct *vmg)
@@ -685,6 +688,12 @@ static int commit_merge(struct vma_merge_struct *vmg)
init_multi_vma_prep(&vp, vma, vmg);
+ /*
+ * If vmg->give_up_on_oom is set, we're safe, because we don't actually
+ * manipulate any VMAs until we succeed at preallocation.
+ *
+ * Past this point, we will not return an error.
+ */
if (vma_iter_prealloc(vmg->vmi, vma))
return -ENOMEM;
@@ -915,7 +924,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
if (anon_dup)
unlink_anon_vmas(anon_dup);
- vmg->state = VMA_MERGE_ERROR_NOMEM;
+ /*
+ * We've cleaned up any cloned anon_vma's, no VMAs have been
+ * modified, no harm no foul if the user requests that we not
+ * report this and just give up, leaving the VMAs unmerged.
+ */
+ if (!vmg->give_up_on_oom)
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
return NULL;
}
@@ -926,7 +941,15 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
abort:
vma_iter_set(vmg->vmi, start);
vma_iter_load(vmg->vmi);
- vmg->state = VMA_MERGE_ERROR_NOMEM;
+
+ /*
+ * This means we have failed to clone anon_vma's correctly, but no
+ * actual changes to VMAs have occurred, so no harm no foul - if the
+ * user doesn't want this reported and instead just wants to give up on
+ * the merge, allow it.
+ */
+ if (!vmg->give_up_on_oom)
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
return NULL;
}
@@ -1068,6 +1091,10 @@ int vma_expand(struct vma_merge_struct *vmg)
/* This should already have been checked by this point. */
VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
vma_start_write(next);
+ /*
+		 * In this case we don't report OOM, so vmg->give_up_on_oom is
+ * safe.
+ */
ret = dup_anon_vma(middle, next, &anon_dup);
if (ret)
return ret;
@@ -1090,9 +1117,15 @@ int vma_expand(struct vma_merge_struct *vmg)
return 0;
nomem:
- vmg->state = VMA_MERGE_ERROR_NOMEM;
if (anon_dup)
unlink_anon_vmas(anon_dup);
+ /*
+	 * If the user requests that we just give up on OOM, we are safe to do so
+ * here, as commit merge provides this contract to us. Nothing has been
+ * changed - no harm no foul, just don't report it.
+ */
+ if (!vmg->give_up_on_oom)
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
return -ENOMEM;
}
@@ -1534,6 +1567,13 @@ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
if (vmg_nomem(vmg))
return ERR_PTR(-ENOMEM);
+ /*
+ * Split can fail for reasons other than OOM, so if the user requests
+ * this it's probably a mistake.
+ */
+ VM_WARN_ON(vmg->give_up_on_oom &&
+ (vma->vm_start != start || vma->vm_end != end));
+
/* Split any preceding portion of the VMA. */
if (vma->vm_start < start) {
int err = split_vma(vmg->vmi, vma, start, 1);
@@ -1602,12 +1642,15 @@ struct vm_area_struct
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long new_flags,
- struct vm_userfaultfd_ctx new_ctx)
+ struct vm_userfaultfd_ctx new_ctx,
+ bool give_up_on_oom)
{
VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
vmg.flags = new_flags;
vmg.uffd_ctx = new_ctx;
+ if (give_up_on_oom)
+ vmg.give_up_on_oom = true;
return vma_modify(&vmg);
}
diff --git a/mm/vma.h b/mm/vma.h
index 7356ca5a22d3..149926e8a6d1 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -114,6 +114,12 @@ struct vma_merge_struct {
*/
bool just_expand :1;
+ /*
+ * If a merge is possible, but an OOM error occurs, give up and don't
+ * execute the merge, returning NULL.
+ */
+ bool give_up_on_oom :1;
+
/* Internal flags set during merge process: */
/*
@@ -255,7 +261,8 @@ __must_check struct vm_area_struct
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long new_flags,
- struct vm_userfaultfd_ctx new_ctx);
+ struct vm_userfaultfd_ctx new_ctx,
+ bool give_up_on_oom);
__must_check struct vm_area_struct
*vma_merge_new_range(struct vma_merge_struct *vmg);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f145f9662653..7cd4bdcee439 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -725,7 +725,6 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
kref_get(&hard_iface->refcount);
- dev_hold(mesh_iface);
netdev_hold(mesh_iface, &hard_iface->meshif_dev_tracker, GFP_ATOMIC);
hard_iface->mesh_iface = mesh_iface;
bat_priv = netdev_priv(hard_iface->mesh_iface);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 95972fd4c784..7e1b53857648 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -3072,6 +3072,7 @@ void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
const struct sockcm_cookie *sockc)
{
struct sock *sk = skb ? skb->sk : NULL;
+ int key;
/* This shall be called on a single skb of those generated by user
* sendmsg(), and only when the sendmsg() does not return error to
@@ -3087,13 +3088,16 @@ void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags);
+ if (sk->sk_type == SOCK_STREAM)
+ key = atomic_add_return(key_offset, &sk->sk_tskey);
+
if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID &&
sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) {
skb_shinfo(skb)->tskey = sockc->ts_opt_id;
} else {
- int key = atomic_add_return(key_offset, &sk->sk_tskey);
-
+ if (sk->sk_type != SOCK_STREAM)
+ key = atomic_inc_return(&sk->sk_tskey);
skb_shinfo(skb)->tskey = key - 1;
}
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1d8616f2e740..5f808f0b0e9a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -6160,11 +6160,12 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* event or send an immediate device found event if the data
* should not be stored for later.
*/
- if (!ext_adv && !has_pending_adv_report(hdev)) {
+ if (!has_pending_adv_report(hdev)) {
/* If the report will trigger a SCAN_REQ store it for
* later merging.
*/
- if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+ if (!ext_adv && (type == LE_ADV_IND ||
+ type == LE_ADV_SCAN_IND)) {
store_pending_adv_report(hdev, bdaddr, bdaddr_type,
rssi, flags, data, len);
return;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index c7b66b2ea9f2..5ca7ac43c58d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3991,7 +3991,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
- !hci_conn_check_link_mode(conn->hcon)) {
+ (!hci_conn_check_link_mode(conn->hcon) ||
+ !l2cap_check_enc_key_size(conn->hcon))) {
conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
result = L2CAP_CR_SEC_BLOCK;
goto response;
@@ -7538,8 +7539,24 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (skb->len > len) {
BT_ERR("Frame is too long (len %u, expected len %d)",
skb->len, len);
+ /* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
+ * (Multiple Signaling Command in one PDU, Data
+ * Truncated, BR/EDR) send a C-frame to the IUT with
+ * PDU Length set to 8 and Channel ID set to the
+ * correct signaling channel for the logical link.
+ * The Information payload contains one L2CAP_ECHO_REQ
+ * packet with Data Length set to 0 with 0 octets of
+ * echo data and one invalid command packet due to
+ * data truncated in PDU but present in HCI packet.
+ *
+		 * Shorten the socket buffer to the PDU length to
+		 * allow processing the valid commands from the PDU before
+ * setting the socket unreliable.
+ */
+ skb->len = len;
+ l2cap_recv_frame(conn, skb);
l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
+ goto unlock;
}
/* Append fragment into frame (with header) */
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index d9a69ec9affe..939a3aa78d5c 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -715,8 +715,8 @@ static int br_vlan_add_existing(struct net_bridge *br,
u16 flags, bool *changed,
struct netlink_ext_ack *extack)
{
- bool would_change = __vlan_flags_would_change(vlan, flags);
bool becomes_brentry = false;
+ bool would_change = false;
int err;
if (!br_vlan_is_brentry(vlan)) {
@@ -725,6 +725,8 @@ static int br_vlan_add_existing(struct net_bridge *br,
return -EINVAL;
becomes_brentry = true;
+ } else {
+ would_change = __vlan_flags_would_change(vlan, flags);
}
/* Master VLANs that aren't brentries weren't notified before,
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 17226b2341d0..6fefe7a68761 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -655,6 +655,7 @@ static int j1939_sk_release(struct socket *sock)
sock->sk = NULL;
release_sock(sk);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sock_put(sk);
return 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 03d20a98f8b7..d1a8cad0c99c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1572,6 +1572,8 @@ EXPORT_SYMBOL(netdev_features_change);
void netif_state_change(struct net_device *dev)
{
+ netdev_ops_assert_locked_or_invisible(dev);
+
if (dev->flags & IFF_UP) {
struct netdev_notifier_change_info change_info = {
.info.dev = dev,
@@ -11986,15 +11988,24 @@ void unregister_netdevice_many_notify(struct list_head *head,
BUG_ON(dev->reg_state != NETREG_REGISTERED);
}
- /* If device is running, close it first. */
+ /* If device is running, close it first. Start with ops locked... */
list_for_each_entry(dev, head, unreg_list) {
- list_add_tail(&dev->close_list, &close_head);
- netdev_lock_ops(dev);
+ if (netdev_need_ops_lock(dev)) {
+ list_add_tail(&dev->close_list, &close_head);
+ netdev_lock(dev);
+ }
+ }
+ dev_close_many(&close_head, true);
+ /* ... now unlock them and go over the rest. */
+ list_for_each_entry(dev, head, unreg_list) {
+ if (netdev_need_ops_lock(dev))
+ netdev_unlock(dev);
+ else
+ list_add_tail(&dev->close_list, &close_head);
}
dev_close_many(&close_head, true);
list_for_each_entry(dev, head, unreg_list) {
- netdev_unlock_ops(dev);
/* And unlink it from device chain. */
unlist_netdevice(dev);
netdev_lock(dev);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 06052b6c946b..8ca634964e36 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -257,6 +257,24 @@ static int nla_put_port_range(struct sk_buff *skb, int attrtype,
return nla_put(skb, attrtype, sizeof(*range), range);
}
+static bool fib_rule_iif_match(const struct fib_rule *rule, int iifindex,
+ const struct flowi *fl)
+{
+ u8 iif_is_l3_master = READ_ONCE(rule->iif_is_l3_master);
+
+ return iif_is_l3_master ? l3mdev_fib_rule_iif_match(fl, iifindex) :
+ fl->flowi_iif == iifindex;
+}
+
+static bool fib_rule_oif_match(const struct fib_rule *rule, int oifindex,
+ const struct flowi *fl)
+{
+ u8 oif_is_l3_master = READ_ONCE(rule->oif_is_l3_master);
+
+ return oif_is_l3_master ? l3mdev_fib_rule_oif_match(fl, oifindex) :
+ fl->flowi_oif == oifindex;
+}
+
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
struct flowi *fl, int flags,
struct fib_lookup_arg *arg)
@@ -264,11 +282,11 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
int iifindex, oifindex, ret = 0;
iifindex = READ_ONCE(rule->iifindex);
- if (iifindex && (iifindex != fl->flowi_iif))
+ if (iifindex && !fib_rule_iif_match(rule, iifindex, fl))
goto out;
oifindex = READ_ONCE(rule->oifindex);
- if (oifindex && (oifindex != fl->flowi_oif))
+ if (oifindex && !fib_rule_oif_match(rule, oifindex, fl))
goto out;
if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
@@ -736,16 +754,20 @@ static int fib_nl2rule_rtnl(struct fib_rule *nlrule,
struct net_device *dev;
dev = __dev_get_by_name(nlrule->fr_net, nlrule->iifname);
- if (dev)
+ if (dev) {
nlrule->iifindex = dev->ifindex;
+ nlrule->iif_is_l3_master = netif_is_l3_master(dev);
+ }
}
if (tb[FRA_OIFNAME]) {
struct net_device *dev;
dev = __dev_get_by_name(nlrule->fr_net, nlrule->oifname);
- if (dev)
+ if (dev) {
nlrule->oifindex = dev->ifindex;
+ nlrule->oif_is_l3_master = netif_is_l3_master(dev);
+ }
}
return 0;
@@ -1338,11 +1360,17 @@ static void attach_rules(struct list_head *rules, struct net_device *dev)
list_for_each_entry(rule, rules, list) {
if (rule->iifindex == -1 &&
- strcmp(dev->name, rule->iifname) == 0)
+ strcmp(dev->name, rule->iifname) == 0) {
WRITE_ONCE(rule->iifindex, dev->ifindex);
+ WRITE_ONCE(rule->iif_is_l3_master,
+ netif_is_l3_master(dev));
+ }
if (rule->oifindex == -1 &&
- strcmp(dev->name, rule->oifname) == 0)
+ strcmp(dev->name, rule->oifname) == 0) {
WRITE_ONCE(rule->oifindex, dev->ifindex);
+ WRITE_ONCE(rule->oif_is_l3_master,
+ netif_is_l3_master(dev));
+ }
}
}
@@ -1351,10 +1379,14 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
struct fib_rule *rule;
list_for_each_entry(rule, rules, list) {
- if (rule->iifindex == dev->ifindex)
+ if (rule->iifindex == dev->ifindex) {
WRITE_ONCE(rule->iifindex, -1);
- if (rule->oifindex == dev->ifindex)
+ WRITE_ONCE(rule->iif_is_l3_master, false);
+ }
+ if (rule->oifindex == dev->ifindex) {
WRITE_ONCE(rule->oifindex, -1);
+ WRITE_ONCE(rule->oif_is_l3_master, false);
+ }
}
}
diff --git a/net/core/filter.c b/net/core/filter.c
index bc6828761a47..79cab4d78dc3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -218,24 +218,36 @@ BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
return 0;
}
+static int bpf_skb_load_helper_convert_offset(const struct sk_buff *skb, int offset)
+{
+ if (likely(offset >= 0))
+ return offset;
+
+ if (offset >= SKF_NET_OFF)
+ return offset - SKF_NET_OFF + skb_network_offset(skb);
+
+ if (offset >= SKF_LL_OFF && skb_mac_header_was_set(skb))
+ return offset - SKF_LL_OFF + skb_mac_offset(skb);
+
+ return INT_MIN;
+}
+
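To make the conversion concrete: for an Ethernet frame whose network header begins 14 bytes into the skb, a classic-BPF load at SKF_NET_OFF + 2 maps to 2 + skb_network_offset() = 16, an absolute offset two bytes into the IP header; SKF_LL_OFF works the same way relative to the mac header (when one was set), and any other negative offset collapses to INT_MIN, which the load helpers turn into -EFAULT.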
BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
- u8 tmp, *ptr;
+ u8 tmp;
const int len = sizeof(tmp);
- if (offset >= 0) {
- if (headlen - offset >= len)
- return *(u8 *)(data + offset);
- if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
- return tmp;
- } else {
- ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
- if (likely(ptr))
- return *(u8 *)ptr;
- }
+ offset = bpf_skb_load_helper_convert_offset(skb, offset);
+ if (offset == INT_MIN)
+ return -EFAULT;
- return -EFAULT;
+ if (headlen - offset >= len)
+ return *(u8 *)(data + offset);
+ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+ return tmp;
+ else
+ return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
@@ -248,21 +260,19 @@ BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
- __be16 tmp, *ptr;
+ __be16 tmp;
const int len = sizeof(tmp);
- if (offset >= 0) {
- if (headlen - offset >= len)
- return get_unaligned_be16(data + offset);
- if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
- return be16_to_cpu(tmp);
- } else {
- ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
- if (likely(ptr))
- return get_unaligned_be16(ptr);
- }
+ offset = bpf_skb_load_helper_convert_offset(skb, offset);
+ if (offset == INT_MIN)
+ return -EFAULT;
- return -EFAULT;
+ if (headlen - offset >= len)
+ return get_unaligned_be16(data + offset);
+ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+ return be16_to_cpu(tmp);
+ else
+ return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
@@ -275,21 +285,19 @@ BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
- __be32 tmp, *ptr;
+ __be32 tmp;
const int len = sizeof(tmp);
- if (likely(offset >= 0)) {
- if (headlen - offset >= len)
- return get_unaligned_be32(data + offset);
- if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
- return be32_to_cpu(tmp);
- } else {
- ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
- if (likely(ptr))
- return get_unaligned_be32(ptr);
- }
+ offset = bpf_skb_load_helper_convert_offset(skb, offset);
+ if (offset == INT_MIN)
+ return -EFAULT;
- return -EFAULT;
+ if (headlen - offset >= len)
+ return get_unaligned_be32(data + offset);
+ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+ return be32_to_cpu(tmp);
+ else
+ return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f5c018090efc..ea2e15de8d99 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3677,11 +3677,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
nla_len(tb[IFLA_BROADCAST]));
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
- if (tb[IFLA_OPERSTATE]) {
- netdev_lock_ops(dev);
+ if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
- netdev_unlock_ops(dev);
- }
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
if (tb[IFLA_GROUP])
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e827775baf2e..436a7e1b412a 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -862,6 +862,16 @@ static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
kfree(dst->lags);
}
+static void dsa_tree_teardown_routing_table(struct dsa_switch_tree *dst)
+{
+ struct dsa_link *dl, *next;
+
+ list_for_each_entry_safe(dl, next, &dst->rtable, list) {
+ list_del(&dl->list);
+ kfree(dl);
+ }
+}
+
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
bool complete;
@@ -879,7 +889,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
err = dsa_tree_setup_cpu_ports(dst);
if (err)
- return err;
+ goto teardown_rtable;
err = dsa_tree_setup_switches(dst);
if (err)
@@ -911,14 +921,14 @@ teardown_switches:
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
+teardown_rtable:
+ dsa_tree_teardown_routing_table(dst);
return err;
}
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
- struct dsa_link *dl, *next;
-
if (!dst->setup)
return;
@@ -932,10 +942,7 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_cpu_ports(dst);
- list_for_each_entry_safe(dl, next, &dst->rtable, list) {
- list_del(&dl->list);
- kfree(dl);
- }
+ dsa_tree_teardown_routing_table(dst);
pr_info("DSA: tree %d torn down\n", dst->index);
@@ -1478,12 +1485,44 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
+ struct dsa_mac_addr *a, *tmp;
struct dsa_port *dp, *next;
+ struct dsa_vlan *v, *n;
dsa_switch_for_each_port_safe(dp, next, ds) {
- WARN_ON(!list_empty(&dp->fdbs));
- WARN_ON(!list_empty(&dp->mdbs));
- WARN_ON(!list_empty(&dp->vlans));
+ /* These are either entries that upper layers lost track of
+		 * (probably due to bugs), or that were installed through interfaces
+ * where one does not necessarily have to remove them, like
+ * ndo_dflt_fdb_add().
+ */
+ list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
+ dev_info(ds->dev,
+ "Cleaning up unicast address %pM vid %u from port %d\n",
+ a->addr, a->vid, dp->index);
+ list_del(&a->list);
+ kfree(a);
+ }
+
+ list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
+ dev_info(ds->dev,
+ "Cleaning up multicast address %pM vid %u from port %d\n",
+ a->addr, a->vid, dp->index);
+ list_del(&a->list);
+ kfree(a);
+ }
+
+ /* These are entries that upper layers have lost track of,
+ * probably due to bugs, but also due to dsa_port_do_vlan_del()
+ * having failed and the VLAN entry still lingering on.
+ */
+ list_for_each_entry_safe(v, n, &dp->vlans, list) {
+ dev_info(ds->dev,
+ "Cleaning up vid %u from port %d\n",
+ v->vid, dp->index);
+ list_del(&v->list);
+ kfree(v);
+ }
+
list_del(&dp->list);
kfree(dp);
}
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 3ee53e28ec2e..53e03fd8071b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -197,7 +197,7 @@ static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
err = ds->ops->tag_8021q_vlan_del(ds, port, vid);
if (err) {
- refcount_inc(&v->refcount);
+ refcount_set(&v->refcount, 1);
return err;
}
diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c
index 0e2691ccb0df..3057576bc81e 100644
--- a/net/ethtool/cmis_cdb.c
+++ b/net/ethtool/cmis_cdb.c
@@ -351,7 +351,7 @@ ethtool_cmis_module_poll(struct net_device *dev,
struct netlink_ext_ack extack = {};
int err;
- ethtool_cmis_page_init(&page_data, 0, offset, sizeof(rpl));
+ ethtool_cmis_page_init(&page_data, 0, offset, sizeof(*rpl));
page_data.data = (u8 *)rpl;
err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e2c6c0b0684b..945857a8bfe3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1771,6 +1771,7 @@ out:
if (!err) {
spin_lock_bh(&f6i->fib6_table->tb6_lock);
fib6_update_sernum(net, f6i);
+ fib6_add_gc_list(f6i);
spin_unlock_bh(&f6i->fib6_table->tb6_lock);
fib6_force_start_gc(net);
}
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index ca10916340b0..5432a5f2dfc8 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -277,8 +277,10 @@ void l3mdev_update_flow(struct net *net, struct flowi *fl)
if (fl->flowi_oif) {
dev = dev_get_by_index_rcu(net, fl->flowi_oif);
if (dev) {
- if (!fl->flowi_l3mdev)
+ if (!fl->flowi_l3mdev) {
fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);
+ fl->flowi_flags |= FLOWI_FLAG_L3MDEV_OIF;
+ }
/* oif set to L3mdev directs lookup to its table;
* reset to avoid oif match in fib_lookup
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f0f4a250b10e..969b3e2c496a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -659,6 +659,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
ieee80211_txq_remove_vlan(local, sdata);
+ if (sdata->vif.txq)
+ ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
sdata->bss = NULL;
if (local->open_count == 0)
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index dd895617defd..9b12ca97f412 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -630,6 +630,9 @@ static int mctp_sk_hash(struct sock *sk)
{
struct net *net = sock_net(sk);
+	/* Bind lookups run under RCU; the sock must remain live for them. */
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
mutex_lock(&net->mctp.bind_lock);
sk_add_node_rcu(sk, &net->mctp.binds);
mutex_unlock(&net->mctp.bind_lock);
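
SOCK_RCU_FREE makes the socket's memory go through an RCU grace period before being freed, so a lockless walker of the bind list cannot hit a use-after-free. A sketch of the reader side this protects, with a hypothetical match predicate:

	#include <net/sock.h>

	static bool matches(const struct sock *sk, u16 type)
	{
		return sk->sk_type == type;	/* hypothetical predicate */
	}

	static struct sock *bind_lookup(struct hlist_head *binds, u16 type)
	{
		struct sock *sk;

		rcu_read_lock();
		sk_for_each_rcu(sk, binds) {
			if (matches(sk, type)) {
				/* Safe: SOCK_RCU_FREE defers the free past
				 * this read-side critical section. */
				sock_hold(sk);
				break;
			}
		}
		rcu_read_unlock();
		return sk;	/* NULL when the walk found nothing */
	}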
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 9d8361526f82..9441ac3d8c1a 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -383,8 +383,8 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
void flow_offload_teardown(struct flow_offload *flow)
{
clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
- set_bit(NF_FLOW_TEARDOWN, &flow->flags);
- flow_offload_fixup_ct(flow);
+ if (!test_and_set_bit(NF_FLOW_TEARDOWN, &flow->flags))
+ flow_offload_fixup_ct(flow);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
@@ -558,10 +558,12 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
- nf_flow_custom_gc(flow_table, flow))
+ nf_flow_custom_gc(flow_table, flow)) {
flow_offload_teardown(flow);
- else if (!teardown)
+ teardown = true;
+ } else if (!teardown) {
nf_flow_table_extend_ct_timeout(flow->ct);
+ }
if (teardown) {
if (test_bit(NF_FLOW_HW, &flow->flags)) {
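
test_and_set_bit() is an atomic read-modify-write, so when two paths race to tear a flow down, exactly one of them observes the bit clear and runs the conntrack fixup; the change above uses that to keep flow_offload_fixup_ct() from running twice. The bare idiom, with hypothetical names:

	#include <linux/bitops.h>

	#define MY_TEARDOWN_BIT	0	/* hypothetical flag bit */

	static void do_fixup(void)	/* hypothetical one-shot work */
	{
	}

	static void teardown_once(unsigned long *flags)
	{
		/* Atomic RMW: only the first caller sees the old bit as 0. */
		if (!test_and_set_bit(MY_TEARDOWN_BIT, flags))
			do_fixup();
	}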
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 95e0dd14dc1a..518be23e48ea 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2876,7 +2876,8 @@ static int validate_set(const struct nlattr *a,
size_t key_len;
	/* There can be only one key in an action */
- if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
+ if (!nla_ok(ovs_key, nla_len(a)) ||
+ nla_total_size(nla_len(ovs_key)) != nla_len(a))
return -EINVAL;
key_len = nla_len(ovs_key);
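
nla_ok() checks that the attribute header fits in the remaining buffer and that its advertised length does, too; only after that can nla_len() on the inner attribute be trusted. A minimal sketch of the validation order (function name hypothetical):

	#include <linux/errno.h>
	#include <net/netlink.h>

	static int parse_one_nested(const struct nlattr *outer)
	{
		const struct nlattr *inner = nla_data(outer);
		int rem = nla_len(outer);

		/* Reject truncated or oversized inner headers up front. */
		if (!nla_ok(inner, rem))
			return -EINVAL;

		/* ... nla_len(inner) is now safe to use ... */
		return 0;
	}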
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 3e6cb35baf25..3760131f1484 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -362,6 +362,9 @@ static void smc_destruct(struct sock *sk)
return;
}
+static struct lock_class_key smc_key;
+static struct lock_class_key smc_slock_key;
+
void smc_sk_init(struct net *net, struct sock *sk, int protocol)
{
struct smc_sock *smc = smc_sk(sk);
@@ -375,6 +378,8 @@ void smc_sk_init(struct net *net, struct sock *sk, int protocol)
INIT_WORK(&smc->connect_work, smc_connect_work);
INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
INIT_LIST_HEAD(&smc->accept_q);
+ sock_lock_init_class_and_name(sk, "slock-AF_SMC", &smc_slock_key,
+ "sk_lock-AF_SMC", &smc_key);
spin_lock_init(&smc->accept_q_lock);
spin_lock_init(&smc->conn.send_lock);
sk->sk_prot->hash(sk);
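
sock_lock_init_class_and_name() gives each protocol family its own lockdep keys and lock names, so that nesting an AF_SMC socket lock inside another family's lock is not misreported as a self-deadlock. The general idiom, shown here with a hypothetical family:

	#include <net/sock.h>

	/* One static key pair per protocol family. */
	static struct lock_class_key demo_key;
	static struct lock_class_key demo_slock_key;

	static void demo_sock_init(struct sock *sk)
	{
		sock_lock_init_class_and_name(sk,
					      "slock-AF_DEMO", &demo_slock_key,
					      "sk_lock-AF_DEMO", &demo_key);
	}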
diff --git a/samples/livepatch/livepatch-callbacks-busymod.c b/samples/livepatch/livepatch-callbacks-busymod.c
index 69105596e72e..fadc2a85cb35 100644
--- a/samples/livepatch/livepatch-callbacks-busymod.c
+++ b/samples/livepatch/livepatch-callbacks-busymod.c
@@ -56,4 +56,5 @@ static void livepatch_callbacks_mod_exit(void)
module_init(livepatch_callbacks_mod_init);
module_exit(livepatch_callbacks_mod_exit);
+MODULE_DESCRIPTION("Live patching demo for (un)patching callbacks, support module");
MODULE_LICENSE("GPL");
diff --git a/samples/livepatch/livepatch-callbacks-demo.c b/samples/livepatch/livepatch-callbacks-demo.c
index 11c3f4357812..9e69d9caed25 100644
--- a/samples/livepatch/livepatch-callbacks-demo.c
+++ b/samples/livepatch/livepatch-callbacks-demo.c
@@ -192,5 +192,6 @@ static void livepatch_callbacks_demo_exit(void)
module_init(livepatch_callbacks_demo_init);
module_exit(livepatch_callbacks_demo_exit);
+MODULE_DESCRIPTION("Live patching demo for (un)patching callbacks");
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
diff --git a/samples/livepatch/livepatch-callbacks-mod.c b/samples/livepatch/livepatch-callbacks-mod.c
index 2a074f422a51..d1851b471ad9 100644
--- a/samples/livepatch/livepatch-callbacks-mod.c
+++ b/samples/livepatch/livepatch-callbacks-mod.c
@@ -38,4 +38,5 @@ static void livepatch_callbacks_mod_exit(void)
module_init(livepatch_callbacks_mod_init);
module_exit(livepatch_callbacks_mod_exit);
+MODULE_DESCRIPTION("Live patching demo for (un)patching callbacks, support module");
MODULE_LICENSE("GPL");
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index cd76d7ebe598..5263a2f31c48 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -66,5 +66,6 @@ static void livepatch_exit(void)
module_init(livepatch_init);
module_exit(livepatch_exit);
+MODULE_DESCRIPTION("Kernel Live Patching Sample Module");
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index f3f153895d6c..cbf68ca40097 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -168,5 +168,6 @@ static void livepatch_shadow_fix1_exit(void)
module_init(livepatch_shadow_fix1_init);
module_exit(livepatch_shadow_fix1_exit);
+MODULE_DESCRIPTION("Live patching demo for shadow variables");
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c
index 361046a4f10c..b99122cb221f 100644
--- a/samples/livepatch/livepatch-shadow-fix2.c
+++ b/samples/livepatch/livepatch-shadow-fix2.c
@@ -128,5 +128,6 @@ static void livepatch_shadow_fix2_exit(void)
module_init(livepatch_shadow_fix2_init);
module_exit(livepatch_shadow_fix2_exit);
+MODULE_DESCRIPTION("Live patching demo for shadow variables");
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
diff --git a/sound/isa/azt2320.c b/sound/isa/azt2320.c
index b937c9138d12..588b9f0831d3 100644
--- a/sound/isa/azt2320.c
+++ b/sound/isa/azt2320.c
@@ -189,8 +189,8 @@ static int snd_card_azt2320_probe(int dev,
if (error < 0)
return error;
- strcpy(card->driver, "AZT2320");
- strcpy(card->shortname, "Aztech AZT2320");
+ strscpy(card->driver, "AZT2320");
+ strscpy(card->shortname, "Aztech AZT2320");
sprintf(card->longname, "%s, WSS at 0x%lx, irq %i, dma %i&%i",
card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]);
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index fb955a205d50..9c427270ff4f 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -96,9 +96,7 @@ config SND_HDA_CIRRUS_SCODEC
config SND_HDA_CIRRUS_SCODEC_KUNIT_TEST
tristate "KUnit test for Cirrus side-codec library" if !KUNIT_ALL_TESTS
- select SND_HDA_CIRRUS_SCODEC
- select GPIOLIB
- depends on KUNIT
+ depends on SND_HDA_CIRRUS_SCODEC && GPIOLIB && KUNIT
default KUNIT_ALL_TESTS
help
This builds KUnit tests for the cirrus side-codec library.
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 79004bc8107b..877137cb09ac 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7969,6 +7969,7 @@ enum {
ALC233_FIXUP_MEDION_MTL_SPK,
ALC294_FIXUP_BASS_SPEAKER_15,
ALC283_FIXUP_DELL_HP_RESUME,
+ ALC294_FIXUP_ASUS_CS35L41_SPI_2,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -10333,6 +10334,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc283_fixup_dell_hp_resume,
},
+ [ALC294_FIXUP_ASUS_CS35L41_SPI_2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs35l41_fixup_spi_two,
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC,
+ },
};
static const struct hda_quirk alc269_fixup_tbl[] = {
@@ -10835,7 +10842,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
@@ -10925,14 +10932,14 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x3011, "ASUS B5605CVA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
- SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27),
- SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
diff --git a/sound/pci/hda/tas2781_hda_spi.c b/sound/pci/hda/tas2781_hda_spi.c
index 399f2e4c3b62..25175ff4b3aa 100644
--- a/sound/pci/hda/tas2781_hda_spi.c
+++ b/sound/pci/hda/tas2781_hda_spi.c
@@ -1003,8 +1003,7 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
*/
out:
- if (fmw)
- release_firmware(fmw);
+ release_firmware(fmw);
pm_runtime_mark_last_busy(tas_hda->priv->dev);
pm_runtime_put_autosuspend(tas_hda->priv->dev);
}
diff --git a/sound/soc/codecs/aw88081.c b/sound/soc/codecs/aw88081.c
index ad16ab6812cd..3dd8428f08cc 100644
--- a/sound/soc/codecs/aw88081.c
+++ b/sound/soc/codecs/aw88081.c
@@ -1295,9 +1295,19 @@ static int aw88081_i2c_probe(struct i2c_client *i2c)
aw88081_dai, ARRAY_SIZE(aw88081_dai));
}
+#if defined(CONFIG_OF)
+static const struct of_device_id aw88081_of_match[] = {
+ { .compatible = "awinic,aw88081" },
+ { .compatible = "awinic,aw88083" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aw88081_of_match);
+#endif
+
static struct i2c_driver aw88081_i2c_driver = {
.driver = {
.name = AW88081_I2C_NAME,
+ .of_match_table = of_match_ptr(aw88081_of_match),
},
.probe = aw88081_i2c_probe,
.id_table = aw88081_i2c_id,
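
The CONFIG_OF guard pairs with of_match_ptr(): when the kernel is built without OF support the macro expands to NULL, so the table and the reference to it compile away. A hedged sketch of the pattern, with a hypothetical compatible string:

	#include <linux/mod_devicetable.h>
	#include <linux/of.h>

	#if defined(CONFIG_OF)
	static const struct of_device_id demo_of_match[] = {
		{ .compatible = "vendor,demo" },
		{ }	/* sentinel entry terminates the table */
	};
	MODULE_DEVICE_TABLE(of, demo_of_match);
	#endif

	/*
	 * In the driver definition:
	 *	.driver.of_match_table = of_match_ptr(demo_of_match),
	 * of_match_ptr() is NULL when !CONFIG_OF, so no dangling reference
	 * remains to the compiled-out table.
	 */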
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index ac19a572fe70..20e6ab6f0d4a 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -702,6 +702,9 @@ static void cs42l43_clear_jack(struct cs42l43_codec *priv)
CS42L43_PGA_WIDESWING_MODE_EN_MASK, 0);
regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CTRL,
CS42L43_JACK_STEREO_CONFIG_MASK, 0);
+ regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL,
+ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK,
+ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK);
regmap_update_bits(cs42l43->regmap, CS42L43_HS2,
CS42L43_HSDET_MODE_MASK | CS42L43_HSDET_MANUAL_MODE_MASK,
0x2 << CS42L43_HSDET_MODE_SHIFT);
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 17019b1d680b..bc01ff65bd6f 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -842,12 +842,28 @@ static void print_eld_info(struct snd_info_entry *entry,
static int hdmi_dai_proc_new(struct hdmi_codec_priv *hcp,
struct snd_soc_dai *dai)
{
+ struct snd_soc_component *component = dai->component;
+ struct snd_soc_card *card = component->card;
+ struct snd_soc_dai *d;
+ struct snd_soc_pcm_runtime *rtd;
struct snd_info_entry *entry;
char name[32];
- int err;
+ int err, i, id = 0;
- snprintf(name, sizeof(name), "eld#%d", dai->id);
- err = snd_card_proc_new(dai->component->card->snd_card, name, &entry);
+ /*
+	 * To avoid duplicate proc entries, find the DAI's rtd and use rtd->id
+	 * instead of dai->id
+ */
+ for_each_card_rtds(card, rtd) {
+ for_each_rtd_dais(rtd, i, d)
+ if (d == dai) {
+ id = rtd->id;
+ goto found;
+ }
+ }
+found:
+ snprintf(name, sizeof(name), "eld#%d", id);
+ err = snd_card_proc_new(card->snd_card, name, &entry);
if (err < 0)
return err;
diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
index b261fa373e65..c1fb71cfb5d0 100644
--- a/sound/soc/codecs/lpass-wsa-macro.c
+++ b/sound/soc/codecs/lpass-wsa-macro.c
@@ -63,6 +63,10 @@
#define CDC_WSA_TX_SPKR_PROT_CLK_DISABLE 0
#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK GENMASK(3, 0)
#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K 0
+#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K 1
+#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K 2
+#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K 3
+#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K 4
#define CDC_WSA_TX0_SPKR_PROT_PATH_CFG0 (0x0248)
#define CDC_WSA_TX1_SPKR_PROT_PATH_CTL (0x0264)
#define CDC_WSA_TX1_SPKR_PROT_PATH_CFG0 (0x0268)
@@ -407,6 +411,7 @@ struct wsa_macro {
int ear_spkr_gain;
int spkr_gain_offset;
int spkr_mode;
+ u32 pcm_rate_vi;
int is_softclip_on[WSA_MACRO_SOFTCLIP_MAX];
int softclip_clk_users[WSA_MACRO_SOFTCLIP_MAX];
struct regmap *regmap;
@@ -1280,6 +1285,7 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
+ struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
int ret;
switch (substream->stream) {
@@ -1292,6 +1298,11 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream,
return ret;
}
break;
+ case SNDRV_PCM_STREAM_CAPTURE:
+ if (dai->id == WSA_MACRO_AIF_VI)
+ wsa->pcm_rate_vi = params_rate(params);
+
+ break;
default:
break;
}
@@ -1448,35 +1459,11 @@ static void wsa_macro_mclk_enable(struct wsa_macro *wsa, bool mclk_enable)
}
}
-static int wsa_macro_mclk_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+static void wsa_macro_enable_disable_vi_sense(struct snd_soc_component *component, bool enable,
+ u32 tx_reg0, u32 tx_reg1, u32 val)
{
- struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
- struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
-
- wsa_macro_mclk_enable(wsa, event == SND_SOC_DAPM_PRE_PMU);
- return 0;
-}
-
-static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event)
-{
- struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
- struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
- u32 tx_reg0, tx_reg1;
-
- if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) {
- tx_reg0 = CDC_WSA_TX0_SPKR_PROT_PATH_CTL;
- tx_reg1 = CDC_WSA_TX1_SPKR_PROT_PATH_CTL;
- } else if (test_bit(WSA_MACRO_TX1, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) {
- tx_reg0 = CDC_WSA_TX2_SPKR_PROT_PATH_CTL;
- tx_reg1 = CDC_WSA_TX3_SPKR_PROT_PATH_CTL;
- }
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- /* Enable V&I sensing */
+ if (enable) {
+ /* Enable V&I sensing */
snd_soc_component_update_bits(component, tx_reg0,
CDC_WSA_TX_SPKR_PROT_RESET_MASK,
CDC_WSA_TX_SPKR_PROT_RESET);
@@ -1485,10 +1472,10 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
CDC_WSA_TX_SPKR_PROT_RESET);
snd_soc_component_update_bits(component, tx_reg0,
CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK,
- CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K);
+ val);
snd_soc_component_update_bits(component, tx_reg1,
CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK,
- CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K);
+ val);
snd_soc_component_update_bits(component, tx_reg0,
CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK,
CDC_WSA_TX_SPKR_PROT_CLK_ENABLE);
@@ -1501,9 +1488,7 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
snd_soc_component_update_bits(component, tx_reg1,
CDC_WSA_TX_SPKR_PROT_RESET_MASK,
CDC_WSA_TX_SPKR_PROT_NO_RESET);
- break;
- case SND_SOC_DAPM_POST_PMD:
- /* Disable V&I sensing */
+ } else {
snd_soc_component_update_bits(component, tx_reg0,
CDC_WSA_TX_SPKR_PROT_RESET_MASK,
CDC_WSA_TX_SPKR_PROT_RESET);
@@ -1516,6 +1501,72 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
snd_soc_component_update_bits(component, tx_reg1,
CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK,
CDC_WSA_TX_SPKR_PROT_CLK_DISABLE);
+ }
+}
+
+static void wsa_macro_enable_disable_vi_feedback(struct snd_soc_component *component,
+ bool enable, u32 rate)
+{
+ struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+
+ if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI]))
+ wsa_macro_enable_disable_vi_sense(component, enable,
+ CDC_WSA_TX0_SPKR_PROT_PATH_CTL,
+ CDC_WSA_TX1_SPKR_PROT_PATH_CTL, rate);
+
+ if (test_bit(WSA_MACRO_TX1, &wsa->active_ch_mask[WSA_MACRO_AIF_VI]))
+ wsa_macro_enable_disable_vi_sense(component, enable,
+ CDC_WSA_TX2_SPKR_PROT_PATH_CTL,
+ CDC_WSA_TX3_SPKR_PROT_PATH_CTL, rate);
+}
+
+static int wsa_macro_mclk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+
+ wsa_macro_mclk_enable(wsa, event == SND_SOC_DAPM_PRE_PMU);
+ return 0;
+}
+
+static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+ u32 rate_val;
+
+ switch (wsa->pcm_rate_vi) {
+ case 8000:
+ rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K;
+ break;
+ case 16000:
+ rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K;
+ break;
+ case 24000:
+ rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K;
+ break;
+ case 32000:
+ rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K;
+ break;
+ case 48000:
+ rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K;
+ break;
+ default:
+ rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K;
+ break;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* Enable V&I sensing */
+ wsa_macro_enable_disable_vi_feedback(component, true, rate_val);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* Disable V&I sensing */
+ wsa_macro_enable_disable_vi_feedback(component, false, rate_val);
break;
}
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 4c4171bb3fbb..28001e9857d9 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -199,12 +199,10 @@ static void i2s_start(struct dw_i2s_dev *dev,
else
i2s_write_reg(dev->i2s_base, IRER, 1);
- /* I2S needs to enable IRQ to make a handshake with DMAC on the JH7110 SoC */
- if (dev->use_pio || dev->is_jh7110)
- i2s_enable_irqs(dev, substream->stream, config->chan_nr);
- else
+ if (!(dev->use_pio || dev->is_jh7110))
i2s_enable_dma(dev, substream->stream);
+ i2s_enable_irqs(dev, substream->stream, config->chan_nr);
i2s_write_reg(dev->i2s_base, CER, 1);
}
@@ -218,11 +216,12 @@ static void i2s_stop(struct dw_i2s_dev *dev,
else
i2s_write_reg(dev->i2s_base, IRER, 0);
- if (dev->use_pio || dev->is_jh7110)
- i2s_disable_irqs(dev, substream->stream, 8);
- else
+ if (!(dev->use_pio || dev->is_jh7110))
i2s_disable_dma(dev, substream->stream);
+ i2s_disable_irqs(dev, substream->stream, 8);
+
if (!dev->active) {
i2s_write_reg(dev->i2s_base, CER, 0);
i2s_write_reg(dev->i2s_base, IER, 0);
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index f501f47242fb..1bba48318e2d 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -156,11 +156,24 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
for_each_dpcm_be(rtd, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *substream_be;
- struct snd_soc_dai *dai = snd_soc_rtd_to_cpu(be, 0);
+ struct snd_soc_dai *dai_cpu = snd_soc_rtd_to_cpu(be, 0);
+ struct snd_soc_dai *dai_codec = snd_soc_rtd_to_codec(be, 0);
+ struct snd_soc_dai *dai;
if (dpcm->fe != rtd)
continue;
+ /*
+		 * With an audio graph card, the original CPU DAI is replaced
+		 * by the codec device in the backend, so if the CPU DAI is a
+		 * dummy device there, use the codec DAI, which is the real
+		 * hardware device connected.
+ */
+ if (!snd_soc_dai_is_dummy(dai_cpu))
+ dai = dai_cpu;
+ else
+ dai = dai_codec;
+
substream_be = snd_soc_dpcm_get_substream(be, stream);
dma_params_be = snd_soc_dai_get_dma_data(dai, substream_be);
dev_be = dai->dev;
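
snd_soc_dai_is_dummy() identifies the placeholder DAI that the DPCM core substitutes on one side of a backend link; the change above falls back to the codec DAI when the CPU side is that dummy. The selection logic boils down to (helper name hypothetical):

	#include <sound/soc.h>

	/* Pick the DAI that represents real hardware in a DPCM backend. */
	static struct snd_soc_dai *be_real_dai(struct snd_soc_pcm_runtime *be)
	{
		struct snd_soc_dai *cpu = snd_soc_rtd_to_cpu(be, 0);

		return snd_soc_dai_is_dummy(cpu) ?
			snd_soc_rtd_to_codec(be, 0) : cpu;
	}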
diff --git a/sound/soc/fsl/fsl_qmc_audio.c b/sound/soc/fsl/fsl_qmc_audio.c
index b2979290c973..5614a8b909ed 100644
--- a/sound/soc/fsl/fsl_qmc_audio.c
+++ b/sound/soc/fsl/fsl_qmc_audio.c
@@ -250,6 +250,9 @@ static int qmc_audio_pcm_trigger(struct snd_soc_component *component,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
bitmap_zero(prtd->chans_pending, 64);
+ prtd->buffer_ended = 0;
+ prtd->ch_dma_addr_current = prtd->ch_dma_addr_start;
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
for (i = 0; i < prtd->channels; i++)
prtd->qmc_dai->chans[i].prtd_tx = prtd;
diff --git a/sound/soc/intel/avs/path.c b/sound/soc/intel/avs/path.c
index ef0c1d125d66..cafb8c6198be 100644
--- a/sound/soc/intel/avs/path.c
+++ b/sound/soc/intel/avs/path.c
@@ -115,6 +115,78 @@ avs_path_find_variant(struct avs_dev *adev,
return NULL;
}
+static struct acpi_nhlt_config *
+avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t);
+
+int avs_path_set_constraint(struct avs_dev *adev, struct avs_tplg_path_template *template,
+ struct snd_pcm_hw_constraint_list *rate_list,
+ struct snd_pcm_hw_constraint_list *channels_list,
+ struct snd_pcm_hw_constraint_list *sample_bits_list)
+{
+ struct avs_tplg_path *path_template;
+ unsigned int *rlist, *clist, *slist;
+ size_t i;
+
+ i = 0;
+ list_for_each_entry(path_template, &template->path_list, node)
+ i++;
+
+	rlist = kcalloc(i, sizeof(*rlist), GFP_KERNEL);
+	clist = kcalloc(i, sizeof(*clist), GFP_KERNEL);
+	slist = kcalloc(i, sizeof(*slist), GFP_KERNEL);
+
+ i = 0;
+ list_for_each_entry(path_template, &template->path_list, node) {
+ struct avs_tplg_pipeline *pipeline_template;
+
+ list_for_each_entry(pipeline_template, &path_template->ppl_list, node) {
+ struct avs_tplg_module *module_template;
+
+ list_for_each_entry(module_template, &pipeline_template->mod_list, node) {
+ const guid_t *type = &module_template->cfg_ext->type;
+ struct acpi_nhlt_config *blob;
+
+ if (!guid_equal(type, &AVS_COPIER_MOD_UUID) &&
+ !guid_equal(type, &AVS_WOVHOSTM_MOD_UUID))
+ continue;
+
+ switch (module_template->cfg_ext->copier.dma_type) {
+ case AVS_DMA_DMIC_LINK_INPUT:
+ case AVS_DMA_I2S_LINK_OUTPUT:
+ case AVS_DMA_I2S_LINK_INPUT:
+ break;
+ default:
+ continue;
+ }
+
+ blob = avs_nhlt_config_or_default(adev, module_template);
+ if (IS_ERR(blob))
+ continue;
+
+ rlist[i] = path_template->fe_fmt->sampling_freq;
+ clist[i] = path_template->fe_fmt->num_channels;
+ slist[i] = path_template->fe_fmt->bit_depth;
+ i++;
+ }
+ }
+ }
+
+ if (i) {
+ rate_list->count = i;
+ rate_list->list = rlist;
+ channels_list->count = i;
+ channels_list->list = clist;
+ sample_bits_list->count = i;
+ sample_bits_list->list = slist;
+ } else {
+ kfree(rlist);
+ kfree(clist);
+ kfree(slist);
+ }
+
+ return i;
+}
+
static void avs_init_node_id(union avs_connector_node_id *node_id,
struct avs_tplg_modcfg_ext *te, u32 dma_id)
{
diff --git a/sound/soc/intel/avs/path.h b/sound/soc/intel/avs/path.h
index 7ed7e94e0a56..c65ed84aa853 100644
--- a/sound/soc/intel/avs/path.h
+++ b/sound/soc/intel/avs/path.h
@@ -69,6 +69,11 @@ int avs_path_reset(struct avs_path *path);
int avs_path_pause(struct avs_path *path);
int avs_path_run(struct avs_path *path, int trigger);
+int avs_path_set_constraint(struct avs_dev *adev, struct avs_tplg_path_template *template,
+ struct snd_pcm_hw_constraint_list *rate_list,
+ struct snd_pcm_hw_constraint_list *channels_list,
+ struct snd_pcm_hw_constraint_list *sample_bits_list);
+
int avs_peakvol_set_volume(struct avs_dev *adev, struct avs_path_module *mod,
struct soc_mixer_control *mc, long *input);
int avs_peakvol_set_mute(struct avs_dev *adev, struct avs_path_module *mod,
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index dac463390da1..d83ef504643b 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -31,6 +31,10 @@ struct avs_dma_data {
struct hdac_ext_stream *host_stream;
};
+ struct snd_pcm_hw_constraint_list rate_list;
+ struct snd_pcm_hw_constraint_list channels_list;
+ struct snd_pcm_hw_constraint_list sample_bits_list;
+
struct work_struct period_elapsed_work;
struct snd_pcm_substream *substream;
};
@@ -74,6 +78,45 @@ void avs_period_elapsed(struct snd_pcm_substream *substream)
schedule_work(&data->period_elapsed_work);
}
+static int hw_rule_param_size(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule);
+static int avs_hw_constraints_init(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_hw_constraint_list *r, *c, *s;
+ struct avs_tplg_path_template *template;
+ struct avs_dma_data *data;
+ int ret;
+
+ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ data = snd_soc_dai_get_dma_data(dai, substream);
+ r = &(data->rate_list);
+ c = &(data->channels_list);
+ s = &(data->sample_bits_list);
+
+ template = avs_dai_find_path_template(dai, !rtd->dai_link->no_pcm, substream->stream);
+ ret = avs_path_set_constraint(data->adev, template, r, c, s);
+ if (ret <= 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, r);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, c);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, s);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int avs_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
@@ -101,7 +144,7 @@ static int avs_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_d
if (rtd->dai_link->ignore_suspend)
adev->num_lp_paths++;
- return 0;
+ return avs_hw_constraints_init(substream, dai);
}
static void avs_dai_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
@@ -114,6 +157,10 @@ static void avs_dai_shutdown(struct snd_pcm_substream *substream, struct snd_soc
if (rtd->dai_link->ignore_suspend)
data->adev->num_lp_paths--;
+ kfree(data->rate_list.list);
+ kfree(data->channels_list.list);
+ kfree(data->sample_bits_list.list);
+
snd_soc_dai_set_dma_data(dai, substream, NULL);
kfree(data);
}
@@ -927,7 +974,8 @@ static int avs_component_probe(struct snd_soc_component *component)
else
mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL,
"hda-generic-tplg.bin");
-
+ if (!mach->tplg_filename)
+ return -ENOMEM;
filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix,
mach->tplg_filename);
if (!filename)
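
snd_pcm_hw_constraint_list() pins a hardware parameter to an explicit value set at open time, which is how the lists built in avs_path_set_constraint() end up limiting what userspace can negotiate. A minimal sketch with a hypothetical static rate list:

	#include <sound/pcm.h>

	static const unsigned int demo_rates[] = { 16000, 48000 };

	static const struct snd_pcm_hw_constraint_list demo_rate_list = {
		.count = ARRAY_SIZE(demo_rates),
		.list = demo_rates,
	};

	static int demo_startup(struct snd_pcm_substream *substream)
	{
		/* Restrict SNDRV_PCM_HW_PARAM_RATE to the listed values. */
		return snd_pcm_hw_constraint_list(substream->runtime, 0,
						  SNDRV_PCM_HW_PARAM_RATE,
						  &demo_rate_list);
	}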
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 90dafa810b2e..095d08b3fc82 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -764,6 +764,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
static const struct snd_pci_quirk sof_sdw_ssid_quirk_table[] = {
SND_PCI_QUIRK(0x1043, 0x1e13, "ASUS Zenbook S14", SOC_SDW_CODEC_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1f43, "ASUS Zenbook S16", SOC_SDW_CODEC_MIC),
{}
};
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 27a2bf9a6613..de3ec6f594c1 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -13,10 +13,11 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/sound/qcom,lpass.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
#include "lpass-hdmi.h"
#define LPASS_AHBIX_CLOCK_FREQUENCY 131072000
-#define LPASS_MAX_PORTS (LPASS_CDC_DMA_VA_TX8 + 1)
+#define LPASS_MAX_PORTS (DISPLAY_PORT_RX_7 + 1)
#define LPASS_MAX_MI2S_PORTS (8)
#define LPASS_MAX_DMA_CHANNELS (8)
#define LPASS_MAX_HDMI_DMA_CHANNELS (4)
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 6d44f8c8a18f..af9d9acaf997 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -43,9 +43,6 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
struct kvm_regs {
struct user_pt_regs regs; /* sp = sp_el0 */
@@ -108,6 +105,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */
+#define KVM_ARM_VCPU_HAS_EL2_E2H0 8 /* Limit NV support to E2H RES0 */
struct kvm_vcpu_init {
__u32 target;
@@ -418,6 +416,7 @@ enum {
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
+#define KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ 9
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index 9306726337fe..df36f23876e8 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -1,24 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_TIME32_SYSCALLS
-#define __ARCH_WANT_MEMFD_SECRET
-
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 9e3fa7942e7d..6c2c152d8a67 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -75,8 +75,8 @@
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
-#define X86_FEATURE_P3 ( 3*32+ 6) /* P3 */
-#define X86_FEATURE_P4 ( 3*32+ 7) /* P4 */
+/* Free ( 3*32+ 6) */
+/* Free ( 3*32+ 7) */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
#define X86_FEATURE_ART ( 3*32+10) /* "art" Always running timer (ART) */
@@ -329,6 +329,7 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */
+#define X86_FEATURE_INVLPGB (13*32+ 3) /* INVLPGB and TLBSYNC instructions supported */
#define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */
#define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
@@ -377,6 +378,7 @@
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
#define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */
+#define X86_FEATURE_IDLE_HLT (15*32+30) /* IDLE HLT intercept */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* "avx512vbmi" AVX512 Vector Bit Manipulation instructions*/
@@ -434,15 +436,18 @@
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* Speculative Store Bypass Disable */
/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
-#define X86_FEATURE_SME (19*32+ 0) /* "sme" AMD Secure Memory Encryption */
-#define X86_FEATURE_SEV (19*32+ 1) /* "sev" AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_SME (19*32+ 0) /* "sme" Secure Memory Encryption */
+#define X86_FEATURE_SEV (19*32+ 1) /* "sev" Secure Encrypted Virtualization */
#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
-#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" AMD Secure Encrypted Virtualization - Encrypted State */
-#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" AMD Secure Encrypted Virtualization - Secure Nested Paging */
+#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" Secure Encrypted Virtualization - Secure Nested Paging */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
-#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
-#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
+#define X86_FEATURE_SME_COHERENT (19*32+10) /* hardware-enforced cache coherency */
+#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" SEV-ES full debug state swap support */
+#define X86_FEATURE_RMPREAD (19*32+21) /* RMPREAD instruction */
+#define X86_FEATURE_SEGMENTED_RMP (19*32+23) /* Segmented RMP support */
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
+#define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
@@ -455,6 +460,11 @@
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
+#define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
+#define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /*
+ * BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
+ * (SRSO_MSR_FIX in the official doc).
+ */
/*
* Extended auxiliary flags: Linux defined - for features scattered in various
@@ -470,6 +480,7 @@
#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32 + 7) /* Workload Classification */
+#define X86_FEATURE_PREFER_YMM (21*32 + 8) /* Avoid ZMM registers due to downclocking */
/*
* BUG word(s)
@@ -521,4 +532,5 @@
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+#define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index dc1c1057f26e..e6134ef2263d 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -397,7 +397,8 @@
#define MSR_IA32_PASID_VALID BIT_ULL(31)
/* DEBUGCTLMSR bits (others vary by model): */
-#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
+#define DEBUGCTLMSR_LBR_BIT 0 /* last branch recording */
+#define DEBUGCTLMSR_LBR (1UL << DEBUGCTLMSR_LBR_BIT)
#define DEBUGCTLMSR_BTF_SHIFT 1
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
#define DEBUGCTLMSR_BUS_LOCK_DETECT (1UL << 2)
@@ -610,6 +611,7 @@
#define MSR_AMD_PERF_CTL 0xc0010062
#define MSR_AMD_PERF_STATUS 0xc0010063
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
+#define MSR_AMD64_GUEST_TSC_FREQ 0xc0010134
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD_PPIN_CTL 0xc00102f0
@@ -646,6 +648,7 @@
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
+#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
@@ -684,11 +687,12 @@
#define MSR_AMD64_SNP_SMT_PROT BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT)
#define MSR_AMD64_SNP_RESV_BIT 18
#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT)
-
-#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
-
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133
+#define MSR_AMD64_RMP_CFG 0xc0010136
+#define MSR_AMD64_SEG_RMP_ENABLED_BIT 0
+#define MSR_AMD64_SEG_RMP_ENABLED BIT_ULL(MSR_AMD64_SEG_RMP_ENABLED_BIT)
+#define MSR_AMD64_RMP_SEGMENT_SHIFT(x) (((x) & GENMASK_ULL(13, 8)) >> 8)
#define MSR_SVSM_CAA 0xc001f000
@@ -699,15 +703,17 @@
#define MSR_AMD_CPPC_REQ 0xc00102b3
#define MSR_AMD_CPPC_STATUS 0xc00102b4
-#define AMD_CPPC_LOWEST_PERF(x) (((x) >> 0) & 0xff)
-#define AMD_CPPC_LOWNONLIN_PERF(x) (((x) >> 8) & 0xff)
-#define AMD_CPPC_NOMINAL_PERF(x) (((x) >> 16) & 0xff)
-#define AMD_CPPC_HIGHEST_PERF(x) (((x) >> 24) & 0xff)
+/* Masks for use with MSR_AMD_CPPC_CAP1 */
+#define AMD_CPPC_LOWEST_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_LOWNONLIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_NOMINAL_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_HIGHEST_PERF_MASK GENMASK(31, 24)
-#define AMD_CPPC_MAX_PERF(x) (((x) & 0xff) << 0)
-#define AMD_CPPC_MIN_PERF(x) (((x) & 0xff) << 8)
-#define AMD_CPPC_DES_PERF(x) (((x) & 0xff) << 16)
-#define AMD_CPPC_ENERGY_PERF_PREF(x) (((x) & 0xff) << 24)
+/* Masks for use with MSR_AMD_CPPC_REQ */
+#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
/* AMD Performance Counter Global Status and Control MSRs */
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
@@ -719,6 +725,7 @@
/* Zen4 */
#define MSR_ZEN4_BP_CFG 0xc001102e
+#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
/* Fam 19h MSRs */
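
Replacing the open-coded shift/mask helpers with GENMASK() definitions lets callers extract fields with FIELD_GET() from <linux/bitfield.h> instead of hand-rolled arithmetic. A small sketch of the consumer side (mask name hypothetical, mirroring the AMD_CPPC_* layout):

	#include <linux/bitfield.h>
	#include <linux/types.h>

	#define DEMO_HIGHEST_PERF_MASK	GENMASK(31, 24)

	static inline u8 demo_highest_perf(u64 cap1)
	{
		/* FIELD_GET() shifts and masks based on the mask position. */
		return FIELD_GET(DEMO_HIGHEST_PERF_MASK, cap1);
	}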
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 88585c1de416..460306b35a4b 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -559,6 +559,9 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)
+#define KVM_XEN_MSR_MIN_INDEX 0x40000000u
+#define KVM_XEN_MSR_MAX_INDEX 0x4fffffffu
+
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
@@ -925,5 +928,6 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SEV_VM 2
#define KVM_X86_SEV_ES_VM 3
#define KVM_X86_SNP_VM 4
+#define KVM_X86_TDX_VM 5
#endif /* _ASM_X86_KVM_H */
diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h
index 1814b413fd57..ec1321248dac 100644
--- a/tools/arch/x86/include/uapi/asm/svm.h
+++ b/tools/arch/x86/include/uapi/asm/svm.h
@@ -95,6 +95,7 @@
#define SVM_EXIT_CR14_WRITE_TRAP 0x09e
#define SVM_EXIT_CR15_WRITE_TRAP 0x09f
#define SVM_EXIT_INVPCID 0x0a2
+#define SVM_EXIT_IDLE_HLT 0x0a6
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
@@ -224,6 +225,7 @@
{ SVM_EXIT_CR4_WRITE_TRAP, "write_cr4_trap" }, \
{ SVM_EXIT_CR8_WRITE_TRAP, "write_cr8_trap" }, \
{ SVM_EXIT_INVPCID, "invpcid" }, \
+ { SVM_EXIT_IDLE_HLT, "idle-halt" }, \
{ SVM_EXIT_NPF, "npf" }, \
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
index 0199d56cb479..d66b710d628f 100644
--- a/tools/arch/x86/lib/memset_64.S
+++ b/tools/arch/x86/lib/memset_64.S
@@ -3,6 +3,7 @@
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
@@ -28,7 +29,7 @@
* only for the return value that is the same as the source input,
* which the compiler could/should do much better anyway.
*/
-SYM_FUNC_START(__memset)
+SYM_TYPED_FUNC_START(__memset)
ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
movq %rdi,%r9
diff --git a/tools/include/linux/cfi_types.h b/tools/include/linux/cfi_types.h
new file mode 100644
index 000000000000..6b8713675765
--- /dev/null
+++ b/tools/include/linux/cfi_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) type definitions.
+ */
+#ifndef _LINUX_CFI_TYPES_H
+#define _LINUX_CFI_TYPES_H
+
+#ifdef __ASSEMBLY__
+#include <linux/linkage.h>
+
+#ifdef CONFIG_CFI_CLANG
+/*
+ * Use the __kcfi_typeid_<function> type identifier symbol to
+ * annotate indirectly called assembly functions. The compiler emits
+ * these symbols for all address-taken function declarations in C
+ * code.
+ */
+#ifndef __CFI_TYPE
+#define __CFI_TYPE(name) \
+ .4byte __kcfi_typeid_##name
+#endif
+
+#define SYM_TYPED_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ __CFI_TYPE(name) ASM_NL \
+ name:
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_TYPED_ENTRY(name, linkage, align)
+
+#else /* CONFIG_CFI_CLANG */
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+
+#endif /* CONFIG_CFI_CLANG */
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_CFI_TYPES_H */
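
Under Clang kCFI, every indirect call site checks a type hash stored just before the callee's entry point; SYM_TYPED_FUNC_START emits that hash (via __kcfi_typeid_<name>) for an assembly function so that address-taken calls into it do not trap. A hedged C-side illustration of the call shape being protected:

	#include <linux/types.h>

	typedef void *(*memset_fn)(void *s, int c, size_t n);

	void *__memset(void *s, int c, size_t n);

	static void *call_indirect(void *p, size_t n)
	{
		memset_fn fn = __memset;	/* address taken */

		/* kCFI verifies the typeid emitted before fn's entry. */
		return fn(p, 0, n);
	}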
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 1ea2c4c33b86..ef1c27fa3c57 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -85,6 +85,7 @@
/* compatibility flags */
#define MAP_FILE 0
+#define PKEY_UNRESTRICTED 0x0
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 88dc393c2bca..2892a45023af 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -849,9 +849,11 @@ __SYSCALL(__NR_getxattrat, sys_getxattrat)
__SYSCALL(__NR_listxattrat, sys_listxattrat)
#define __NR_removexattrat 466
__SYSCALL(__NR_removexattrat, sys_removexattrat)
+#define __NR_open_tree_attr 467
+__SYSCALL(__NR_open_tree_attr, sys_open_tree_attr)
#undef __NR_syscalls
-#define __NR_syscalls 467
+#define __NR_syscalls 468
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 5d32d53508d9..ced0fc3c3aa5 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -79,6 +79,8 @@ enum {
#define IPPROTO_MPLS IPPROTO_MPLS
IPPROTO_ETHERNET = 143, /* Ethernet-within-IPv6 Encapsulation */
#define IPPROTO_ETHERNET IPPROTO_ETHERNET
+ IPPROTO_AGGFRAG = 144, /* AGGFRAG in ESP (RFC 9347) */
+#define IPPROTO_AGGFRAG IPPROTO_AGGFRAG
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_SMC = 256, /* Shared Memory Communications */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 502ea63b5d2e..b6ae8ad8934b 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -617,10 +617,6 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
-#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
- KVM_X86_DISABLE_EXITS_HLT | \
- KVM_X86_DISABLE_EXITS_PAUSE | \
- KVM_X86_DISABLE_EXITS_CSTATE)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
@@ -933,6 +929,7 @@ struct kvm_enable_cap {
#define KVM_CAP_PRE_FAULT_MEMORY 236
#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237
#define KVM_CAP_X86_GUEST_MODE 238
+#define KVM_CAP_ARM_WRITABLE_IMP_ID_REGS 239
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1070,6 +1067,10 @@ struct kvm_dirty_tlb {
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
+
+#define KVM_REG_SIZE(id) \
+ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
#define KVM_REG_SIZE_U8 0x0000000000000000ULL
#define KVM_REG_SIZE_U16 0x0010000000000000ULL
#define KVM_REG_SIZE_U32 0x0020000000000000ULL
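
KVM_REG_SIZE() decodes bits 55:52 of a register id as log2 of the register's byte size. A self-contained userspace check of the arithmetic, using the constants above:

	#include <stdio.h>

	#define KVM_REG_SIZE_SHIFT	52
	#define KVM_REG_SIZE_MASK	0x00f0000000000000ULL
	#define KVM_REG_SIZE(id) \
		(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

	int main(void)
	{
		unsigned long long id = 0x0030000000000000ULL; /* U64 reg */

		printf("%u\n", KVM_REG_SIZE(id));	/* prints 8 */
		return 0;
	}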
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 0524d541d4e3..5fc753c23734 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -385,6 +385,8 @@ enum perf_event_read_format {
*
* @sample_max_stack: Max number of frame pointers in a callchain,
* should be < /proc/sys/kernel/perf_event_max_stack
+ *			Max number of branch stack entries;
+ *			should be < the hardware limit
*/
struct perf_event_attr {
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index 887a25286441..f78ee3670dd5 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -98,43 +98,93 @@ struct statx_timestamp {
*/
struct statx {
/* 0x00 */
- __u32 stx_mask; /* What results were written [uncond] */
- __u32 stx_blksize; /* Preferred general I/O size [uncond] */
- __u64 stx_attributes; /* Flags conveying information about the file [uncond] */
+ /* What results were written [uncond] */
+ __u32 stx_mask;
+
+ /* Preferred general I/O size [uncond] */
+ __u32 stx_blksize;
+
+ /* Flags conveying information about the file [uncond] */
+ __u64 stx_attributes;
+
/* 0x10 */
- __u32 stx_nlink; /* Number of hard links */
- __u32 stx_uid; /* User ID of owner */
- __u32 stx_gid; /* Group ID of owner */
- __u16 stx_mode; /* File mode */
+ /* Number of hard links */
+ __u32 stx_nlink;
+
+ /* User ID of owner */
+ __u32 stx_uid;
+
+ /* Group ID of owner */
+ __u32 stx_gid;
+
+ /* File mode */
+ __u16 stx_mode;
__u16 __spare0[1];
+
/* 0x20 */
- __u64 stx_ino; /* Inode number */
- __u64 stx_size; /* File size */
- __u64 stx_blocks; /* Number of 512-byte blocks allocated */
- __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
+ /* Inode number */
+ __u64 stx_ino;
+
+ /* File size */
+ __u64 stx_size;
+
+ /* Number of 512-byte blocks allocated */
+ __u64 stx_blocks;
+
+ /* Mask to show what's supported in stx_attributes */
+ __u64 stx_attributes_mask;
+
/* 0x40 */
- struct statx_timestamp stx_atime; /* Last access time */
- struct statx_timestamp stx_btime; /* File creation time */
- struct statx_timestamp stx_ctime; /* Last attribute change time */
- struct statx_timestamp stx_mtime; /* Last data modification time */
+ /* Last access time */
+ struct statx_timestamp stx_atime;
+
+ /* File creation time */
+ struct statx_timestamp stx_btime;
+
+ /* Last attribute change time */
+ struct statx_timestamp stx_ctime;
+
+ /* Last data modification time */
+ struct statx_timestamp stx_mtime;
+
/* 0x80 */
- __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
+ /* Device ID of special file [if bdev/cdev] */
+ __u32 stx_rdev_major;
__u32 stx_rdev_minor;
- __u32 stx_dev_major; /* ID of device containing file [uncond] */
+
+ /* ID of device containing file [uncond] */
+ __u32 stx_dev_major;
__u32 stx_dev_minor;
+
/* 0x90 */
__u64 stx_mnt_id;
- __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
- __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+
+ /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_mem_align;
+
+ /* File offset alignment for direct I/O */
+ __u32 stx_dio_offset_align;
+
/* 0xa0 */
- __u64 stx_subvol; /* Subvolume identifier */
- __u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */
- __u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */
+ /* Subvolume identifier */
+ __u64 stx_subvol;
+
+ /* Min atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_min;
+
+ /* Max atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_max;
+
/* 0xb0 */
- __u32 stx_atomic_write_segments_max; /* Max atomic write segment count */
- __u32 __spare1[1];
+ /* Max atomic write segment count */
+ __u32 stx_atomic_write_segments_max;
+
+ /* File offset alignment for direct I/O reads */
+ __u32 stx_dio_read_offset_align;
+
/* 0xb8 */
__u64 __spare3[9]; /* Spare space for future expansion */
+
/* 0x100 */
};
@@ -164,6 +214,7 @@ struct statx {
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */
+#define STATX_DIO_READ_ALIGN 0x00020000U /* Want/got dio read alignment info */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
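
Userspace opts into the new field by setting STATX_DIO_READ_ALIGN in the request mask and then checking stx_mask on return, since the kernel only acknowledges fields it actually filled. A sketch, assuming libc and uapi headers new enough to carry stx_dio_read_offset_align:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>

	#ifndef STATX_DIO_READ_ALIGN
	#define STATX_DIO_READ_ALIGN 0x00020000U /* value from the header above */
	#endif

	int main(int argc, char **argv)
	{
		struct statx stx;

		if (argc < 2 ||
		    statx(AT_FDCWD, argv[1], 0, STATX_DIO_READ_ALIGN, &stx))
			return 1;

		if (stx.stx_mask & STATX_DIO_READ_ALIGN)
			printf("DIO read offset alignment: %u\n",
			       stx.stx_dio_read_offset_align);
		return 0;
	}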
diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py
index a9966ff2a143..0d930c17f963 100755
--- a/tools/net/ynl/pyynl/ynl_gen_c.py
+++ b/tools/net/ynl/pyynl/ynl_gen_c.py
@@ -162,9 +162,15 @@ class Type(SpecAttr):
def free_needs_iter(self):
return False
- def free(self, ri, var, ref):
+ def _free_lines(self, ri, var, ref):
if self.is_multi_val() or self.presence_type() == 'len':
- ri.cw.p(f'free({var}->{ref}{self.c_name});')
+ return [f'free({var}->{ref}{self.c_name});']
+ return []
+
+ def free(self, ri, var, ref):
+ lines = self._free_lines(ri, var, ref)
+ for line in lines:
+ ri.cw.p(line)
def arg_member(self, ri):
member = self._complex_member_type(ri)
@@ -263,6 +269,10 @@ class Type(SpecAttr):
var = "req"
member = f"{var}->{'.'.join(ref)}"
+ local_vars = []
+ if self.free_needs_iter():
+ local_vars += ['unsigned int i;']
+
code = []
presence = ''
for i in range(0, len(ref)):
@@ -272,6 +282,10 @@ class Type(SpecAttr):
if i == len(ref) - 1 and self.presence_type() != 'bit':
continue
code.append(presence + ' = 1;')
+ ref_path = '.'.join(ref[:-1])
+ if ref_path:
+ ref_path += '.'
+ code += self._free_lines(ri, var, ref_path)
code += self._setter_lines(ri, member, presence)
func_name = f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}"
@@ -279,7 +293,8 @@ class Type(SpecAttr):
alloc = bool([x for x in code if 'alloc(' in x])
if free and not alloc:
func_name = '__' + func_name
- ri.cw.write_func('static inline void', func_name, body=code,
+ ri.cw.write_func('static inline void', func_name, local_vars=local_vars,
+ body=code,
args=[f'{type_name(ri, direction, deref=deref)} *{var}'] + self.arg_member(ri))
@@ -482,8 +497,7 @@ class TypeString(Type):
['unsigned int len;']
def _setter_lines(self, ri, member, presence):
- return [f"free({member});",
- f"{presence}_len = strlen({self.c_name});",
+ return [f"{presence}_len = strlen({self.c_name});",
f"{member} = malloc({presence}_len + 1);",
f'memcpy({member}, {self.c_name}, {presence}_len);',
f'{member}[{presence}_len] = 0;']
@@ -536,8 +550,7 @@ class TypeBinary(Type):
['unsigned int len;']
def _setter_lines(self, ri, member, presence):
- return [f"free({member});",
- f"{presence}_len = len;",
+ return [f"{presence}_len = len;",
f"{member} = malloc({presence}_len);",
f'memcpy({member}, {self.c_name}, {presence}_len);']
@@ -574,12 +587,14 @@ class TypeNest(Type):
def _complex_member_type(self, ri):
return self.nested_struct_type
- def free(self, ri, var, ref):
+ def _free_lines(self, ri, var, ref):
+ lines = []
at = '&'
if self.is_recursive_for_op(ri):
at = ''
- ri.cw.p(f'if ({var}->{ref}{self.c_name})')
- ri.cw.p(f'{self.nested_render_name}_free({at}{var}->{ref}{self.c_name});')
+ lines += [f'if ({var}->{ref}{self.c_name})']
+ lines += [f'{self.nested_render_name}_free({at}{var}->{ref}{self.c_name});']
+ return lines
def _attr_typol(self):
return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
@@ -632,15 +647,19 @@ class TypeMultiAttr(Type):
def free_needs_iter(self):
return 'type' not in self.attr or self.attr['type'] == 'nest'
- def free(self, ri, var, ref):
+ def _free_lines(self, ri, var, ref):
+ lines = []
if self.attr['type'] in scalars:
- ri.cw.p(f"free({var}->{ref}{self.c_name});")
+ lines += [f"free({var}->{ref}{self.c_name});"]
elif 'type' not in self.attr or self.attr['type'] == 'nest':
- ri.cw.p(f"for (i = 0; i < {var}->{ref}n_{self.c_name}; i++)")
- ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);')
- ri.cw.p(f"free({var}->{ref}{self.c_name});")
+ lines += [
+ f"for (i = 0; i < {var}->{ref}n_{self.c_name}; i++)",
+ f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);',
+ f"free({var}->{ref}{self.c_name});",
+ ]
else:
raise Exception(f"Free of MultiAttr sub-type {self.attr['type']} not supported yet")
+ return lines
def _attr_policy(self, policy):
return self.base_type._attr_policy(policy)
@@ -654,10 +673,10 @@ class TypeMultiAttr(Type):
def attr_put(self, ri, var):
if self.attr['type'] in scalars:
put_type = self.type
- ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
+ ri.cw.p(f"for (i = 0; i < {var}->n_{self.c_name}; i++)")
ri.cw.p(f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
elif 'type' not in self.attr or self.attr['type'] == 'nest':
- ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
+ ri.cw.p(f"for (i = 0; i < {var}->n_{self.c_name}; i++)")
self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
f"{self.enum_name}, &{var}->{self.c_name}[i])")
else:
@@ -666,8 +685,7 @@ class TypeMultiAttr(Type):
def _setter_lines(self, ri, member, presence):
# For multi-attr we have a count, not presence, hack up the presence
presence = presence[:-(len('_present.') + len(self.c_name))] + "n_" + self.c_name
- return [f"free({member});",
- f"{member} = {self.c_name};",
+ return [f"{member} = {self.c_name};",
f"{presence} = n_{self.c_name};"]
@@ -696,8 +714,11 @@ class TypeArrayNest(Type):
def _attr_get(self, ri, var):
local_vars = ['const struct nlattr *attr2;']
get_lines = [f'attr_{self.c_name} = attr;',
- 'ynl_attr_for_each_nested(attr2, attr)',
- f'\t{var}->n_{self.c_name}++;']
+ 'ynl_attr_for_each_nested(attr2, attr) {',
+ '\tif (ynl_attr_validate(yarg, attr2))',
+ '\t\treturn YNL_PARSE_CB_ERROR;',
+ f'\t{var}->n_{self.c_name}++;',
+ '}']
return get_lines, None, local_vars
@@ -755,6 +776,7 @@ class Struct:
self.request = False
self.reply = False
self.recursive = False
+        self.in_multi_val = False  # used by a MultiAttr or legacy arrays
self.attr_list = []
self.attrs = dict()
@@ -1122,6 +1144,10 @@ class Family(SpecFamily):
if attr in rs_members['reply']:
self.pure_nested_structs[nested].reply = True
+ if spec.is_multi_val():
+ child = self.pure_nested_structs.get(nested)
+ child.in_multi_val = True
+
self._sort_pure_types()
# Propagate the request / reply / recursive
@@ -1136,6 +1162,8 @@ class Family(SpecFamily):
struct.child_nests.update(child.child_nests)
child.request |= struct.request
child.reply |= struct.reply
+ if spec.is_multi_val():
+ child.in_multi_val = True
if attr_set in struct.child_nests:
struct.recursive = True
@@ -1404,9 +1432,9 @@ class CodeWriter:
def write_func(self, qual_ret, name, body, args=None, local_vars=None):
self.write_func_prot(qual_ret=qual_ret, name=name, args=args)
+ self.block_start()
self.write_func_lvar(local_vars=local_vars)
- self.block_start()
for line in body:
self.p(line)
self.block_end()
@@ -1651,11 +1679,23 @@ def put_req_nested_prototype(ri, struct, suffix=';'):
def put_req_nested(ri, struct):
+ local_vars = []
+ init_lines = []
+
+ local_vars.append('struct nlattr *nest;')
+ init_lines.append("nest = ynl_attr_nest_start(nlh, attr_type);")
+
+ for _, arg in struct.member_list():
+ if arg.presence_type() == 'count':
+ local_vars.append('unsigned int i;')
+ break
+
put_req_nested_prototype(ri, struct, suffix='')
ri.cw.block_start()
- ri.cw.write_func_lvar('struct nlattr *nest;')
+ ri.cw.write_func_lvar(local_vars)
- ri.cw.p("nest = ynl_attr_nest_start(nlh, attr_type);")
+ for line in init_lines:
+ ri.cw.p(line)
for _, arg in struct.member_list():
arg.attr_put(ri, "obj")
@@ -1860,6 +1900,11 @@ def print_req(ri):
local_vars += ['size_t hdr_len;',
'void *hdr;']
+ for _, attr in ri.struct["request"].member_list():
+ if attr.presence_type() == 'count':
+ local_vars += ['unsigned int i;']
+ break
+
print_prototype(ri, direction, terminate=False)
ri.cw.block_start()
ri.cw.write_func_lvar(local_vars)
@@ -2962,6 +3007,9 @@ def main():
for attr_set, struct in parsed.pure_nested_structs.items():
ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
print_type_full(ri, struct)
+ if struct.request and struct.in_multi_val:
+ free_rsp_nested_prototype(ri)
+ cw.nl()
for op_name, op in parsed.ops.items():
cw.p(f"/* ============== {op.enum_name} ============== */")
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 33d861c04ebd..3ce7b54003c2 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -522,7 +522,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
case INAT_PFX_REPNE:
if (modrm == 0xca)
/* eretu/erets */
- insn->type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_SYSRET;
break;
default:
if (modrm == 0xca)
@@ -535,11 +535,15 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
insn->type = INSN_JUMP_CONDITIONAL;
- } else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
- op2 == 0x35) {
+ } else if (op2 == 0x05 || op2 == 0x34) {
- /* sysenter, sysret */
- insn->type = INSN_CONTEXT_SWITCH;
+ /* syscall, sysenter */
+ insn->type = INSN_SYSCALL;
+
+ } else if (op2 == 0x07 || op2 == 0x35) {
+
+ /* sysret, sysexit */
+ insn->type = INSN_SYSRET;
} else if (op2 == 0x0b || op2 == 0xb9) {
@@ -676,7 +680,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
case 0xca: /* retf */
case 0xcb: /* retf */
- insn->type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_SYSRET;
break;
case 0xe0: /* loopne */
@@ -721,7 +725,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
} else if (modrm_reg == 5) {
/* jmpf */
- insn->type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_SYSRET;
} else if (modrm_reg == 6) {
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
index 403e587676f1..06ca4a2659a4 100644
--- a/tools/objtool/arch/x86/special.c
+++ b/tools/objtool/arch/x86/special.c
@@ -126,7 +126,7 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
* indicates a rare GCC quirk/bug which can leave dead
* code behind.
*/
- if (reloc_type(text_reloc) == R_X86_64_PC32) {
+ if (!file->ignore_unreachables && reloc_type(text_reloc) == R_X86_64_PC32) {
WARN_INSN(insn, "ignoring unreachables due to jump table quirk");
file->ignore_unreachables = true;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 4a1f6c3169b3..b649049b6a11 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3505,6 +3505,34 @@ next_orig:
return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
+static bool skip_alt_group(struct instruction *insn)
+{
+ struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
+
+ /* ANNOTATE_IGNORE_ALTERNATIVE */
+ if (insn->alt_group && insn->alt_group->ignore)
+ return true;
+
+ /*
+ * For NOP patched with CLAC/STAC, only follow the latter to avoid
+ * impossible code paths combining patched CLAC with unpatched STAC
+ * or vice versa.
+ *
+ * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
+ * requested not to do that to avoid hurting .s file readability
+ * around CLAC/STAC alternative sites.
+ */
+
+ if (!alt_insn)
+ return false;
+
+ /* Don't override ASM_{CLAC,STAC}_UNSAFE */
+ if (alt_insn->alt_group && alt_insn->alt_group->ignore)
+ return false;
+
+ return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
+}
+
/*
* Follow the branch starting at the given instruction, and recursively follow
* any other branches (jumps). Meanwhile, track the frame pointer state at
@@ -3625,7 +3653,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
}
}
- if (insn->alt_group && insn->alt_group->ignore)
+ if (skip_alt_group(insn))
return 0;
if (handle_insn_ops(insn, next_insn, &state))
@@ -3684,14 +3712,20 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
break;
- case INSN_CONTEXT_SWITCH:
- if (func) {
- if (!next_insn || !next_insn->hint) {
- WARN_INSN(insn, "unsupported instruction in callable function");
- return 1;
- }
- break;
+ case INSN_SYSCALL:
+ if (func && (!next_insn || !next_insn->hint)) {
+ WARN_INSN(insn, "unsupported instruction in callable function");
+ return 1;
+ }
+
+ break;
+
+ case INSN_SYSRET:
+ if (func && (!next_insn || !next_insn->hint)) {
+ WARN_INSN(insn, "unsupported instruction in callable function");
+ return 1;
}
+
return 0;
case INSN_STAC:
@@ -3886,6 +3920,12 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
WARN_INSN(insn, "RET before UNTRAIN");
return 1;
+ case INSN_SYSCALL:
+ break;
+
+ case INSN_SYSRET:
+ return 0;
+
case INSN_NOP:
if (insn->retpoline_safe)
return 0;
@@ -3895,6 +3935,9 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
break;
}
+ if (insn->dead_end)
+ return 0;
+
if (!next) {
WARN_INSN(insn, "teh end!");
return 1;
diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
index 089a1acc48a8..01ef6f415adf 100644
--- a/tools/objtool/include/objtool/arch.h
+++ b/tools/objtool/include/objtool/arch.h
@@ -19,7 +19,8 @@ enum insn_type {
INSN_CALL,
INSN_CALL_DYNAMIC,
INSN_RETURN,
- INSN_CONTEXT_SWITCH,
+ INSN_SYSCALL,
+ INSN_SYSRET,
INSN_BUG,
INSN_NOP,
INSN_STAC,
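Why the INSN_CONTEXT_SWITCH split matters is visible in validate_branch() above: INSN_SYSCALL falls through (execution continues past the entry instruction), while INSN_SYSRET terminates the path. A standalone sketch of the opcode mapping, with the enum local to the example (the real decoder handles many more cases):

	enum insn_type { INSN_OTHER, INSN_SYSCALL, INSN_SYSRET };

	static enum insn_type classify_0f_op2(unsigned char op2)
	{
		switch (op2) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			return INSN_SYSCALL;	/* enters the kernel; the path continues */
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			return INSN_SYSRET;	/* exits the kernel; the path ends */
		default:
			return INSN_OTHER;
		}
	}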
diff --git a/tools/perf/arch/arm/entry/syscalls/syscall.tbl b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
index 49eeb2ad8dbd..27c1d5ebcd91 100644
--- a/tools/perf/arch/arm/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
@@ -481,3 +481,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index c844cd5cda62..1e8c44c7b614 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -381,3 +381,4 @@
464 n64 getxattrat sys_getxattrat
465 n64 listxattrat sys_listxattrat
466 n64 removexattrat sys_removexattrat
+467 n64 open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index d8b4ab78bef0..9a084bdb8926 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -557,3 +557,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index e9115b4d8b63..a4569b96ef06 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -469,3 +469,4 @@
464 common getxattrat sys_getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/arch/sh/entry/syscalls/syscall.tbl b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
index c8cad33bf250..52a7652fcff6 100644
--- a/tools/perf/arch/sh/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
@@ -470,3 +470,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
index 727f99d333b3..83e45eb6c095 100644
--- a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
@@ -512,3 +512,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
index 4d0fb2fba7e2..ac007ea00979 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
@@ -396,7 +396,7 @@
381 i386 pkey_alloc sys_pkey_alloc
382 i386 pkey_free sys_pkey_free
383 i386 statx sys_statx
-384 i386 arch_prctl sys_arch_prctl compat_sys_arch_prctl
+384 i386 arch_prctl sys_arch_prctl
385 i386 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
386 i386 rseq sys_rseq
393 i386 semget sys_semget
@@ -472,3 +472,4 @@
464 i386 getxattrat sys_getxattrat
465 i386 listxattrat sys_listxattrat
466 i386 removexattrat sys_removexattrat
+467 i386 open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 5eb708bff1c7..cfb5ca41e30d 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -390,6 +390,7 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
index 37effc1b134e..f657a77314f8 100644
--- a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
@@ -437,3 +437,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index a4499e5a6f9c..857f6646cc23 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -20,6 +20,7 @@ FILES=(
"include/uapi/linux/stat.h"
"include/linux/bits.h"
"include/vdso/bits.h"
+ "include/linux/cfi_types.h"
"include/linux/const.h"
"include/vdso/const.h"
"include/vdso/unaligned.h"
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index d18cc47e89bd..c3322eb3d686 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -392,6 +392,8 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data);
struct timespec64;
struct __kernel_timespec;
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
index 6e6907e63bfc..a15ac2fa4b20 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
@@ -155,4 +155,8 @@
#define AT_HANDLE_MNT_ID_UNIQUE 0x001 /* Return the u64 unique mount ID. */
#define AT_HANDLE_CONNECTABLE 0x002 /* Request a connectable file handle */
+/* Flags for execveat2(2). */
+#define AT_EXECVE_CHECK 0x10000 /* Only perform a check if execution
+ would be allowed. */
+
#endif /* _UAPI_LINUX_FCNTL_H */
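Per the exec-check series this header change comes from, AT_EXECVE_CHECK is passed to execveat(2) to ask whether execution would be permitted, without actually executing anything. A minimal probe might look like the sketch below; the fallback define hedges against older libc headers, and the raw syscall() is used since glibc wrappers may not expose the flag.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef AT_EXECVE_CHECK
	#define AT_EXECVE_CHECK 0x10000
	#endif

	/* Returns 0 if the kernel would permit executing fd, -1 with errno
	 * set otherwise; no new program is actually run.
	 */
	static int would_exec(int fd)
	{
		char *const argv[] = { "probe", NULL };
		char *const envp[] = { NULL };

		return syscall(SYS_execveat, fd, "", argv, envp,
			       AT_EMPTY_PATH | AT_EXECVE_CHECK);
	}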
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fs.h b/tools/perf/trace/beauty/include/uapi/linux/fs.h
index 753971770733..e762e1af650c 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fs.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fs.h
@@ -40,6 +40,15 @@
#define BLOCK_SIZE_BITS 10
#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+/* flags for integrity meta */
+#define IO_INTEGRITY_CHK_GUARD (1U << 0) /* enforce guard check */
+#define IO_INTEGRITY_CHK_REFTAG (1U << 1) /* enforce ref check */
+#define IO_INTEGRITY_CHK_APPTAG (1U << 2) /* enforce app check */
+
+#define IO_INTEGRITY_VALID_FLAGS (IO_INTEGRITY_CHK_GUARD | \
+ IO_INTEGRITY_CHK_REFTAG | \
+ IO_INTEGRITY_CHK_APPTAG)
+
#define SEEK_SET 0 /* seek relative to beginning of file */
#define SEEK_CUR 1 /* seek relative to current file position */
#define SEEK_END 2 /* seek relative to end of file */
@@ -203,10 +212,8 @@ struct fsxattr {
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
-/*
- * A jump here: 130-136 are reserved for zoned block devices
- * (see uapi/linux/blkzoned.h)
- */
+/* 130-136 are used by zoned block device ioctls (uapi/linux/blkzoned.h) */
+/* 137-141 are used by blk-crypto ioctls (uapi/linux/blk-crypto.h) */
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
@@ -332,9 +339,13 @@ typedef int __bitwise __kernel_rwf_t;
/* Atomic Write */
#define RWF_ATOMIC ((__force __kernel_rwf_t)0x00000040)
+/* buffered IO that drops the cache after reading or writing data */
+#define RWF_DONTCACHE ((__force __kernel_rwf_t)0x00000080)
+
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
- RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC)
+ RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC |\
+ RWF_DONTCACHE)
#define PROCFS_IOCTL_MAGIC 'f'
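RWF_DONTCACHE above is a per-call hint for buffered I/O that drops the touched page cache once the operation completes. A hedged usage sketch with pwritev2(2) follows; the fallback define covers libc headers that predate the flag, and the retry handles kernels that reject it.

	#define _GNU_SOURCE
	#include <sys/uio.h>

	#ifndef RWF_DONTCACHE
	#define RWF_DONTCACHE 0x00000080
	#endif

	/* Buffered write that asks the kernel to evict the cached pages
	 * afterwards; on failure, retries as a plain buffered write.
	 */
	static ssize_t write_dontcache(int fd, const void *buf, size_t len)
	{
		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
		ssize_t n = pwritev2(fd, &iov, 1, -1, RWF_DONTCACHE);

		if (n < 0)
			n = pwritev2(fd, &iov, 1, -1, 0);
		return n;
	}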
diff --git a/tools/perf/trace/beauty/include/uapi/linux/mount.h b/tools/perf/trace/beauty/include/uapi/linux/mount.h
index c07008816aca..7fa67c2031a5 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/mount.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/mount.h
@@ -179,7 +179,12 @@ struct statmount {
__u32 opt_array; /* [str] Array of nul terminated fs options */
__u32 opt_sec_num; /* Number of security options */
__u32 opt_sec_array; /* [str] Array of nul terminated security options */
- __u64 __spare2[46];
+ __u64 supported_mask; /* Mask flags that this kernel supports */
+ __u32 mnt_uidmap_num; /* Number of uid mappings */
+ __u32 mnt_uidmap; /* [str] Array of uid mappings (as seen from callers namespace) */
+ __u32 mnt_gidmap_num; /* Number of gid mappings */
+ __u32 mnt_gidmap; /* [str] Array of gid mappings (as seen from callers namespace) */
+ __u64 __spare2[43];
char str[]; /* Variable size part containing strings */
};
@@ -217,6 +222,9 @@ struct mnt_id_req {
#define STATMOUNT_SB_SOURCE 0x00000200U /* Want/got sb_source */
#define STATMOUNT_OPT_ARRAY 0x00000400U /* Want/got opt_... */
#define STATMOUNT_OPT_SEC_ARRAY 0x00000800U /* Want/got opt_sec... */
+#define STATMOUNT_SUPPORTED_MASK 0x00001000U /* Want/got supported mask flags */
+#define STATMOUNT_MNT_UIDMAP 0x00002000U /* Want/got uidmap... */
+#define STATMOUNT_MNT_GIDMAP 0x00004000U /* Want/got gidmap... */
/*
* Special @mnt_id values that can be passed to listmount
diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
index 5c6080680cb2..15c18ef4eb11 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/prctl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
@@ -353,4 +353,15 @@ struct prctl_mm_map {
*/
#define PR_LOCK_SHADOW_STACK_STATUS 76
+/*
+ * Controls the mode of timer_create() for CRIU restore operations.
+ * Enabling this allows CRIU to restore timers with explicit IDs.
+ *
+ * Don't use for normal operations as the result might be undefined.
+ */
+#define PR_TIMER_CREATE_RESTORE_IDS 77
+# define PR_TIMER_CREATE_RESTORE_IDS_OFF 0
+# define PR_TIMER_CREATE_RESTORE_IDS_ON 1
+# define PR_TIMER_CREATE_RESTORE_IDS_GET 2
+
#endif /* _LINUX_PRCTL_H */
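A hedged sketch of how a checkpoint/restore tool would use the new prctl: enable restore mode around timer re-creation, then switch it back off. Constants come from the header above; the exact convention by which timer_create() receives the requested ID is defined by the corresponding kernel series, so treat this wrapper as illustrative.

	#include <sys/prctl.h>

	#ifndef PR_TIMER_CREATE_RESTORE_IDS
	#define PR_TIMER_CREATE_RESTORE_IDS     77
	#define PR_TIMER_CREATE_RESTORE_IDS_OFF  0
	#define PR_TIMER_CREATE_RESTORE_IDS_ON   1
	#define PR_TIMER_CREATE_RESTORE_IDS_GET  2
	#endif

	/* Run the caller's timer-restore callback with restore mode enabled. */
	static int with_timer_id_restore(int (*restore_timers)(void))
	{
		int ret;

		if (prctl(PR_TIMER_CREATE_RESTORE_IDS,
			  PR_TIMER_CREATE_RESTORE_IDS_ON, 0, 0, 0))
			return -1;
		ret = restore_timers();
		prctl(PR_TIMER_CREATE_RESTORE_IDS,
		      PR_TIMER_CREATE_RESTORE_IDS_OFF, 0, 0, 0);
		return ret;
	}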
diff --git a/tools/perf/trace/beauty/include/uapi/linux/stat.h b/tools/perf/trace/beauty/include/uapi/linux/stat.h
index 887a25286441..f78ee3670dd5 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/stat.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/stat.h
@@ -98,43 +98,93 @@ struct statx_timestamp {
*/
struct statx {
/* 0x00 */
- __u32 stx_mask; /* What results were written [uncond] */
- __u32 stx_blksize; /* Preferred general I/O size [uncond] */
- __u64 stx_attributes; /* Flags conveying information about the file [uncond] */
+ /* What results were written [uncond] */
+ __u32 stx_mask;
+
+ /* Preferred general I/O size [uncond] */
+ __u32 stx_blksize;
+
+ /* Flags conveying information about the file [uncond] */
+ __u64 stx_attributes;
+
/* 0x10 */
- __u32 stx_nlink; /* Number of hard links */
- __u32 stx_uid; /* User ID of owner */
- __u32 stx_gid; /* Group ID of owner */
- __u16 stx_mode; /* File mode */
+ /* Number of hard links */
+ __u32 stx_nlink;
+
+ /* User ID of owner */
+ __u32 stx_uid;
+
+ /* Group ID of owner */
+ __u32 stx_gid;
+
+ /* File mode */
+ __u16 stx_mode;
__u16 __spare0[1];
+
/* 0x20 */
- __u64 stx_ino; /* Inode number */
- __u64 stx_size; /* File size */
- __u64 stx_blocks; /* Number of 512-byte blocks allocated */
- __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
+ /* Inode number */
+ __u64 stx_ino;
+
+ /* File size */
+ __u64 stx_size;
+
+ /* Number of 512-byte blocks allocated */
+ __u64 stx_blocks;
+
+ /* Mask to show what's supported in stx_attributes */
+ __u64 stx_attributes_mask;
+
/* 0x40 */
- struct statx_timestamp stx_atime; /* Last access time */
- struct statx_timestamp stx_btime; /* File creation time */
- struct statx_timestamp stx_ctime; /* Last attribute change time */
- struct statx_timestamp stx_mtime; /* Last data modification time */
+ /* Last access time */
+ struct statx_timestamp stx_atime;
+
+ /* File creation time */
+ struct statx_timestamp stx_btime;
+
+ /* Last attribute change time */
+ struct statx_timestamp stx_ctime;
+
+ /* Last data modification time */
+ struct statx_timestamp stx_mtime;
+
/* 0x80 */
- __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
+ /* Device ID of special file [if bdev/cdev] */
+ __u32 stx_rdev_major;
__u32 stx_rdev_minor;
- __u32 stx_dev_major; /* ID of device containing file [uncond] */
+
+ /* ID of device containing file [uncond] */
+ __u32 stx_dev_major;
__u32 stx_dev_minor;
+
/* 0x90 */
__u64 stx_mnt_id;
- __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
- __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+
+ /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_mem_align;
+
+ /* File offset alignment for direct I/O */
+ __u32 stx_dio_offset_align;
+
/* 0xa0 */
- __u64 stx_subvol; /* Subvolume identifier */
- __u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */
- __u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */
+ /* Subvolume identifier */
+ __u64 stx_subvol;
+
+ /* Min atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_min;
+
+ /* Max atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_max;
+
/* 0xb0 */
- __u32 stx_atomic_write_segments_max; /* Max atomic write segment count */
- __u32 __spare1[1];
+ /* Max atomic write segment count */
+ __u32 stx_atomic_write_segments_max;
+
+ /* File offset alignment for direct I/O reads */
+ __u32 stx_dio_read_offset_align;
+
/* 0xb8 */
__u64 __spare3[9]; /* Spare space for future expansion */
+
/* 0x100 */
};
@@ -164,6 +214,7 @@ struct statx {
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */
+#define STATX_DIO_READ_ALIGN 0x00020000U /* Want/got dio read alignment info */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
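The new STATX_DIO_READ_ALIGN bit requests the read-specific direct-I/O alignment field added to struct statx above. A short probe is sketched below; it assumes the build uses uapi headers new enough to carry stx_dio_read_offset_align, and the fallback define hedges against an older libc.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/stat.h>
	#include <stdio.h>

	#ifndef STATX_DIO_READ_ALIGN
	#define STATX_DIO_READ_ALIGN 0x00020000U
	#endif

	int main(void)
	{
		struct statx stx;

		if (statx(AT_FDCWD, "testfile", 0, STATX_DIO_READ_ALIGN, &stx))
			return 1;

		/* The kernel sets the bit in stx_mask only if it filled the field. */
		if (stx.stx_mask & STATX_DIO_READ_ALIGN)
			printf("dio read offset align: %u\n",
			       stx.stx_dio_read_offset_align);
		return 0;
	}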
diff --git a/tools/perf/trace/beauty/include/uapi/sound/asound.h b/tools/perf/trace/beauty/include/uapi/sound/asound.h
index 4cd513215bcd..5a049eeaecce 100644
--- a/tools/perf/trace/beauty/include/uapi/sound/asound.h
+++ b/tools/perf/trace/beauty/include/uapi/sound/asound.h
@@ -716,7 +716,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 5)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -728,6 +728,9 @@ enum {
#define SNDRV_RAWMIDI_INFO_INPUT 0x00000002
#define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004
#define SNDRV_RAWMIDI_INFO_UMP 0x00000008
+#define SNDRV_RAWMIDI_INFO_STREAM_INACTIVE 0x00000010
+
+#define SNDRV_RAWMIDI_DEVICE_UNKNOWN 0
struct snd_rawmidi_info {
unsigned int device; /* RO/WR (control): device number */
@@ -740,7 +743,8 @@ struct snd_rawmidi_info {
unsigned char subname[32]; /* name of active or selected subdevice */
unsigned int subdevices_count;
unsigned int subdevices_avail;
- unsigned char reserved[64]; /* reserved for future use */
+ int tied_device; /* R: tied rawmidi device (UMP/legacy) */
+ unsigned char reserved[60]; /* reserved for future use */
};
#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1974395492d7..3c030da2e477 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2566,25 +2566,6 @@ check:
return false;
}
-static bool evsel__handle_error_quirks(struct evsel *evsel, int error)
-{
- /*
- * AMD core PMU tries to forward events with precise_ip to IBS PMU
- * implicitly. But IBS PMU has more restrictions so it can fail with
- * supported event attributes. Let's forward it back to the core PMU
- * by clearing precise_ip only if it's from precise_max (:P).
- */
- if ((error == -EINVAL || error == -ENOENT) && x86__is_amd_cpu() &&
- evsel->core.attr.precise_ip && evsel->precise_max) {
- evsel->core.attr.precise_ip = 0;
- pr_debug2_peo("removing precise_ip on AMD\n");
- display_attr(&evsel->core.attr);
- return true;
- }
-
- return false;
-}
-
static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads,
int start_cpu_map_idx, int end_cpu_map_idx)
@@ -2730,9 +2711,6 @@ try_fallback:
if (evsel__precise_ip_fallback(evsel))
goto retry_open;
- if (evsel__handle_error_quirks(evsel, err))
- goto retry_open;
-
out_close:
if (err)
threads->err_thread = thread;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 9fb2c1343c7f..0b037e7389a0 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -371,7 +371,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
* has to be pointed by symsrc_filename
*/
if (ofs == 0) {
- if (dso__data_get_fd(dso, machine, &fd) {
+ if (dso__data_get_fd(dso, machine, &fd)) {
ofs = elf_section_offset(fd, ".debug_frame");
dso__data_put_fd(dso);
}
diff --git a/tools/scripts/syscall.tbl b/tools/scripts/syscall.tbl
index ebbdb3c42e9f..580b4e246aec 100644
--- a/tools/scripts/syscall.tbl
+++ b/tools/scripts/syscall.tbl
@@ -407,3 +407,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 1cf82acb2a3e..0ab4b53bb4f3 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -24,4 +24,10 @@ static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
+static inline unsigned long free_reserved_area(void *start, void *end,
+ int poison, const char *s)
+{
+ return 0;
+}
+
#endif
diff --git a/tools/testing/memblock/linux/mutex.h b/tools/testing/memblock/linux/mutex.h
new file mode 100644
index 000000000000..ae3f497165d6
--- /dev/null
+++ b/tools/testing/memblock/linux/mutex.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MUTEX_H
+#define _MUTEX_H
+
+#define DEFINE_MUTEX(name) int name
+
+static inline void dummy_mutex_guard(int *name)
+{
+}
+
+#define guard(mutex) \
+ dummy_##mutex##_guard
+
+#endif /* _MUTEX_H */
\ No newline at end of file
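The stub works because guard(mutex) token-pastes to dummy_mutex_guard, so kernel code written against the cleanup API compiles to a no-op call in the userspace memblock harness. A quick illustration of the expansion (sample_lock is a made-up name):

	#include "linux/mutex.h"	/* the stub above */

	DEFINE_MUTEX(sample_lock);	/* expands to: int sample_lock; */

	static void critical_section(void)
	{
		/* expands to: dummy_mutex_guard(&sample_lock); */
		guard(mutex)(&sample_lock);
		/* ... body runs with no real locking in the test harness ... */
	}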
diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
index 115287ba441b..0703e987df89 100644
--- a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
@@ -25,8 +25,11 @@ static void *spin_lock_thread(void *arg)
while (!READ_ONCE(skip)) {
err = bpf_prog_test_run_opts(prog_fd, &topts);
- ASSERT_OK(err, "test_run");
- ASSERT_OK(topts.retval, "test_run retval");
+ if (err || topts.retval) {
+ ASSERT_OK(err, "test_run");
+ ASSERT_OK(topts.retval, "test_run retval");
+ break;
+ }
}
pthread_exit(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock.c b/tools/testing/selftests/bpf/progs/res_spin_lock.c
index b33385dfbd35..22c4fb8b9266 100644
--- a/tools/testing/selftests/bpf/progs/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/res_spin_lock.c
@@ -38,13 +38,14 @@ int res_spin_lock_test(struct __sk_buff *ctx)
r = bpf_res_spin_lock(&elem1->lock);
if (r)
return r;
- if (!bpf_res_spin_lock(&elem2->lock)) {
+ r = bpf_res_spin_lock(&elem2->lock);
+ if (!r) {
bpf_res_spin_unlock(&elem2->lock);
bpf_res_spin_unlock(&elem1->lock);
return -1;
}
bpf_res_spin_unlock(&elem1->lock);
- return 0;
+ return r != -EDEADLK;
}
SEC("tc")
@@ -124,12 +125,15 @@ int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
/* Trigger AA, after exhausting entries in the held lock table. This
* time, only the timeout can save us, as AA detection won't succeed.
*/
- if (!bpf_res_spin_lock(locks[34])) {
+ ret = bpf_res_spin_lock(locks[34]);
+ if (!ret) {
bpf_res_spin_unlock(locks[34]);
ret = 1;
goto end;
}
+ ret = ret != -ETIMEDOUT ? 2 : 0;
+
end:
for (i = i - 1; i >= 0; i--)
bpf_res_spin_unlock(locks[i]);
diff --git a/tools/testing/selftests/drivers/net/hw/iou-zcrx.py b/tools/testing/selftests/drivers/net/hw/iou-zcrx.py
index 9f271ab6ec04..6a0378e06cab 100755
--- a/tools/testing/selftests/drivers/net/hw/iou-zcrx.py
+++ b/tools/testing/selftests/drivers/net/hw/iou-zcrx.py
@@ -35,6 +35,7 @@ def test_zcrx(cfg) -> None:
rx_ring = _get_rx_ring_entries(cfg)
try:
+ ethtool(f"-G {cfg.ifname} tcp-data-split on", host=cfg.remote)
ethtool(f"-G {cfg.ifname} rx 64", host=cfg.remote)
ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}", host=cfg.remote)
flow_rule_id = _set_flow_rule(cfg, combined_chans - 1)
@@ -48,6 +49,7 @@ def test_zcrx(cfg) -> None:
ethtool(f"-N {cfg.ifname} delete {flow_rule_id}", host=cfg.remote)
ethtool(f"-X {cfg.ifname} default", host=cfg.remote)
ethtool(f"-G {cfg.ifname} rx {rx_ring}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} tcp-data-split auto", host=cfg.remote)
def test_zcrx_oneshot(cfg) -> None:
@@ -59,6 +61,7 @@ def test_zcrx_oneshot(cfg) -> None:
rx_ring = _get_rx_ring_entries(cfg)
try:
+ ethtool(f"-G {cfg.ifname} tcp-data-split on", host=cfg.remote)
ethtool(f"-G {cfg.ifname} rx 64", host=cfg.remote)
ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}", host=cfg.remote)
flow_rule_id = _set_flow_rule(cfg, combined_chans - 1)
@@ -72,6 +75,7 @@ def test_zcrx_oneshot(cfg) -> None:
ethtool(f"-N {cfg.ifname} delete {flow_rule_id}", host=cfg.remote)
ethtool(f"-X {cfg.ifname} default", host=cfg.remote)
ethtool(f"-G {cfg.ifname} rx {rx_ring}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} tcp-data-split auto", host=cfg.remote)
def main() -> None:
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc
new file mode 100644
index 000000000000..b6d6a312ead5
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc
@@ -0,0 +1,177 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: ftrace - function graph filters
+# requires: set_ftrace_filter function_graph:tracer
+
+# Make sure that function graph filtering works
+
+INSTANCE1="instances/test1_$$"
+INSTANCE2="instances/test2_$$"
+
+WD=`pwd`
+
+do_reset() {
+ cd $WD
+ if [ -d $INSTANCE1 ]; then
+ echo nop > $INSTANCE1/current_tracer
+ rmdir $INSTANCE1
+ fi
+ if [ -d $INSTANCE2 ]; then
+ echo nop > $INSTANCE2/current_tracer
+ rmdir $INSTANCE2
+ fi
+}
+
+mkdir $INSTANCE1
+if ! grep -q function_graph $INSTANCE1/available_tracers; then
+ echo "function_graph not allowed with instances"
+ rmdir $INSTANCE1
+ exit_unsupported
+fi
+
+mkdir $INSTANCE2
+
+fail() { # msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+disable_tracing
+clear_trace
+
+function_count() {
+ search=$1
+ vsearch=$2
+
+ if [ -z "$search" ]; then
+ cat enabled_functions | wc -l
+ elif [ -z "$vsearch" ]; then
+ grep $search enabled_functions | wc -l
+ else
+ grep $search enabled_functions | grep $vsearch | wc -l
+ fi
+}
+
+set_fgraph() {
+ instance=$1
+ filter="$2"
+ notrace="$3"
+
+ echo "$filter" > $instance/set_ftrace_filter
+ echo "$notrace" > $instance/set_ftrace_notrace
+ echo function_graph > $instance/current_tracer
+}
+
+check_functions() {
+ orig_cnt=$1
+ test=$2
+
+ cnt=`function_count $test`
+ if [ $cnt -gt $orig_cnt ]; then
+ fail
+ fi
+}
+
+check_cnt() {
+ orig_cnt=$1
+ search=$2
+ vsearch=$3
+
+ cnt=`function_count $search $vsearch`
+ if [ $cnt -gt $orig_cnt ]; then
+ fail
+ fi
+}
+
+reset_graph() {
+ instance=$1
+ echo nop > $instance/current_tracer
+}
+
+# get any functions that were enabled before the test
+total_cnt=`function_count`
+sched_cnt=`function_count sched`
+lock_cnt=`function_count lock`
+time_cnt=`function_count time`
+clock_cnt=`function_count clock`
+locks_clock_cnt=`function_count locks clock`
+clock_locks_cnt=`function_count clock locks`
+
+# Trace functions with "sched" but not "time"
+set_fgraph $INSTANCE1 '*sched*' '*time*'
+
+# Make sure "time" isn't listed
+check_functions $time_cnt 'time'
+instance1_cnt=`function_count`
+
+# Trace functions with "lock" but not "clock"
+set_fgraph $INSTANCE2 '*lock*' '*clock*'
+instance1_2_cnt=`function_count`
+
+# Turn off the first instance
+reset_graph $INSTANCE1
+
+# The second instance doesn't trace "clock" functions
+check_functions $clock_cnt 'clock'
+instance2_cnt=`function_count`
+
+# Start from a clean slate
+reset_graph $INSTANCE2
+check_functions $total_cnt
+
+# Trace functions with "lock" but not "clock"
+set_fgraph $INSTANCE2 '*lock*' '*clock*'
+
+# This should match the last time instance 2 was by itself
+cnt=`function_count`
+if [ $instance2_cnt -ne $cnt ]; then
+ fail
+fi
+
+# And it should not be tracing "clock" functions
+check_functions $clock_cnt 'clock'
+
+# Trace functions with "sched" but not "time"
+set_fgraph $INSTANCE1 '*sched*' '*time*'
+
+# This should match the last time both instances were enabled
+cnt=`function_count`
+if [ $instance1_2_cnt -ne $cnt ]; then
+ fail
+fi
+
+# Turn off the second instance
+reset_graph $INSTANCE2
+
+# This should match the last time instance 1 was by itself
+cnt=`function_count`
+if [ $instance1_cnt -ne $cnt ]; then
+ fail
+fi
+
+# And it should not be tracing "time" functions
+check_functions $time_cnt 'time'
+
+# Start from a clean slate
+reset_graph $INSTANCE1
+check_functions $total_cnt
+
+# Enable all functions but those that have "locks"
+set_fgraph $INSTANCE1 '' '*locks*'
+
+# Enable all functions but those that have "clock"
+set_fgraph $INSTANCE2 '' '*clock*'
+
+# If a function has "locks" it should not have "clock"
+check_cnt $locks_clock_cnt locks clock
+
+# If a function has "clock" it should not have "locks"
+check_cnt $clock_locks_cnt clock locks
+
+reset_graph $INSTANCE1
+reset_graph $INSTANCE2
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
index efabfcbe0b49..17ed3e9917ca 100644
--- a/tools/testing/selftests/mincore/mincore_selftest.c
+++ b/tools/testing/selftests/mincore/mincore_selftest.c
@@ -283,8 +283,7 @@ out_free:
/*
* Test mincore() behavior on a page backed by a tmpfs file. This test
- * performs the same steps as the previous one. However, we don't expect
- * any readahead in this case.
+ * performs the same steps as the previous one.
*/
TEST(check_tmpfs_mmap)
{
@@ -295,7 +294,6 @@ TEST(check_tmpfs_mmap)
int page_size;
int fd;
int i;
- int ra_pages = 0;
page_size = sysconf(_SC_PAGESIZE);
vec_size = FILE_SIZE / page_size;
@@ -338,8 +336,7 @@ TEST(check_tmpfs_mmap)
}
/*
- * Touch a page in the middle of the mapping. We expect only
- * that page to be fetched into memory.
+ * Touch a page in the middle of the mapping.
*/
addr[FILE_SIZE / 2] = 1;
retval = mincore(addr, FILE_SIZE, vec);
@@ -348,15 +345,6 @@ TEST(check_tmpfs_mmap)
TH_LOG("Page not found in memory after use");
}
- i = FILE_SIZE / 2 / page_size + 1;
- while (i < vec_size && vec[i]) {
- ra_pages++;
- i++;
- }
- ASSERT_EQ(ra_pages, 0) {
- TH_LOG("Read-ahead pages found in memory");
- }
-
munmap(addr, FILE_SIZE);
close(fd);
free(vec);
diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
index 67df7b47087f..e1fe16bcbbe8 100755
--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
@@ -29,7 +29,7 @@ fi
if [[ $cgroup2 ]]; then
cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
if [[ -z "$cgroup_path" ]]; then
- cgroup_path=/dev/cgroup/memory
+ cgroup_path=$(mktemp -d)
mount -t cgroup2 none $cgroup_path
do_umount=1
fi
@@ -37,7 +37,7 @@ if [[ $cgroup2 ]]; then
else
cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
if [[ -z "$cgroup_path" ]]; then
- cgroup_path=/dev/cgroup/memory
+ cgroup_path=$(mktemp -d)
mount -t cgroup memory,hugetlb $cgroup_path
do_umount=1
fi
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index f0cb14ea8608..b6cfe0a4b7df 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -293,7 +293,7 @@ static void do_test_vmsplice_in_parent(char *mem, size_t size,
.iov_base = mem,
.iov_len = size,
};
- ssize_t cur, total, transferred;
+ ssize_t cur, total, transferred = 0;
struct comm_pipes comm_pipes;
char *old, *new;
int ret, fds[2];
diff --git a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
index 11f9bbe7dc22..0b0d4ba1af27 100755
--- a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
+++ b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
@@ -23,7 +23,7 @@ fi
if [[ $cgroup2 ]]; then
CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}')
if [[ -z "$CGROUP_ROOT" ]]; then
- CGROUP_ROOT=/dev/cgroup/memory
+ CGROUP_ROOT=$(mktemp -d)
mount -t cgroup2 none $CGROUP_ROOT
do_umount=1
fi
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 679542f565a4..532bb732bc6d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -39,6 +39,7 @@ scm_rights
sk_bind_sendto_listen
sk_connect_zero_addr
sk_so_peek_off
+skf_net_off
socket
so_incoming_cpu
so_netns_cookie
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 6d718b478ed8..124078b56fa4 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -106,6 +106,8 @@ TEST_PROGS += ipv6_route_update_soft_lockup.sh
TEST_PROGS += busy_poll_test.sh
TEST_GEN_PROGS += proc_net_pktgen
TEST_PROGS += lwt_dst_cache_ref_loop.sh
+TEST_PROGS += skf_net_off.sh
+TEST_GEN_FILES += skf_net_off
# YNL files, must be before "include ..lib.mk"
YNL_GEN_FILES := busy_poller netlink-dumps
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index b866bab1d92a..c7cea556b416 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -359,6 +359,23 @@ fib_rule6_test()
"$getnomatch" "iif flowlabel masked redirect to table" \
"iif flowlabel masked no redirect to table"
fi
+
+ $IP link show dev $DEV | grep -q vrf0
+ if [ $? -eq 0 ]; then
+ match="oif vrf0"
+ getmatch="oif $DEV"
+ getnomatch="oif lo"
+ fib_rule6_test_match_n_redirect "$match" "$getmatch" \
+ "$getnomatch" "VRF oif redirect to table" \
+ "VRF oif no redirect to table"
+
+ match="from $SRC_IP6 iif vrf0"
+ getmatch="from $SRC_IP6 iif $DEV"
+ getnomatch="from $SRC_IP6 iif lo"
+ fib_rule6_test_match_n_redirect "$match" "$getmatch" \
+ "$getnomatch" "VRF iif redirect to table" \
+ "VRF iif no redirect to table"
+ fi
}
fib_rule6_vrf_test()
@@ -635,6 +652,23 @@ fib_rule4_test()
"$getnomatch" "iif dscp masked redirect to table" \
"iif dscp masked no redirect to table"
fi
+
+ $IP link show dev $DEV | grep -q vrf0
+ if [ $? -eq 0 ]; then
+ match="oif vrf0"
+ getmatch="oif $DEV"
+ getnomatch="oif lo"
+ fib_rule4_test_match_n_redirect "$match" "$getmatch" \
+ "$getnomatch" "VRF oif redirect to table" \
+ "VRF oif no redirect to table"
+
+ match="from $SRC_IP iif vrf0"
+ getmatch="from $SRC_IP iif $DEV"
+ getnomatch="from $SRC_IP iif lo"
+ fib_rule4_test_match_n_redirect "$match" "$getmatch" \
+ "$getnomatch" "VRF iif redirect to table" \
+ "VRF iif no redirect to table"
+ fi
}
fib_rule4_vrf_test()
diff --git a/tools/testing/selftests/net/skf_net_off.c b/tools/testing/selftests/net/skf_net_off.c
new file mode 100644
index 000000000000..1fdf61d6cd7f
--- /dev/null
+++ b/tools/testing/selftests/net/skf_net_off.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Open a tun device.
+ *
+ * [modifications: use IFF_NAPI_FRAGS, add sk filter]
+ *
+ * Expects the device to have been configured previously, e.g.:
+ * sudo ip tuntap add name tap1 mode tap
+ * sudo ip link set tap1 up
+ * sudo ip link set dev tap1 addr 02:00:00:00:00:01
+ * sudo ip -6 addr add fdab::1 peer fdab::2 dev tap1 nodad
+ *
+ * And to avoid premature pskb_may_pull:
+ *
+ * sudo ethtool -K tap1 gro off
+ * sudo bash -c 'echo 0 > /proc/sys/net/ipv4/ip_early_demux'
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <linux/filter.h>
+#include <linux/if.h>
+#include <linux/if_packet.h>
+#include <linux/if_tun.h>
+#include <linux/ipv6.h>
+#include <netinet/if_ether.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/poll.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <unistd.h>
+
+static bool cfg_do_filter;
+static bool cfg_do_frags;
+static int cfg_dst_port = 8000;
+static char *cfg_ifname;
+
+static int tun_open(const char *tun_name)
+{
+ struct ifreq ifr = {0};
+ int fd, ret;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (fd == -1)
+ error(1, errno, "open /dev/net/tun");
+
+ ifr.ifr_flags = IFF_TAP;
+ if (cfg_do_frags)
+ ifr.ifr_flags |= IFF_NAPI | IFF_NAPI_FRAGS;
+
+ strncpy(ifr.ifr_name, tun_name, IFNAMSIZ - 1);
+
+ ret = ioctl(fd, TUNSETIFF, &ifr);
+ if (ret)
+ error(1, ret, "ioctl TUNSETIFF");
+
+ return fd;
+}
+
+static void sk_set_filter(int fd)
+{
+ const int offset_proto = offsetof(struct ip6_hdr, ip6_nxt);
+ const int offset_dport = sizeof(struct ip6_hdr) + offsetof(struct udphdr, dest);
+
+ /* Filter UDP packets with destination port cfg_dst_port */
+ struct sock_filter filter_code[] = {
+ BPF_STMT(BPF_LD + BPF_B + BPF_ABS, SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, PACKET_HOST, 0, 4),
+ BPF_STMT(BPF_LD + BPF_B + BPF_ABS, SKF_NET_OFF + offset_proto),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 0, 2),
+ BPF_STMT(BPF_LD + BPF_H + BPF_ABS, SKF_NET_OFF + offset_dport),
+ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, cfg_dst_port, 1, 0),
+ BPF_STMT(BPF_RET + BPF_K, 0),
+ BPF_STMT(BPF_RET + BPF_K, 0xFFFF),
+ };
+
+ struct sock_fprog filter = {
+ sizeof(filter_code) / sizeof(filter_code[0]),
+ filter_code,
+ };
+
+ if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &filter, sizeof(filter)))
+ error(1, errno, "setsockopt attach filter");
+}
+
+static int raw_open(void)
+{
+ int fd;
+
+ fd = socket(PF_INET6, SOCK_RAW, IPPROTO_UDP);
+ if (fd == -1)
+ error(1, errno, "socket raw (udp)");
+
+ if (cfg_do_filter)
+ sk_set_filter(fd);
+
+ return fd;
+}
+
+static void tun_write(int fd)
+{
+ const char eth_src[] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
+ const char eth_dst[] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
+ struct tun_pi pi = {0};
+ struct ipv6hdr ip6h = {0};
+ struct udphdr uh = {0};
+ struct ethhdr eth = {0};
+ uint32_t payload;
+ struct iovec iov[5];
+ int ret;
+
+ pi.proto = htons(ETH_P_IPV6);
+
+ memcpy(eth.h_source, eth_src, sizeof(eth_src));
+ memcpy(eth.h_dest, eth_dst, sizeof(eth_dst));
+ eth.h_proto = htons(ETH_P_IPV6);
+
+ ip6h.version = 6;
+ ip6h.payload_len = htons(sizeof(uh) + sizeof(uint32_t));
+ ip6h.nexthdr = IPPROTO_UDP;
+ ip6h.hop_limit = 8;
+ if (inet_pton(AF_INET6, "fdab::2", &ip6h.saddr) != 1)
+ error(1, errno, "inet_pton src");
+ if (inet_pton(AF_INET6, "fdab::1", &ip6h.daddr) != 1)
+ error(1, errno, "inet_pton dst");
+
+ uh.source = htons(8000);
+ uh.dest = htons(cfg_dst_port);
+ uh.len = ip6h.payload_len;
+ uh.check = 0;
+
+ payload = htonl(0xABABABAB); /* Covered in IPv6 length */
+
+ iov[0].iov_base = &pi;
+ iov[0].iov_len = sizeof(pi);
+ iov[1].iov_base = &eth;
+ iov[1].iov_len = sizeof(eth);
+ iov[2].iov_base = &ip6h;
+ iov[2].iov_len = sizeof(ip6h);
+ iov[3].iov_base = &uh;
+ iov[3].iov_len = sizeof(uh);
+ iov[4].iov_base = &payload;
+ iov[4].iov_len = sizeof(payload);
+
+ ret = writev(fd, iov, sizeof(iov) / sizeof(iov[0]));
+ if (ret <= 0)
+ error(1, errno, "writev");
+}
+
+static void raw_read(int fd)
+{
+ struct timeval tv = { .tv_usec = 100 * 1000 };
+ struct msghdr msg = {0};
+ struct iovec iov[2];
+ struct udphdr uh;
+ uint32_t payload[2];
+ int ret;
+
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
+ error(1, errno, "setsockopt rcvtimeo udp");
+
+ iov[0].iov_base = &uh;
+ iov[0].iov_len = sizeof(uh);
+
+ iov[1].iov_base = payload;
+ iov[1].iov_len = sizeof(payload);
+
+ msg.msg_iov = iov;
+ msg.msg_iovlen = sizeof(iov) / sizeof(iov[0]);
+
+ ret = recvmsg(fd, &msg, 0);
+ if (ret <= 0)
+ error(1, errno, "read raw");
+ if (ret != sizeof(uh) + sizeof(payload[0]))
+ error(1, errno, "read raw: len=%d\n", ret);
+
+ fprintf(stderr, "raw recv: 0x%x\n", payload[0]);
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt(argc, argv, "fFi:")) != -1) {
+ switch (c) {
+ case 'f':
+ cfg_do_filter = true;
+ printf("bpf filter enabled\n");
+ break;
+ case 'F':
+ cfg_do_frags = true;
+ printf("napi frags mode enabled\n");
+ break;
+ case 'i':
+ cfg_ifname = optarg;
+ break;
+ default:
+ error(1, 0, "unknown option %c", optopt);
+ break;
+ }
+ }
+
+ if (!cfg_ifname)
+ error(1, 0, "must specify tap interface name (-i)");
+}
+
+int main(int argc, char **argv)
+{
+ int fdt, fdr;
+
+ parse_opts(argc, argv);
+
+ fdr = raw_open();
+ fdt = tun_open(cfg_ifname);
+
+ tun_write(fdt);
+ raw_read(fdr);
+
+ if (close(fdt))
+ error(1, errno, "close tun");
+ if (close(fdr))
+ error(1, errno, "close udp");
+
+ fprintf(stderr, "OK\n");
+ return 0;
+}
+
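For readers unfamiliar with the classic-BPF ancillary offsets used above: SKF_NET_OFF makes loads relative to the network-layer header, so the same filter works whether or not the headers sit in the linear skb area, which is exactly what the -F frags mode exercises. The offsets in the test resolve as follows (a worked example, values from the structures used above):

	/* offset_proto = offsetof(struct ip6_hdr, ip6_nxt)          =  6
	 * offset_dport = sizeof(struct ip6_hdr)                     = 40
	 *              + offsetof(struct udphdr, dest)              = +2 -> 42
	 *
	 * So the filter loads the byte at  SKF_NET_OFF + 6  (IPv6 next header)
	 * and the halfword at             SKF_NET_OFF + 42 (UDP dest port),
	 * both counted from the IPv6 header, not the ethernet frame.
	 */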
diff --git a/tools/testing/selftests/net/skf_net_off.sh b/tools/testing/selftests/net/skf_net_off.sh
new file mode 100755
index 000000000000..5da5066fb465
--- /dev/null
+++ b/tools/testing/selftests/net/skf_net_off.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly NS="ns-$(mktemp -u XXXXXX)"
+
+cleanup() {
+ ip netns del $NS
+}
+
+ip netns add $NS
+trap cleanup EXIT
+
+ip -netns $NS link set lo up
+ip -netns $NS tuntap add name tap1 mode tap
+ip -netns $NS link set tap1 up
+ip -netns $NS link set dev tap1 addr 02:00:00:00:00:01
+ip -netns $NS -6 addr add fdab::1 peer fdab::2 dev tap1 nodad
+ip netns exec $NS ethtool -K tap1 gro off
+
+# disable early demux, else udp_v6_early_demux pulls udp header into linear
+ip netns exec $NS sysctl -w net.ipv4.ip_early_demux=0
+
+echo "no filter"
+ip netns exec $NS ./skf_net_off -i tap1
+
+echo "filter, linear skb (-f)"
+ip netns exec $NS ./skf_net_off -i tap1 -f
+
+echo "filter, fragmented skb (-f) (-F)"
+ip netns exec $NS ./skf_net_off -i tap1 -f -F
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json b/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json
index 1ba96c467754..d9fc62ab476c 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json
@@ -412,5 +412,27 @@
"teardown": [
"$TC qdisc del dev $DUMMY ingress"
]
+ },
+ {
+ "id": "33f4",
+ "name": "Check echo of big filter command",
+ "category": [
+ "infra",
+ "u32"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY parent root handle 10: fq_codel"
+ ],
+ "cmdUnderTest": "bash -c '$TC -echo filter add dev $DUMMY parent 10: u32 match u32 0 0 $(for i in $(seq 32); do echo action pedit munge ip dport set 22; done) | grep \"added filter\"'",
+ "verifyCmd": "",
+ "expExitCode": "0",
+ "matchCount": "0",
+ "matchPattern": "",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY parent root fq_codel"
+ ]
}
]
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 66dbb362385f..0f97fb0d19e1 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -150,7 +150,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
if (kmalloc_verbose)
- pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
+ pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);
pthread_mutex_lock(&cachep->lock);
for (int i = 0; i < size; i++)
@@ -168,7 +168,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
size_t i;
if (kmalloc_verbose)
- pr_debug("Bulk alloc %lu\n", size);
+ pr_debug("Bulk alloc %zu\n", size);
pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs >= size) {
diff --git a/tools/testing/shared/linux/cleanup.h b/tools/testing/shared/linux/cleanup.h
new file mode 100644
index 000000000000..ea3081426ee9
--- /dev/null
+++ b/tools/testing/shared/linux/cleanup.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../../../../include/linux/cleanup.h"