-rw-r--r-- Documentation/vm/hmm.rst | 2
-rw-r--r-- MAINTAINERS | 3
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 4
-rw-r--r-- arch/arm/mm/Kconfig | 5
-rw-r--r-- arch/arm/mm/dma-mapping.c | 61
-rw-r--r-- arch/arm/mm/init.c | 5
-rw-r--r-- arch/arm64/Makefile | 2
-rw-r--r-- arch/arm64/include/asm/arch_gicv3.h | 6
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 7
-rw-r--r-- arch/arm64/include/asm/daifflags.h | 2
-rw-r--r-- arch/arm64/include/asm/efi.h | 6
-rw-r--r-- arch/arm64/include/asm/memory.h | 10
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 4
-rw-r--r-- arch/arm64/include/asm/ptrace.h | 2
-rw-r--r-- arch/arm64/include/asm/vdso/compat_gettimeofday.h | 40
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 8
-rw-r--r-- arch/arm64/kernel/debug-monitors.c | 14
-rw-r--r-- arch/arm64/kernel/hw_breakpoint.c | 11
-rw-r--r-- arch/arm64/kernel/module.c | 4
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 40
-rw-r--r-- arch/arm64/kernel/return_address.c | 3
-rw-r--r-- arch/arm64/kernel/smp.c | 2
-rw-r--r-- arch/arm64/kernel/stacktrace.c | 3
-rw-r--r-- arch/arm64/mm/fault.c | 57
-rw-r--r-- arch/mips/vdso/vdso.h | 1
-rw-r--r-- arch/parisc/Makefile | 5
-rw-r--r-- arch/parisc/boot/compressed/Makefile | 4
-rw-r--r-- arch/parisc/boot/compressed/vmlinux.lds.S | 4
-rw-r--r-- arch/parisc/configs/defconfig (renamed from arch/parisc/configs/default_defconfig) | 0
-rw-r--r-- arch/parisc/kernel/ftrace.c | 3
-rw-r--r-- arch/parisc/math-emu/Makefile | 1
-rw-r--r-- arch/parisc/mm/fault.c | 1
-rw-r--r-- arch/riscv/boot/dts/sifive/fu540-c000.dtsi | 1
-rw-r--r-- arch/riscv/configs/defconfig | 10
-rw-r--r-- arch/riscv/kernel/vdso/Makefile | 2
-rw-r--r-- arch/s390/boot/boot.h | 1
-rw-r--r-- arch/s390/boot/kaslr.c | 1
-rw-r--r-- arch/s390/configs/debug_defconfig | 330
-rw-r--r-- arch/s390/configs/defconfig | 233
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 31
-rw-r--r-- arch/s390/include/asm/qdio.h | 10
-rw-r--r-- arch/s390/include/asm/setup.h | 1
-rw-r--r-- arch/s390/kernel/machine_kexec_reloc.c | 1
-rw-r--r-- arch/s390/kernel/perf_cpum_cf_diag.c | 2
-rw-r--r-- arch/s390/lib/xor.c | 1
-rw-r--r-- arch/s390/mm/fault.c | 3
-rw-r--r-- arch/s390/mm/gmap.c | 4
-rw-r--r-- arch/x86/include/asm/vdso/gettimeofday.h | 36
-rw-r--r-- drivers/acpi/device_pm.c | 4
-rw-r--r-- drivers/acpi/scan.c | 6
-rw-r--r-- drivers/ata/libahci_platform.c | 3
-rw-r--r-- drivers/ata/libata-zpodd.c | 2
-rw-r--r-- drivers/block/ataflop.c | 1
-rw-r--r-- drivers/block/loop.c | 16
-rw-r--r-- drivers/block/nbd.c | 2
-rw-r--r-- drivers/bluetooth/hci_ath.c | 3
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 3
-rw-r--r-- drivers/bluetooth/hci_intel.c | 3
-rw-r--r-- drivers/bluetooth/hci_ldisc.c | 13
-rw-r--r-- drivers/bluetooth/hci_mrvl.c | 3
-rw-r--r-- drivers/bluetooth/hci_qca.c | 3
-rw-r--r-- drivers/bluetooth/hci_uart.h | 1
-rw-r--r-- drivers/char/ipmi/ipmb_dev_int.c | 2
-rw-r--r-- drivers/clk/at91/clk-generated.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183.c | 46
-rw-r--r-- drivers/clk/renesas/renesas-cpg-mssr.c | 16
-rw-r--r-- drivers/clk/sprd/Kconfig | 1
-rw-r--r-- drivers/gpio/gpiolib.c | 23
-rw-r--r-- drivers/gpu/drm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 74
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 23
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 48
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 36
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 34
-rw-r--r-- drivers/gpu/drm/bochs/bochs_kms.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 4
-rw-r--r-- drivers/gpu/drm/drm_client.c | 60
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 51
-rw-r--r-- drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_scaler.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 27
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 24
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 58
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 31
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/mock_engine.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_reset.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/fb_decoder.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 59
-rw-r--r-- drivers/gpu/drm/i915/gvt/trace_points.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 25
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 67
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 76
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.h | 15
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 1
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 16
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 47
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_svm.c | 47
-rw-r--r-- drivers/infiniband/core/core_priv.h | 5
-rw-r--r-- drivers/infiniband/core/counters.c | 11
-rw-r--r-- drivers/infiniband/core/device.c | 102
-rw-r--r-- drivers/infiniband/core/mad.c | 20
-rw-r--r-- drivers/infiniband/core/user_mad.c | 6
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 7
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.c | 13
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.h | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 14
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.h | 7
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 11
-rw-r--r-- drivers/infiniband/hw/hfi1/rc.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/tid_rdma.c | 43
-rw-r--r-- drivers/infiniband/hw/hfi1/verbs.c | 2
-rw-r--r-- drivers/infiniband/hw/hns/Kconfig | 6
-rw-r--r-- drivers/infiniband/hw/hns/Makefile | 8
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_db.c | 15
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 7
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 50
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 7
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 13
-rw-r--r-- drivers/infiniband/hw/qedr/main.c | 10
-rw-r--r-- drivers/infiniband/sw/siw/siw_cm.c | 3
-rw-r--r-- drivers/infiniband/sw/siw/siw_main.c | 1
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp.c | 6
-rw-r--r-- drivers/iommu/virtio-iommu.c | 40
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 4
-rw-r--r-- drivers/irqchip/irq-imx-gpcv2.c | 1
-rw-r--r-- drivers/irqchip/irq-mbigen.c | 9
-rw-r--r-- drivers/md/dm-table.c | 16
-rw-r--r-- drivers/mmc/core/queue.c | 5
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 3
-rw-r--r-- drivers/mmc/host/meson-mx-sdio.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-sprd.c | 1
-rw-r--r-- drivers/perf/arm_pmu.c | 2
-rw-r--r-- drivers/platform/olpc/olpc-xo175-ec.c | 6
-rw-r--r-- drivers/platform/x86/intel_pmc_core.c | 1
-rw-r--r-- drivers/platform/x86/pcengines-apuv2.c | 6
-rw-r--r-- drivers/s390/block/dasd_alias.c | 22
-rw-r--r-- drivers/s390/char/con3215.c | 1
-rw-r--r-- drivers/s390/char/tape_core.c | 3
-rw-r--r-- drivers/s390/cio/vfio_ccw_async.c | 2
-rw-r--r-- drivers/s390/crypto/ap_queue.c | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype6.c | 17
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 138
-rw-r--r-- drivers/scsi/hpsa.c | 14
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 5
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 12
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r-- drivers/vhost/vhost.h | 2
-rw-r--r-- drivers/xen/gntdev.c | 2
-rw-r--r-- drivers/xen/privcmd.c | 35
-rw-r--r-- drivers/xen/swiotlb-xen.c | 34
-rw-r--r-- drivers/xen/xen-pciback/conf_space_capability.c | 3
-rw-r--r-- drivers/xen/xlate_mmu.c | 32
-rw-r--r-- fs/block_dev.c | 86
-rw-r--r-- fs/btrfs/backref.c | 2
-rw-r--r-- fs/btrfs/send.c | 77
-rw-r--r-- fs/btrfs/transaction.c | 32
-rw-r--r-- fs/btrfs/transaction.h | 3
-rw-r--r-- fs/coredump.c | 44
-rw-r--r-- fs/dax.c | 2
-rw-r--r-- fs/f2fs/file.c | 63
-rw-r--r-- fs/f2fs/gc.c | 70
-rw-r--r-- fs/f2fs/super.c | 48
-rw-r--r-- fs/gfs2/bmap.c | 15
-rw-r--r-- fs/io_uring.c | 3
-rw-r--r-- fs/ocfs2/xattr.c | 3
-rw-r--r-- fs/super.c | 5
-rw-r--r-- fs/xfs/scrub/dabtree.c | 6
-rw-r--r-- fs/xfs/xfs_itable.c | 3
-rw-r--r-- include/asm-generic/getorder.h | 50
-rw-r--r-- include/drm/drm_client.h | 2
-rw-r--r-- include/drm/drm_mode_config.h | 7
-rw-r--r-- include/linux/clk.h | 1
-rw-r--r-- include/linux/fs.h | 6
-rw-r--r-- include/linux/gpio/consumer.h | 64
-rw-r--r-- include/linux/hmm.h | 54
-rw-r--r-- include/linux/mod_devicetable.h | 1
-rw-r--r-- include/linux/page-flags-layout.h | 18
-rw-r--r-- include/linux/page-flags.h | 4
-rw-r--r-- include/rdma/ib_verbs.h | 4
-rw-r--r-- include/rdma/rdmavt_qp.h | 9
-rw-r--r-- include/scsi/libfc.h | 52
-rw-r--r-- include/scsi/libfcoe.h | 1
-rw-r--r-- include/trace/events/dma_fence.h | 2
-rw-r--r-- include/trace/events/napi.h | 4
-rw-r--r-- include/trace/events/qdisc.h | 4
-rw-r--r-- include/trace/events/tegra_apb_dma.h | 4
-rw-r--r-- include/uapi/linux/virtio_iommu.h | 32
-rw-r--r-- include/xen/xen-ops.h | 3
-rw-r--r-- kernel/Makefile | 1
-rw-r--r-- kernel/dma/contiguous.c | 8
-rw-r--r-- kernel/dma/mapping.c | 13
-rw-r--r-- kernel/exit.c | 5
-rw-r--r-- kernel/signal.c | 3
-rw-r--r-- kernel/trace/trace_functions_graph.c | 17
-rw-r--r-- lib/Kconfig.kasan | 11
-rw-r--r-- lib/Makefile | 3
-rw-r--r-- lib/test_meminit.c | 2
-rw-r--r-- lib/vdso/gettimeofday.c | 79
-rw-r--r-- mm/Makefile | 1
-rw-r--r-- mm/balloon_compaction.c | 69
-rw-r--r-- mm/compaction.c | 11
-rw-r--r-- mm/hmm.c | 10
-rw-r--r-- mm/kmemleak.c | 2
-rw-r--r-- mm/memory_hotplug.c | 2
-rw-r--r-- mm/memremap.c (renamed from kernel/memremap.c) | 0
-rw-r--r-- mm/migrate.c | 21
-rw-r--r-- mm/slub.c | 8
-rw-r--r-- mm/vmscan.c | 9
-rw-r--r-- security/selinux/ss/policydb.c | 6
-rw-r--r-- sound/core/pcm_native.c | 3
-rw-r--r-- sound/hda/hdac_i915.c | 10
-rw-r--r-- sound/usb/helper.c | 2
-rw-r--r-- tools/arch/arm/include/uapi/asm/kvm.h | 12
-rw-r--r-- tools/arch/arm64/include/uapi/asm/kvm.h | 10
-rw-r--r-- tools/arch/powerpc/include/uapi/asm/mman.h | 4
-rw-r--r-- tools/arch/sparc/include/uapi/asm/mman.h | 4
-rw-r--r-- tools/arch/x86/include/uapi/asm/kvm.h | 22
-rw-r--r-- tools/arch/x86/include/uapi/asm/vmx.h | 1
-rw-r--r-- tools/include/uapi/asm-generic/mman-common.h | 15
-rw-r--r-- tools/include/uapi/asm-generic/mman.h | 10
-rw-r--r-- tools/include/uapi/asm-generic/unistd.h | 8
-rw-r--r-- tools/include/uapi/drm/drm.h | 1
-rw-r--r-- tools/include/uapi/drm/i915_drm.h | 209
-rw-r--r-- tools/include/uapi/linux/if_link.h | 5
-rw-r--r-- tools/include/uapi/linux/kvm.h | 3
-rw-r--r-- tools/include/uapi/linux/sched.h | 30
-rw-r--r-- tools/include/uapi/linux/usbdevice_fs.h | 26
-rw-r--r-- tools/lib/bpf/hashmap.h | 5
-rw-r--r-- tools/perf/Documentation/perf.data-file-format.txt | 2
-rw-r--r-- tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 2
-rwxr-xr-x tools/perf/trace/beauty/usbdevfs_ioctl.sh | 9
-rw-r--r-- tools/perf/util/header.c | 9
-rw-r--r-- tools/testing/selftests/cgroup/cgroup_util.c | 3
-rwxr-xr-x tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh | 4
-rwxr-xr-x tools/testing/selftests/kmod/kmod.sh | 6
-rw-r--r-- tools/testing/selftests/livepatch/functions.sh | 20
-rw-r--r-- tools/testing/selftests/pidfd/pidfd_test.c | 6
-rw-r--r-- tools/testing/selftests/x86/test_vsyscall.c | 2
278 files changed, 2944 insertions, 1654 deletions
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 7d90964abbb0..710ce1c701bf 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -237,7 +237,7 @@ The usage pattern is::
ret = hmm_range_snapshot(&range);
if (ret) {
up_read(&mm->mmap_sem);
- if (ret == -EAGAIN) {
+ if (ret == -EBUSY) {
/*
* No need to check hmm_range_wait_until_valid() return value
* on retry we will get proper error with hmm_range_snapshot()
diff --git a/MAINTAINERS b/MAINTAINERS
index 6426db5198f0..6c49b48cfd69 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6322,7 +6322,8 @@ F: Documentation/devicetree/bindings/counter/ftm-quaddec.txt
F: drivers/counter/ftm-quaddec.c
FLOPPY DRIVER
-S: Orphan
+M: Denis Efremov <efremov@linux.com>
+S: Odd Fixes
L: linux-block@vger.kernel.org
F: drivers/block/floppy.c
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7e0486ad1318..dba9355e2484 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,9 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
+ if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
+ return &arm_dma_ops;
+ return NULL;
}
#ifdef __arch_page_to_dma
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 820b60a50125..c54cd7ed90ba 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -663,6 +663,11 @@ config ARM_LPAE
depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
!CPU_32v4 && !CPU_32v3
select PHYS_ADDR_T_64BIT
+ select SWIOTLB
+ select ARCH_HAS_DMA_COHERENT_TO_PFN
+ select ARCH_HAS_DMA_MMAP_PGPROT
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
help
Say Y if you have an ARMv7 processor supporting the LPAE page
table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4789c60a86e3..6774b03aa405 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
@@ -1125,6 +1126,19 @@ int arm_dma_supported(struct device *dev, u64 mask)
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
+ /*
+ * When CONFIG_ARM_LPAE is set, physical address can extend above
+ * 32-bits, which then can't be addressed by devices that only support
+ * 32-bit DMA.
+ * Use the generic dma-direct / swiotlb ops code in that case, as that
+ * handles bounce buffering for us.
+ *
+ * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
+ * latter is also selected by the Xen code, but that code for now relies
+ * on non-NULL dev_dma_ops. To be cleaned up later.
+ */
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ return NULL;
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
@@ -2329,6 +2343,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+#ifdef CONFIG_SWIOTLB
+ dev->dma_coherent = coherent;
+#endif
/*
* Don't override the dma_ops if they have already been set. Ideally
@@ -2363,3 +2380,47 @@ void arch_teardown_dma_ops(struct device *dev)
/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
set_dma_ops(dev, NULL);
}
+
+#ifdef CONFIG_SWIOTLB
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+ size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+ size, dir);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ return dma_to_pfn(dev, dma_addr);
+}
+
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ if (!dev_is_dma_coherent(dev))
+ return __get_dma_pgprot(attrs, prot);
+ return prot;
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ return __dma_alloc(dev, size, dma_handle, gfp,
+ __get_dma_pgprot(attrs, PAGE_KERNEL), false,
+ attrs, __builtin_return_address(0));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
+}
+#endif /* CONFIG_SWIOTLB */
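
Note on the hunk above: returning NULL from arm_get_dma_map_ops() is enough because the DMA core then falls back to the dma-direct path, which bounces through swiotlb whenever a buffer lies above the device's DMA mask. A simplified, self-contained sketch of that dispatch, assuming the get_dma_ops() shape of this kernel generation (the struct layout and prototypes below are illustrative stand-ins, not copied from this patch):

    struct dma_map_ops;
    struct bus_type;
    struct device {
            const struct dma_map_ops *dma_ops;     /* per-device override */
            struct bus_type *bus;
    };

    /* Arch hook; on ARM this is the function patched above. */
    const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus);

    static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
    {
            if (dev->dma_ops)                       /* explicit ops win */
                    return dev->dma_ops;
            return get_arch_dma_ops(dev->bus);      /* NULL => dma-direct + swiotlb */
    }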
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4920a206dce9..16d373d587c4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -21,6 +21,7 @@
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
+#include <linux/swiotlb.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
@@ -463,6 +464,10 @@ static void __init free_highpages(void)
*/
void __init mem_init(void)
{
+#ifdef CONFIG_ARM_LPAE
+ swiotlb_init(1);
+#endif
+
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
/* this will put all unused low memory onto the freelists */
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bb1f1dbb34e8..61de992bbea3 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -52,7 +52,7 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
ifeq ($(CONFIG_CC_IS_CLANG), y)
$(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
- else ifeq ($(CROSS_COMPILE_COMPAT),)
+ else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
$(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
$(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 79155a8cfe7c..89e4c8b79349 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
GIC_PRIO_PSR_I_SET));
BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
+ /*
+ * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
+ * and non-secure PMR accesses are not subject to the shifts that
+ * are applied to IRQ priorities
+ */
+ BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
gic_write_pmr(GIC_PRIO_IRQOFF);
}
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 407e2bf23676..c96ffa4722d3 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,9 +35,10 @@
*/
enum ftr_type {
- FTR_EXACT, /* Use a predefined safe value */
- FTR_LOWER_SAFE, /* Smaller value is safe */
- FTR_HIGHER_SAFE,/* Bigger value is safe */
+ FTR_EXACT, /* Use a predefined safe value */
+ FTR_LOWER_SAFE, /* Smaller value is safe */
+ FTR_HIGHER_SAFE, /* Bigger value is safe */
+ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
};
#define FTR_STRICT true /* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 987926ed535e..063c964af705 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -13,6 +13,8 @@
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8e79ce9c3f5c..76a144702586 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
#define alloc_screen_info(x...) &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+ struct screen_info *si)
+{
+}
/* redeclare as 'hidden' so the compiler will generate relative references */
extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b7ba75809751..fb04f10a78ab 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -210,7 +210,11 @@ extern u64 vabits_user;
#define __tag_reset(addr) untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
-#define __tag_set(addr, tag) (addr)
+static inline const void *__tag_set(const void *addr, u8 tag)
+{
+ return addr;
+}
+
#define __tag_reset(addr) (addr)
#define __tag_get(addr) 0
#endif
@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
#define page_to_virt(page) ({ \
unsigned long __addr = \
((__page_to_voff(page)) | PAGE_OFFSET); \
- unsigned long __addr_tag = \
- __tag_set(__addr, page_kasan_tag(page)); \
+ const void *__addr_tag = \
+ __tag_set((void *)__addr, page_kasan_tag(page)); \
((void *)__addr_tag); \
})
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 3f5461f7b560..5fdcfe237338 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -447,8 +447,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_SECT)
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud) (0)
-#define pud_table(pud) (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b1dd039023ef..1dcf63a9ac1f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -30,7 +30,7 @@
* in the priority mask, it indicates that PSR.I should be set and
* interrupt disabling temporarily does not rely on IRQ priorities.
*/
-#define GIC_PRIO_IRQON 0xc0
+#define GIC_PRIO_IRQON 0xe0
#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
#define GIC_PRIO_PSR_I_SET (1 << 4)
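
The raised GIC_PRIO_IRQON value here and the new BUILD_BUG_ON in the arch_gicv3.h hunk above fit together arithmetically. A worked check, assuming GICD_INT_DEF_PRI is 0xa0 as in mainline of this era (that constant is an assumption; it does not appear in this diff):

    #include <assert.h>

    #define GICD_INT_DEF_PRI 0xa0   /* assumed mainline value, not in this diff */
    #define GIC_PRIO_IRQON   0xe0   /* new value from the ptrace.h hunk above */

    /* With SCR_EL3.FIQ clear, the non-secure view of a priority is shifted
     * right by one with bit 7 set: 0x80 | (0xa0 >> 1) = 0xd0. Since
     * 0xd0 < 0xe0 the assertion holds; with the old IRQON value of 0xc0
     * it would have fired (0xd0 >= 0xc0). */
    static_assert((0x80 | (GICD_INT_DEF_PRI >> 1)) < GIC_PRIO_IRQON,
                  "default priority must stay below GIC_PRIO_IRQON");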
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index f4812777f5c5..c50ee1b7d5cd 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -16,6 +16,8 @@
#define VDSO_HAS_CLOCK_GETRES 1
+#define VDSO_HAS_32BIT_FALLBACK 1
+
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
@@ -52,6 +54,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
}
static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_compat_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
register struct __kernel_timespec *ts asm("r1") = _ts;
@@ -72,6 +91,27 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return ret;
}
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_compat_clock_getres;
+
+ /* The checks below are required for ABI consistency with arm */
+ if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
+ return -EINVAL;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
u64 res;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f29f36a65175..d19d14ba9ae4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
@@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_LOWER_SAFE:
ret = new < cur ? new : cur;
break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ /* Fallthrough */
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
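
The new FTR_HIGHER_OR_ZERO_SAFE case reads more easily pulled out as a standalone helper. A sketch of the selection it performs (an illustrative restatement only; in the kernel this stays inline in arm64_ftr_safe_value(), whose ret is initialised to 0, which is why the early break yields zero):

    /* Safe-value selection for a "bigger is safe, but 0 means no limit"
     * field such as CTR_EL0.CWG/ERG. */
    static long ftr_higher_or_zero_safe(long cur, long new)
    {
            if (!cur || !new)               /* 0 is "biggest": it wins */
                    return 0;
            return new > cur ? new : cur;   /* otherwise the bigger value */
    }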
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f8719bd30850..48222a4760c2 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -207,16 +207,16 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
- rcu_read_lock();
-
+ /*
+ * Since single-step exception disables interrupt, this function is
+ * entirely not preemptible, and we can use rcu list safely here.
+ */
list_for_each_entry_rcu(hook, list, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
- rcu_read_unlock();
-
return retval;
}
NOKPROBE_SYMBOL(call_step_hook);
@@ -305,14 +305,16 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
- rcu_read_lock();
+ /*
+ * Since brk exception disables interrupt, this function is
+ * entirely not preemptible, and we can use rcu list safely here.
+ */
list_for_each_entry_rcu(hook, list, node) {
unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
}
- rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index dceb84520948..38ee1514cd9c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -536,13 +536,18 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+
+ /* Fallthrough */
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
+
+ /* Fallthrough */
default:
return -EINVAL;
}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 46e643e30708..03ff15bffbb6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -314,18 +314,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
@@ -393,6 +396,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bd5dfffca272..c4452827419b 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -21,6 +21,7 @@
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
@@ -168,33 +169,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
}
/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
- unsigned long spsr = regs->pstate;
-
- if (mask)
- spsr |= PSR_D_BIT;
- else
- spsr &= ~PSR_D_BIT;
-
- regs->pstate = spsr;
-}
-
-/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupt on local CPU, there is a chance of
@@ -205,17 +179,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- kcb->saved_irqflag = regs->pstate;
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
regs->pstate |= PSR_I_BIT;
+ /* Unmask PSTATE.D for enabling software step exceptions. */
+ regs->pstate &= ~PSR_D_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- if (kcb->saved_irqflag & PSR_I_BIT)
- regs->pstate |= PSR_I_BIT;
- else
- regs->pstate &= ~PSR_I_BIT;
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
}
static void __kprobes
@@ -252,8 +226,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
set_ss_context(kcb, slot); /* mark pending ss */
- spsr_set_debug_flag(regs, 0);
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
kernel_enable_single_step(regs);
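
Why spsr_set_debug_flag() could be dropped entirely: the save/restore pair now round-trips all four DAIF bits, and the save path clears PSTATE.D itself. A small self-contained sketch of that round trip (the PSR_* values are assumed from the arm64 uapi headers; illustrative only):

    #include <stdio.h>

    #define PSR_F_BIT 0x00000040UL  /* assumed arm64 uapi values */
    #define PSR_I_BIT 0x00000080UL
    #define PSR_A_BIT 0x00000100UL
    #define PSR_D_BIT 0x00000200UL
    #define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

    int main(void)
    {
            /* e.g. kprobe hit while D was set (entered from debug context) */
            unsigned long pstate = PSR_D_BIT | PSR_I_BIT;
            unsigned long saved  = pstate & DAIF_MASK;  /* save all DAIF bits */

            pstate |= PSR_I_BIT;    /* single-step with IRQs masked ... */
            pstate &= ~PSR_D_BIT;   /* ... and step exceptions unmasked */

            pstate = (pstate & ~DAIF_MASK) | saved;     /* restore */
            printf("PSTATE.D restored: %s\n",
                   (pstate & PSR_D_BIT) ? "yes" : "no");  /* prints "yes" */
            return 0;
    }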
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index c4ae647d2306..a5e8b3b9d798 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
return 0;
}
}
+NOKPROBE_SYMBOL(save_return_addr);
void *return_address(unsigned int level)
{
@@ -49,3 +51,4 @@ void *return_address(unsigned int level)
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ea90d3bd9253..018a33e01b0e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -152,8 +152,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_crit("CPU%u: died during early boot\n", cpu);
break;
}
- /* Fall through */
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ /* Fall through */
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 2b160ae594eb..a336cb124320 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -111,6 +112,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
return 0;
}
+NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
@@ -125,6 +127,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
break;
}
}
+NOKPROBE_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9568c116ac7f..cfd65b63f36f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -777,6 +777,53 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+static void debug_exception_enter(struct pt_regs *regs)
+{
+ /*
+ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+ * already disabled to preserve the last enabled/disabled addresses.
+ */
+ if (interrupts_enabled(regs))
+ trace_hardirqs_off();
+
+ if (user_mode(regs)) {
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+ } else {
+ /*
+ * We might have interrupted pretty much anything. In
+ * fact, if we're a debug exception, we can even interrupt
+ * NMI processing. We don't want this code to make in_nmi()
+ * return true, but we need to notify RCU.
+ */
+ rcu_nmi_enter();
+ }
+
+ preempt_disable();
+
+ /* This code is a bit fragile. Test it. */
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+static void debug_exception_exit(struct pt_regs *regs)
+{
+ preempt_enable_no_resched();
+
+ if (!user_mode(regs))
+ rcu_nmi_exit();
+
+ if (interrupts_enabled(regs))
+ trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -817,12 +864,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
if (cortex_a76_erratum_1463225_debug_handler(regs))
return;
- /*
- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
- * already disabled to preserve the last enabled/disabled addresses.
- */
- if (interrupts_enabled(regs))
- trace_hardirqs_off();
+ debug_exception_enter(regs);
if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
@@ -832,7 +874,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
inf->sig, inf->code, (void __user *)pc, esr);
}
- if (interrupts_enabled(regs))
- trace_hardirqs_on();
+ debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h
index 14b1931be69c..b65b169778e3 100644
--- a/arch/mips/vdso/vdso.h
+++ b/arch/mips/vdso/vdso.h
@@ -9,6 +9,7 @@
#if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT)
/* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */
+#define BUILD_VDSO32_64
#undef CONFIG_64BIT
#define CONFIG_32BIT 1
#ifndef __ASSEMBLY__
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 8acb8fa1f8d6..3b77d729057f 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -19,8 +19,6 @@
KBUILD_IMAGE := vmlinuz
-KBUILD_DEFCONFIG := default_defconfig
-
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
@@ -182,5 +180,8 @@ define archhelp
@echo ' zinstall - Install compressed vmlinuz kernel'
endef
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
archheaders:
$(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index 2da8624e5cf6..1e5879c6a752 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -12,6 +12,7 @@ UBSAN_SANITIZE := n
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
+targets += real2.S firmware.c
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -55,7 +56,8 @@ $(obj)/misc.o: $(obj)/sizes.h
CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
$(obj)/vmlinux.lds: $(obj)/sizes.h
-$(obj)/vmlinux.bin: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S
+$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
vmlinux.bin.all-y := $(obj)/vmlinux.bin
diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
index bfd7872739a3..2ac3a643f2eb 100644
--- a/arch/parisc/boot/compressed/vmlinux.lds.S
+++ b/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -48,8 +48,8 @@ SECTIONS
*(.rodata.compressed)
}
- /* bootloader code and data starts behind area of extracted kernel */
- . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
+ /* bootloader code and data starts at least behind area of extracted kernel */
+ . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
/* align on next page boundary */
. = ALIGN(4096);
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/defconfig
index 5b877ca34ebf..5b877ca34ebf 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/defconfig
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index d784ccdd8fef..b6fb30f2e4bf 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -181,8 +181,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
for (i = 0; i < ARRAY_SIZE(insn); i++)
insn[i] = INSN_NOP;
+ __patch_text((void *)rec->ip, INSN_NOP);
__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
- insn, sizeof(insn));
+ insn, sizeof(insn)-4);
return 0;
}
#endif
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index b6c4b254901a..55c1396580a4 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -18,3 +18,4 @@ obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
# other very old or stripped-down PA-RISC CPUs -- not currently supported
obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o
+CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough=3
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 6dd4669ce7a5..adbd5e2144a3 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -66,6 +66,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
+ /* fall through */
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 9bf63f0ab253..42b5ec223100 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -21,7 +21,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- timebase-frequency = <1000000>;
cpu0: cpu@0 {
compatible = "sifive,e51", "sifive,rocket0", "riscv";
device_type = "cpu";
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index b7b749b18853..93205c0bf71d 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
@@ -66,8 +69,9 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
CONFIG_VIRTIO_MMIO=y
-CONFIG_SPI_SIFIVE=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@@ -83,8 +87,4 @@ CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
-CONFIG_SPI=y
-CONFIG_MMC_SPI=y
-CONFIG_MMC=y
-CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index f1d6ffe43e42..49a5852fd07d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
# these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- -Wl,--hash-style=both
+ -Wl,--build-id -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 082905d97309..1c3b2b257637 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -8,6 +8,7 @@ void store_ipl_parmblock(void);
void setup_boot_command_line(void);
void parse_boot_command_line(void);
void setup_memory_end(void);
+void verify_facilities(void);
void print_missing_facilities(void);
unsigned long get_random_base(unsigned long safe_addr);
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 3bdd8132e56b..c34a6387ce38 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -7,6 +7,7 @@
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
+#include "boot.h"
#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index e26d4413d34c..74e78ec5beb6 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -3,6 +3,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -18,55 +19,71 @@ CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NR_CPUS=512
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
+CONFIG_CHSC_SCH=y
+CONFIG_VFIO_CCW=m
+CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_REFCOUNT_FULL=y
+CONFIG_LOCK_EVENT_COUNTS=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=512
-CONFIG_NUMA=y
-CONFIG_PREEMPT=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_VERIFY_SIG=y
-CONFIG_EXPOLINE=y
-CONFIG_EXPOLINE_AUTO=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -82,17 +99,8 @@ CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_PCI=y
-CONFIG_PCI_DEBUG=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_S390=y
-CONFIG_CHSC_SCH=y
-CONFIG_VFIO_AP=m
-CONFIG_VFIO_CCW=m
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
+CONFIG_PERCPU_STATS=y
+CONFIG_GUP_BENCHMARK=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -121,9 +129,6 @@ CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
@@ -139,10 +144,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_GRE=m
@@ -264,11 +265,8 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=y
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -287,10 +285,7 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -309,7 +304,7 @@ CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@@ -375,9 +370,11 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
@@ -395,7 +392,6 @@ CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
@@ -415,17 +411,19 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=m
+CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -445,23 +443,78 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+# CONFIG_MLXFW is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
@@ -473,10 +526,13 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_NULL_TTY=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+CONFIG_PPS=m
+# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
@@ -498,8 +554,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
-CONFIG_S390_AP_IOMMU=y
CONFIG_S390_CCW_IOMMU=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -519,6 +575,7 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
+CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
@@ -552,8 +609,10 @@ CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
@@ -564,7 +623,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
@@ -580,19 +638,112 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
+CONFIG_UNICODE=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_IMA=y
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
+CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
+CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CORDIC=m
+CONFIG_CRC32_SELFTEST=y
+CONFIG_CRC4=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_DMA_API_DEBUG=y
+CONFIG_STRING_SELFTEST=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
CONFIG_FRAME_WARN=1024
-CONFIG_READABLE_ASM=y
CONFIG_UNUSED_SYMBOLS=y
CONFIG_HEADERS_INSTALL=y
CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_SELFTEST=y
@@ -645,7 +796,6 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_DMA_API_DEBUG=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_SORT=y
@@ -657,85 +807,3 @@ CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
-CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
-CONFIG_ENCRYPTED_KEYS=m
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_FORTIFY_SOURCE=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_INTEGRITY_SIGNATURE=y
-CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
-CONFIG_IMA=y
-CONFIG_IMA_DEFAULT_HASH_SHA256=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_APPRAISE=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
-CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES_TI=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_842=m
-CONFIG_CRYPTO_LZ4=m
-CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_ZCRYPT=m
-CONFIG_PKEY=m
-CONFIG_CRYPTO_PAES_S390=m
-CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
-CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_PKCS7_MESSAGE_PARSER=y
-CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_CRC7=m
-CONFIG_CRC8=m
-CONFIG_RANDOM32_SELFTEST=y
-CONFIG_CORDIC=m
-CONFIG_CMM=m
-CONFIG_APPLDATA_BASE=y
-CONFIG_KVM=m
-CONFIG_KVM_S390_UCONTROL=y
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index e4bc40073003..68d3ca83302b 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -12,30 +12,51 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NR_CPUS=512
+CONFIG_NUMA=y
+# CONFIG_NUMA_EMU is not set
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
+CONFIG_CHSC_SCH=y
+CONFIG_VFIO_CCW=m
+CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
@@ -47,27 +68,18 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_SHA256=y
-CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=512
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_VERIFY_SIG=y
-CONFIG_EXPOLINE=y
-CONFIG_EXPOLINE_AUTO=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -81,16 +93,8 @@ CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_S390=y
-CONFIG_CHSC_SCH=y
-CONFIG_VFIO_AP=m
-CONFIG_VFIO_CCW=m
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
+CONFIG_PERCPU_STATS=y
+CONFIG_GUP_BENCHMARK=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -119,9 +123,6 @@ CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
@@ -137,10 +138,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_GRE=m
@@ -262,11 +259,8 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=y
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -285,10 +279,7 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -307,7 +298,7 @@ CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@@ -372,9 +363,11 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
@@ -383,6 +376,7 @@ CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XPRAM is not set
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=m
@@ -392,7 +386,6 @@ CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
@@ -412,17 +405,19 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=m
+CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -435,6 +430,7 @@ CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
CONFIG_DM_SWITCH=m
+CONFIG_DM_INTEGRITY=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@@ -442,23 +438,78 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+# CONFIG_MLXFW is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
@@ -470,17 +521,21 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_NULL_TTY=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
CONFIG_DRM=y
CONFIG_DRM_VIRTIO_GPU=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
@@ -495,8 +550,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
-CONFIG_S390_AP_IOMMU=y
CONFIG_S390_CCW_IOMMU=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -546,8 +601,10 @@ CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
@@ -558,7 +615,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
@@ -574,31 +630,7 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_RCU_TORTURE_TEST=m
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-CONFIG_HIST_TRIGGERS=y
-CONFIG_LKDTM=m
-CONFIG_PERCPU_TEST=m
-CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
+CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
@@ -606,7 +638,6 @@ CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
@@ -615,31 +646,42 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_CRYPTO_FIPS=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
+CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_XXHASH=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
@@ -649,16 +691,19 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
@@ -669,12 +714,34 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CORDIC=m
+CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
-CONFIG_CORDIC=m
-CONFIG_CMM=m
-CONFIG_APPLDATA_BASE=y
-CONFIG_KVM=m
-CONFIG_KVM_S390_UCONTROL=y
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_LKDTM=m
+CONFIG_PERCPU_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index d92bab844b73..be09a208b608 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,27 +1,33 @@
# CONFIG_SWAP is not set
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_CPU_ISOLATION is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_COMPAT_BRK is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_IBM_PARTITION=y
-CONFIG_DEFAULT_DEADLINE=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
-# CONFIG_HOTPLUG_CPU is not set
CONFIG_HZ_100=y
# CONFIG_ARCH_RANDOM is not set
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
-# CONFIG_BOUNCE is not set
-# CONFIG_CHECK_STACK is not set
+# CONFIG_RELOCATABLE is not set
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
CONFIG_CRASH_DUMP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SECCOMP is not set
+# CONFIG_PFAULT is not set
+# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_S390_GUEST is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
CONFIG_DEVTMPFS=y
@@ -43,7 +49,6 @@ CONFIG_ZFCP=y
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
CONFIG_RAW_DRIVER=y
-# CONFIG_SCLP_ASYNC is not set
# CONFIG_HMC_DRV is not set
# CONFIG_S390_TAPE is not set
# CONFIG_VMCP is not set
@@ -56,6 +61,7 @@ CONFIG_RAW_DRIVER=y
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_DIMLIB is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
@@ -64,7 +70,4 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
-# CONFIG_PFAULT is not set
-# CONFIG_S390_HYPFS_FS is not set
-# CONFIG_VIRTUALIZATION is not set
-# CONFIG_S390_GUEST is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index db5ef22c46e4..f647d565bd6d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -28,7 +28,7 @@
* @sliba: storage list information block address
* @sla: storage list address
* @slsba: storage list state block address
- * @akey: access key for DLIB
+ * @akey: access key for SLIB
* @bkey: access key for SL
* @ckey: access key for SBALs
* @dkey: access key for SLSB
@@ -50,11 +50,10 @@ struct qdesfmt0 {
/**
* struct qdr - queue description record (QDR)
* @qfmt: queue format
- * @pfmt: implementation dependent parameter format
* @ac: adapter characteristics
* @iqdcnt: input queue descriptor count
* @oqdcnt: output queue descriptor count
- * @iqdsz: inpout queue descriptor size
+ * @iqdsz: input queue descriptor size
* @oqdsz: output queue descriptor size
* @qiba: queue information block address
* @qkey: queue information block key
@@ -62,8 +61,7 @@ struct qdesfmt0 {
*/
struct qdr {
u32 qfmt : 8;
- u32 pfmt : 8;
- u32 : 8;
+ u32 : 16;
u32 ac : 8;
u32 : 8;
u32 iqdcnt : 8;
@@ -327,6 +325,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* struct qdio_initialize - qdio initialization data
* @cdev: associated ccw device
* @q_format: queue format
+ * @qdr_ac: feature flags to set
* @adapter_name: name for the adapter
* @qib_param_field_format: format for qib_parm_field
* @qib_param_field: pointer to 128 bytes or NULL, if no param field
@@ -338,6 +337,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
* @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @scan_threshold: # of in-use buffers that triggers scan on output queue
* @int_parm: interruption parameter
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
* @output_sbal_addr_array: address of no_output_qs * 128 pointers
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index c5cfff7b1f91..70bd65724ec4 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -84,6 +84,7 @@ extern int noexec_disabled;
extern int memory_end_set;
extern unsigned long memory_end;
extern unsigned long max_physmem_end;
+extern unsigned long __swsusp_reset_dma;
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c
index 1dded39239f8..3b664cb3ec4d 100644
--- a/arch/s390/kernel/machine_kexec_reloc.c
+++ b/arch/s390/kernel/machine_kexec_reloc.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/elf.h>
+#include <asm/kexec.h>
int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
unsigned long addr)
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index d4e031f7b9c8..5f1fd1581330 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -34,7 +34,7 @@ struct cf_diag_csd { /* Counter set data per CPU */
unsigned char start[PAGE_SIZE]; /* Counter set at event start */
unsigned char data[PAGE_SIZE]; /* Counter set at event delete */
};
-DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
+static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
/* Counter sets are stored as data stream in a page sized memory buffer and
* exported to user space via raw data attached to the event sample data.
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index 96580590ccaf..29d9470dbceb 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/export.h>
#include <linux/raid/xor.h>
+#include <asm/xor.h>
static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 63507662828f..7b0bb475c166 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -327,6 +327,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
case VM_FAULT_BADACCESS:
if (access == VM_EXEC && signal_return(regs) == 0)
break;
+ /* fallthrough */
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
@@ -336,7 +337,9 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
do_sigsegv(regs, si_code);
break;
}
+ /* fallthrough */
case VM_FAULT_BADCONTEXT:
+ /* fallthrough */
case VM_FAULT_PFAULT:
do_no_context(regs);
break;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 1e668b95e0c6..39c3a6e3d262 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2424,8 +2424,8 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
* This function is assumed to be called with the guest_table_lock
* held.
*/
-bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
- unsigned long gaddr)
+static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
{
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
return false;
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index ae91429129a6..ba71a63cdac4 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -96,6 +96,8 @@ long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
#else
+#define VDSO_HAS_32BIT_FALLBACK 1
+
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
@@ -114,6 +116,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
}
static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ long ret;
+
+ asm (
+ "mov %%ebx, %%edx \n"
+ "mov %[clock], %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+ : "=a" (ret), "=m" (*_ts)
+ : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts)
+ : "edx");
+
+ return ret;
+}
+
+static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
{
@@ -148,6 +167,23 @@ clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return ret;
}
+static __always_inline
+long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ long ret;
+
+ asm (
+ "mov %%ebx, %%edx \n"
+ "mov %[clock], %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+ : "=a" (ret), "=m" (*_ts)
+ : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts)
+ : "edx");
+
+ return ret;
+}
+
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
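
[annotation] Both 32-bit fallbacks above shuffle %ebx through %edx because, in PIC builds on i386, %ebx holds the GOT pointer and may not appear in the clobber list, yet the syscall ABI wants the first argument in %ebx. A minimal user-space sketch of the same dance, built with -m32 (hypothetical: a raw int $0x80 instead of __kernel_vsyscall, and 265 is assumed to be the i386 __NR_clock_gettime; with 32-bit time_t, struct timespec matches the kernel's old_timespec32):

	#include <time.h>

	static long clock_gettime32_sketch(int clkid, struct timespec *ts)
	{
		long ret;

		asm volatile (
			"mov %%ebx, %%edx \n"	/* park the GOT register */
			"mov %[clock], %%ebx \n"	/* arg 1 must be in %ebx */
			"int $0x80 \n"
			"mov %%edx, %%ebx \n"	/* restore the GOT register */
			: "=a" (ret), "=m" (*ts)
			: "0" (265), [clock] "g" (clkid), "c" (ts)
			: "edx", "memory");

		return ret;
	}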
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 28cffaaf9d82..f616b16c1f0b 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
if (device->power.flags.power_resources)
result = acpi_power_transition(device, target_state);
} else {
+ int cur_state = device->power.state;
+
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, ACPI_STATE_D0);
if (result)
goto end;
}
- if (device->power.state == ACPI_STATE_D0) {
+ if (cur_state == ACPI_STATE_D0) {
int psc;
/* Nothing to do here if _PSC is not present. */
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0e28270b0fd8..aad6be5c0af0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void)
acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
+ /*
+ * Although we call __add_memory(), which is documented to require the
+ * device_hotplug_lock, that is not necessary here: this code runs so
+ * early that neither userspace nor any other code path can trigger
+ * hotplug/hotunplug operations yet.
+ */
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 3a36e76eca83..9e9583a6bba9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
hpriv->phys[port] = NULL;
rc = 0;
break;
+ case -EPROBE_DEFER:
+ /* Do not complain yet */
+ break;
default:
dev_err(dev,
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2dd9af..eefda51f97d3 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc;
struct ata_taskfile tf;
- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 85f20e371f2f..bd7d3bb8b890 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
+ /* Fall through */
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44c9985f352a..3036883fc9f8 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct file *file;
struct inode *inode;
struct address_space *mapping;
+ struct block_device *claimed_bdev = NULL;
int lo_flags = 0;
int error;
loff_t size;
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
* here to avoid changing device under exclusive owner.
*/
if (!(mode & FMODE_EXCL)) {
- bdgrab(bdev);
- error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
- if (error)
+ claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+ if (IS_ERR(claimed_bdev)) {
+ error = PTR_ERR(claimed_bdev);
goto out_putf;
+ }
}
error = mutex_lock_killable(&loop_ctl_mutex);
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mutex_unlock(&loop_ctl_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
- if (!(mode & FMODE_EXCL))
- blkdev_put(bdev, mode | FMODE_EXCL);
+ if (claimed_bdev)
+ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
return 0;
out_unlock:
mutex_unlock(&loop_ctl_mutex);
out_bdev:
- if (!(mode & FMODE_EXCL))
- blkdev_put(bdev, mode | FMODE_EXCL);
+ if (claimed_bdev)
+ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
out_putf:
fput(file);
out:
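
[annotation] The rework above claims the block device without opening it a second time: bd_start_claiming() only marks bdev as being claimed with loop_set_fd as the holder, and bd_abort_claiming() releases that claim on both the success and error paths. A condensed sketch of the pairing (error handling and the loop_ctl_mutex details elided):

	struct block_device *claimed = NULL;

	if (!(mode & FMODE_EXCL)) {
		claimed = bd_start_claiming(bdev, loop_set_fd);
		if (IS_ERR(claimed))
			return PTR_ERR(claimed);	/* someone else holds it */
	}
	/* ... configure the loop device ... */
	if (claimed)
		bd_abort_claiming(bdev, claimed, loop_set_fd);	/* drop the claim */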
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9bcde2325893..e21d2ded732b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
sock_shutdown(nbd);
- kill_bdev(bdev);
+ __invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index a55be205b91a..dbfe34664633 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
ath = kzalloc(sizeof(*ath), GFP_KERNEL);
if (!ath)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8905ad2edde7..ae2624fce913 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
if (!bcm)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 207bae5e0d46..31f25153087d 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
intel = kzalloc(sizeof(*intel), GFP_KERNEL);
if (!intel)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8950e07889fe..85a30fb9177b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+/* Check whether the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+ /* serdev nodes check if the needed operations are present */
+ if (hu->serdev)
+ return true;
+
+ if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+ return true;
+
+ return false;
+}
+
/* Flow control or un-flow control the device */
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
{
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index f98e5cc343b2..fbc3f7c3a5c7 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
if (!mrvl)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9a5c9c1f9484..82a0a3691a63 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu)
BT_DBG("hu %p qca_open", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index f11af3912ce6..6ab631101019 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
void hci_uart_init_work(struct work_struct *work);
void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 57204335c5f5..285e0b8f9a97 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
struct ipmb_request_elem *queue_elem;
struct ipmb_msg msg;
- ssize_t ret;
+ ssize_t ret = 0;
memset(&msg, 0, sizeof(msg));
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44db83a6d01c..44a46dcc0518 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
continue;
div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+ if (div > GENERATED_MAX_DIV + 1)
+ div = GENERATED_MAX_DIV + 1;
clk_generated_best_diff(req, parent, parent_rate, div,
&best_diff, &best_rate);
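
[annotation] The added clamp guards against DIV_ROUND_CLOSEST() producing a divider the hardware cannot program when a very low rate is requested. Worked numbers, assuming GENERATED_MAX_DIV is 255 on these SoCs:

	unsigned long parent_rate = 100000000UL;	/* 100 MHz */
	unsigned long requested   = 200000UL;		/* 200 kHz */
	unsigned long div = DIV_ROUND_CLOSEST(parent_rate, requested);	/* = 500 */

	if (div > 255 + 1)	/* GENERATED_MAX_DIV + 1 */
		div = 255 + 1;	/* best reachable: 100 MHz / 256 ≈ 390.6 kHz */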
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1aa5f4059251..73b7e238eee7 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
};
+static const struct mtk_fixed_factor top_early_divs[] = {
+ FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
+};
+
static const struct mtk_fixed_factor top_divs[] = {
- FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
- 2),
FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
2),
FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8183_top_init_early(struct device_node *node)
+{
+ int i;
+
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ for (i = 0; i < CLK_TOP_NR_CLK; i++)
+ top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
+ clk_mt8183_top_init_early);
+
static int clk_mt8183_top_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base;
- struct clk_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
+ top_clk_data);
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
- node, &mt8183_clk_lock, clk_data);
+ node, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
- base, &mt8183_clk_lock, clk_data);
+ base, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
- base, &mt8183_clk_lock, clk_data);
+ base, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ top_clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
}
static int clk_mt8183_infra_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 52bbb9ce3807..d4075b130674 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
/* Reset module */
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
return 0;
}
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 91d3d721c801..3c219af25100 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
tristate "Clock support for Spreadtrum SoCs"
depends on ARCH_SPRD || COMPILE_TEST
default ARCH_SPRD
+ select REGMAP_MMIO
if SPRD_COMMON_CLK
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ee99d070608..f497003f119c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= IRQF_TRIGGER_RISING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= IRQF_TRIGGER_FALLING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
INIT_KFIFO(le->events);
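
[annotation] With this change a line-event request is interpreted in logical terms: on an ACTIVE_LOW line, a requested rising edge is armed as a falling physical edge and vice versa. A condensed sketch of the mapping (illustrative helper, not a gpiolib function):

	#include <linux/interrupt.h>

	static unsigned long edge_to_irqflags(bool active_low,
					      bool rising, bool falling)
	{
		unsigned long irqflags = 0;

		if (rising)	/* logical rising edge */
			irqflags |= active_low ? IRQF_TRIGGER_FALLING
					       : IRQF_TRIGGER_RISING;
		if (falling)	/* logical falling edge */
			irqflags |= active_low ? IRQF_TRIGGER_RISING
					       : IRQF_TRIGGER_FALLING;
		return irqflags;
	}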
@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
- if (chip->get_direction && gpiochip_line_is_valid(chip, i))
- desc->flags = !chip->get_direction(chip, i) ?
- (1 << FLAG_IS_OUT) : 0;
- else
- desc->flags = !chip->direction_input ?
- (1 << FLAG_IS_OUT) : 0;
+ if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+ if (!chip->get_direction(chip, i))
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ } else {
+ if (!chip->direction_input)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ }
}
acpi_gpiochip_add(chip);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1d80222587ad..3c88420e3497 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -394,7 +394,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
# !PREEMPT because of missing ioctl locking
- depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d3ee9c42f7e..6a5c96e519b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12) ?
+ adev->asic_type != CHIP_POLARIS12 &&
+ adev->asic_type != CHIP_VEGAM) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e069de8b54e6..4e4094f842e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
return r;
}
- fence = amdgpu_ctx_get_fence(ctx, entity,
- deps[i].handle);
+ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+ amdgpu_ctx_put(ctx);
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
- struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+ struct drm_sched_fence *s_fence;
struct dma_fence *old = fence;
+ s_fence = to_drm_sched_fence(fence);
fence = dma_fence_get(&s_fence->scheduled);
dma_fence_put(old);
}
- if (IS_ERR(fence)) {
- r = PTR_ERR(fence);
- amdgpu_ctx_put(ctx);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+ dma_fence_put(fence);
+ if (r)
return r;
- } else if (fence) {
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
- true);
- dma_fence_put(fence);
- amdgpu_ctx_put(ctx);
- if (r)
- return r;
- }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6d54decef7f8..5652cc72ed3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 03ca8c69114f..2b546567853b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
- pm = amdgpu_smu_get_current_power_state(adev);
- else if (adev->powerplay.pp_funcs->get_current_power_state)
+ if (is_support_sw_smu(adev)) {
+ if (adev->smu.ppt_funcs->get_current_power_state)
+ pm = amdgpu_smu_get_current_power_state(adev);
+ else
+ pm = adev->pm.dpm.user_state;
+ } else if (adev->powerplay.pp_funcs->get_current_power_state) {
pm = amdgpu_dpm_get_current_power_state(adev);
- else
+ } else {
pm = adev->pm.dpm.user_state;
+ }
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail;
}
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.user_state = state;
+ mutex_unlock(&adev->pm.mutex);
+ } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
} else {
mutex_lock(&adev->pm.mutex);
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
- /* UVD clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "UVD: Disabled\n");
- } else {
- seq_printf(m, "UVD: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ if (adev->asic_type > CHIP_VEGA20) {
+ /* VCN clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "VCN: Disabled\n");
+ } else {
+ seq_printf(m, "VCN: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
}
- }
- seq_printf(m, "\n");
+ seq_printf(m, "\n");
+ } else {
+ /* UVD clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "UVD: Disabled\n");
+ } else {
+ seq_printf(m, "UVD: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
+ }
+ seq_printf(m, "\n");
- /* VCE clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCE: Disabled\n");
- } else {
- seq_printf(m, "VCE: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ /* VCE clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "VCE: Disabled\n");
+ } else {
+ seq_printf(m, "VCE: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9f661bf96ed0..5b1ebb7f995a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -123,6 +123,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
AMDGPU_PP_SENSOR_MIN_FAN_RPM,
AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ AMDGPU_PP_SENSOR_VCN_POWER_STATE,
};
enum amd_pp_task {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index c097113c3976..0685a3388e38 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -306,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
/* not support power state */
memset(state_info, 0, sizeof(struct pp_states_info));
- state_info->nums = 0;
+ state_info->nums = 1;
+ state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
return 0;
}
@@ -337,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -723,6 +728,12 @@ static int smu_sw_init(void *handle)
return ret;
}
+ ret = smu_register_irq_handler(smu);
+ if (ret) {
+ pr_err("Failed to register smc irq handler!\n");
+ return ret;
+ }
+
return 0;
}
@@ -732,6 +743,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
+ kfree(smu->irq_source);
+ smu->irq_source = NULL;
+
ret = smu_smc_table_sw_fini(smu);
if (ret) {
pr_err("Failed to sw fini smc table!\n");
@@ -1088,10 +1102,6 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- ret = smu_register_irq_handler(smu);
- if (ret)
- goto failed;
-
if (!smu->pm_enabled)
adev->pm.dpm_enabled = false;
else
@@ -1121,9 +1131,6 @@ static int smu_hw_fini(void *handle)
kfree(table_context->overdrive_table);
table_context->overdrive_table = NULL;
- kfree(smu->irq_source);
- smu->irq_source = NULL;
-
ret = smu_fini_fb_allocations(smu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index e32ae9d3373c..18e780f566fa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
uint32_t sclk, mclk;
int ret = 0;
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
break;
+ case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+ *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
if (bgate) {
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0);
+ smu10_data->vcn_power_gated = true;
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
+ smu10_data->vcn_power_gated = false;
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 22e46a289a16..208e6711d506 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -429,7 +429,6 @@ struct smu_table_context
struct smu_table *tables;
uint32_t table_count;
struct smu_table memory_pool;
- uint16_t software_shutdown_temp;
uint8_t thermal_controller_type;
uint16_t TDPODLimit;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 4aaad255a288..cc0a3b2256af 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -23,6 +23,7 @@
#include "pp_debug.h"
#include <linux/firmware.h>
+#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
@@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
{
int ret = 0;
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- if (enable && power_gate->uvd_gated) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
- if (ret)
- return ret;
- }
- power_gate->uvd_gated = false;
+ if (enable) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+ if (ret)
+ return ret;
} else {
- if (!enable && !power_gate->uvd_gated) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
- if (ret)
- return ret;
- }
- power_gate->uvd_gated = true;
- }
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
}
- return 0;
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
+
+ return ret;
}
static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
@@ -1573,7 +1566,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
uint32_t sclk_freq = 0, uclk_freq = 0;
uint32_t uclk_level = 0;
- switch (adev->rev_id) {
+ switch (adev->pdev->revision) {
case 0xf0: /* XTX */
case 0xc0:
sclk_freq = NAVI10_PEAK_SCLK_XTX;
@@ -1620,6 +1613,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo
return ret;
}
+static int navi10_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
+
+ if (!range || !powerplay_table)
+ return -EINVAL;
+
+ /* The unit is degrees Celsius */
+ range->min = 0;
+ range->max = powerplay_table->software_shutdown_temp;
+
+ return 0;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1657,6 +1666,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_ppfeature_status = navi10_get_ppfeature_status,
.set_ppfeature_status = navi10_set_ppfeature_status,
.set_performance_level = navi10_set_performance_level,
+ .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index caca9091bfcc..ac5b26228e75 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
struct amdgpu_device *adev = smu->adev;
- int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
if (!range)
@@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
if (high > range->max)
high = range->max;
+ low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
+ high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+
if (low > high)
return -EINVAL;
@@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
if (!smu->pm_enabled)
return ret;
+
ret = smu_get_thermal_temperature_range(smu, &range);
+ if (ret)
+ return ret;
if (smu->smu_table.thermal_controller_type) {
ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
- adev->pm.dpm.thermal.min_temp = range.min;
- adev->pm.dpm.thermal.max_temp = range.max;
- adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
- adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
- adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
- adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
- adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
- adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
- adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+ adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
return ret;
}
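
[annotation] Taken together with the get_thermal_temperature_range() changes in navi10_ppt.c and vega20_ppt.c, this hunk moves the unit conversion to a single place: the producers now report plain degrees Celsius and only this consumer scales to the millidegree values exposed through hwmon. Illustrative arithmetic, assuming SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000:

	int shutdown_degc  = 99;	/* e.g. software shutdown temp from the powerplay table */
	int shutdown_mdegc = shutdown_degc * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;	/* 99000 */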
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index dc139a6feeb1..dd6fd1c8bf24 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
sizeof(PPTable_t));
- table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
@@ -3234,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
return 0;
}
-static const struct smu_temperature_range vega20_thermal_policy[] =
-{
- {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
- { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
static int vega20_get_thermal_temperature_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
-
+ struct smu_table_context *table_context = &smu->smu_table;
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- if (!range)
+ if (!range || !powerplay_table)
return -EINVAL;
- memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range));
-
- range->max = pptable->TedgeLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->hotspot_crit_max = pptable->ThotspotLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->mem_crit_max = pptable->ThbmLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ /* The unit is degrees Celsius */
+ range->min = 0;
+ range->max = powerplay_table->usSoftwareShutdownTemp;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
+ range->hotspot_crit_max = pptable->ThotspotLimit;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
+ range->mem_crit_max = pptable->ThbmLimit;
+ range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
return 0;
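Note: with the static vega20_thermal_policy table gone, the limits come straight from the pptable, in degrees Celsius. A minimal sketch of a hypothetical consumer (error handling mirrors the -EINVAL check above):

    struct smu_temperature_range range = {0};
    int ret;

    ret = vega20_get_thermal_temperature_range(smu, &range); /* hypothetical direct call */
    if (ret)
            return ret;     /* -EINVAL when range or the powerplay table is missing */
    pr_info("software shutdown limit: %d C\n", range.max);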
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index bc19dbd531ef..359030d5d818 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.fb_base = bochs->fb_base;
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
+ bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index ee777469293a..e4e22bbae2a7 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
config DRM_LVDS_ENCODER
tristate "Transparent parallel to LVDS encoder support"
depends on OF
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
config DRM_TOSHIBA_TC358764
tristate "TC358764 DSI/LVDS bridge"
- depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_PANEL
help
Toshiba TC358764 DSI/LVDS bridge driver.
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 410572f14257..e1dafb0cc5e2 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
struct drm_device *dev = client->dev;
struct drm_client_buffer *buffer;
struct drm_gem_object *obj;
- void *vaddr;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
buffer->gem = obj;
+ return buffer;
+
+err_delete:
+ drm_client_buffer_delete(buffer);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * drm_client_buffer_vmap - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap() should be followed by a call to
+ * drm_client_buffer_vunmap(); or the client buffer should be mapped
+ * throughout its lifetime.
+ *
+ * Returns:
+ * The mapped memory's address
+ */
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->vaddr)
+ return buffer->vaddr;
+
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
- vaddr = drm_gem_vmap(obj);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_delete;
- }
+ vaddr = drm_gem_vmap(buffer->gem);
+ if (IS_ERR(vaddr))
+ return vaddr;
buffer->vaddr = vaddr;
- return buffer;
-
-err_delete:
- drm_client_buffer_delete(buffer);
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_client_buffer_vmap);
- return ERR_PTR(ret);
+/**
+ * drm_client_buffer_vunmap - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping. Calling this
+ * function is only required by clients that manage their buffer mappings
+ * by themselves.
+ */
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+{
+ drm_gem_vunmap(buffer->gem, buffer->vaddr);
+ buffer->vaddr = NULL;
}
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
{
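Note: the kernel-doc above pins down the pairing rule for the new exports. A minimal usage sketch for a client that maps only around an access:

    void *vaddr;

    vaddr = drm_client_buffer_vmap(buffer);
    if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);

    /* ... access the buffer through vaddr ... */

    drm_client_buffer_vunmap(buffer);   /* mappings are not refcounted */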
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1984e5c54d58..a7ba5b4902d6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
struct drm_clip_rect *clip = &helper->dirty_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
+ void *vaddr;
spin_lock_irqsave(&helper->dirty_lock, flags);
clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
/* call dirty callback only when it has been really touched */
if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+
/* Generic fbdev uses a shadow buffer */
- if (helper->buffer)
+ if (helper->buffer) {
+ vaddr = drm_client_buffer_vmap(helper->buffer);
+ if (IS_ERR(vaddr))
+ return;
drm_fb_helper_dirty_blit_real(helper, &clip_copy);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ }
+ if (helper->fb->funcs->dirty)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
+ &clip_copy, 1);
+
+ if (helper->buffer)
+ drm_client_buffer_vunmap(helper->buffer);
}
}
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ return dev->mode_config.prefer_shadow_fbdev ||
+ dev->mode_config.prefer_shadow ||
+ fb->funcs->dirty;
+}
+
static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
u32 width, u32 height)
{
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
struct drm_clip_rect *clip = &helper->dirty_clip;
unsigned long flags;
- if (!helper->fb->funcs->dirty)
+ if (!drm_fbdev_use_shadow_fb(helper))
return;
spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
+ void *vaddr;
DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
- fbi->screen_buffer = buffer->vaddr;
- /* Shamelessly leak the physical address to user-space */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
- fbi->fix.smem_start =
- page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
+
drm_fb_helper_fill_info(fbi, fb_helper, sizes);
- if (fb->funcs->dirty) {
+ if (drm_fbdev_use_shadow_fb(fb_helper)) {
struct fb_ops *fbops;
void *shadow;
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fbdefio = &drm_fbdev_defio;
fb_deferred_io_init(fbi);
+ } else {
+ /* buffer is mapped for HW framebuffer */
+ vaddr = drm_client_buffer_vmap(fb_helper->buffer);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ fbi->screen_buffer = vaddr;
+ /* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+ if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+ fbi->fix.smem_start =
+ page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
}
return 0;
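Note: condensed, the generic probe now takes one of two paths (a sketch of the logic in the hunk above, not the full function):

    if (drm_fbdev_use_shadow_fb(fb_helper)) {
            /* shadow path: fbdev writes land in a vmalloc'ed shadow and
             * the dirty worker vmaps, blits and vunmaps on demand */
            fbi->screen_buffer = shadow;
    } else {
            /* direct path: map the real buffer once, for its lifetime */
            vaddr = drm_client_buffer_vmap(fb_helper->buffer);
            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);
            fbi->screen_buffer = vaddr;
    }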
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 60ce4a8ad9e1..6f7d3b3b3628 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index a594ab7be2c0..164d914cbe9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc;
module_param_named(fimc_devs, fimc_mask, uint, 0644);
MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
-#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_fimc_context(dev) dev_get_drvdata(dev)
enum {
FIMC_CLK_LCLK,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 50904eee96f7..2a3382d43bc9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d)
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
- struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct g2d_cmdlist_node *node;
int nr;
int ret;
struct g2d_buf_info *buf_info;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1e4b21c49a06..1c524db9570f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -58,7 +58,7 @@
#define GSC_COEF_DEPTH 3
#define GSC_AUTOSUSPEND_DELAY 2000
-#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_gsc_context(dev) dev_get_drvdata(dev)
#define gsc_read(offset) readl(ctx->regs + (offset))
#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 9af096479e1c..b24ba948b725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
do {
cpu_relax();
- } while (retry > 1 &&
+ } while (--retry > 1 &&
scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
do {
cpu_relax();
scaler_write(1, SCALER_INT_EN);
- } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
return retry ? 0 : -EIO;
}
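Note: this is the classic bounded-poll idiom. The old loops never decremented retry, so a wedged scaler would spin forever, and the trailing check would still report success. A standalone sketch of the corrected pattern (RETRIES and hw_ready() are illustrative names):

    int retry = RETRIES;

    do {
            cpu_relax();
    } while (--retry > 0 && !hw_ready());

    return retry ? 0 : -EIO;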
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index c4710889cb32..3ef4e9f573cf 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}
if (bdb->version >= 226) {
- u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+ u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
switch (wakeup_time) {
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 753ac3165061..7b908e10d32e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+ bi->num_qgv_points = qi.num_points;
+
for (j = 0; j < qi.num_points; j++) {
const struct intel_qgv_point *sp = &qi.points[j];
int ct, bw;
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
bi->deratedbw[j] = min(maxdebw,
bw * 9 / 10); /* 90% */
- DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+ DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
{
int i;
- /* Did we initialize the bw limits successfully? */
- if (dev_priv->max_bw[0].num_planes == 0)
- return UINT_MAX;
-
for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
const struct intel_bw_info *bi =
&dev_priv->max_bw[i];
+ /*
+ * Pcode will not expose all QGV points when
+ * SAGV is forced to off/min/med/max.
+ */
+ if (qgv_point >= bi->num_qgv_points)
+ return UINT_MAX;
+
if (num_planes >= bi->num_planes)
return bi->deratedbw[qgv_point];
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 8993ab283562..0d19bbd08122 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2240,6 +2240,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(2 * 96000, min_cdclk);
/*
+ * "For DP audio configuration, cdclk frequency shall be set to
+ * meet the following requirements:
+ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ * 270 | 320 or higher
+ * 162 | 200 or higher"
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+ min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
+ /*
* On Valleyview some DSI panels lose (v|h)sync when the clock is lower
* than 320000KHz.
*/
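Note: a worked example for the new clamp, assuming the usual VLV/CHV cdclk steps (200/266/320/400 MHz): a DP HBR link has crtc_state->port_clock == 270000 kHz, so min_cdclk becomes at least 270000, and the cdclk selection rounds that up to the next supported step, 320 MHz, exactly what the quoted table requires; a 162 MHz link likewise rounds up to 200 MHz.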
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 30b97ded6fdd..592b92782fab 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
- trace_intel_pipe_enable(dev_priv, pipe);
+ trace_intel_pipe_enable(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- trace_intel_pipe_disable(dev_priv, pipe);
+ trace_intel_pipe_disable(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c93ad512014c..2d1939db108f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ int pw_idx = power_well->desc->hsw.idx;
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ enum aux_ch aux_ch;
u32 val;
+ aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
val = I915_READ(DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (power_well->desc->hsw.is_tc_tbt)
+ if (is_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 2f4894e9a03d..5ddbe71ab423 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -478,13 +478,13 @@ struct psr_table {
/* TP wake up time in multiple of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
-
- /* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
} __packed;
struct bdb_psr {
struct psr_table psr_table[16];
+
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 05011d4a3b88..914b5d4112bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- intel_gt_resume(i915);
-
- if (i915_gem_init_hw(i915))
+ if (intel_gt_resume(i915))
goto err_wedged;
intel_uc_resume(i915);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 528b61678334..2caa594322bc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
- set_page_dirty(page);
+ /*
+ * As this may not be anonymous memory (e.g. shmem)
+ * but exist on a real mapping, we have to lock
+ * the page in order to dirty it -- holding
+ * the page reference is not sufficient to
+ * prevent the inode from being truncated.
+ * Play safe and take the lock.
+ */
+ set_page_dirty_lock(page);
mark_page_accessed(page);
put_page(page);
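Note: for reference, set_page_dirty_lock() is approximately the following (a simplification of mm/page-writeback.c); holding the page lock stabilizes the page's mapping against a concurrent truncate, which a bare page reference does not:

    int set_page_dirty_lock(struct page *page)
    {
            int ret;

            lock_page(page);
            ret = set_page_dirty(page);
            unlock_page(page);
            return ret;
    }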
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 2c454f227c2e..23120901c55f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active)
if (ce->state)
__context_unpin_state(ce->state);
+ intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
@@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
intel_context_get(ce);
+ err = intel_ring_pin(ce->ring);
+ if (err)
+ goto err_put;
+
if (!ce->state)
return 0;
err = __context_pin_state(ce->state, flags);
- if (err) {
- i915_active_cancel(&ce->active);
- intel_context_put(ce);
- return err;
- }
+ if (err)
+ goto err_ring;
/* Preallocate tracking nodes */
if (!i915_gem_context_is_kernel(ce->gem_context)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
+ if (err)
+ goto err_state;
}
return 0;
+
+err_state:
+ __context_unpin_state(ce->state);
+err_ring:
+ intel_ring_unpin(ce->ring);
+err_put:
+ intel_context_put(ce);
+ i915_active_cancel(&ce->active);
+ return err;
}
void intel_context_active_release(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7fd33e81c2d9..f25632c9b292 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ unsigned int slice = fls(sseu->slice_mask) - 1;
+ unsigned int subslice;
u32 mcr_s_ss_select;
- u32 slice = fls(sseu->slice_mask);
- u32 subslice = fls(sseu->subslice_mask[slice]);
+
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+ subslice = fls(sseu->subslice_mask[slice]);
+ GEM_BUG_ON(!subslice);
+ subslice--;
if (IS_GEN(dev_priv, 10))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
@@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
+ unsigned long flags;
if (header) {
va_list ap;
@@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
i915_reset_engine_count(error, engine),
i915_reset_count(error));
- rcu_read_lock();
-
drm_printf(m, "\tRequests:\n");
+ spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1513,8 +1518,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request_ring(m, rq);
}
-
- rcu_read_unlock();
+ spin_unlock_irqrestore(&engine->active.lock, flags);
wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
@@ -1672,7 +1676,6 @@ struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
- unsigned long flags;
/*
* We are called by the error capture, reset and to dump engine
@@ -1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
- spin_lock_irqsave(&engine->active.lock, flags);
+ lockdep_assert_held(&engine->active.lock);
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
@@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
active = request;
break;
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
return active;
}
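Note: intel_engine_find_active_request() now asserts the lock instead of taking it, so every caller follows the pattern used in intel_engine_dump() above (and in the i915_gpu_error.c hunk later in this series):

    unsigned long flags;
    struct i915_request *rq;

    spin_lock_irqsave(&engine->active.lock, flags);
    rq = intel_engine_find_active_request(engine);
    /* rq is only stable while the lock is held */
    spin_unlock_irqrestore(&engine->active.lock, flags);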
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2ce00d3dc42a..ae5b6baf6dff 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
{
intel_wakeref_init(&engine->wakeref);
}
-
-int intel_engines_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id) {
- intel_engine_pm_get(engine);
- engine->serial++; /* kernel context lost */
- err = engine->resume(engine);
- intel_engine_pm_put(engine);
- if (err) {
- dev_err(i915->drm.dev,
- "Failed to restart %s (%d)\n",
- engine->name, err);
- break;
- }
- }
- intel_gt_pm_put(i915);
-
- return err;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index b326cd993d60..a11c893f64c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,16 +7,22 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "intel_engine_types.h"
+#include "intel_wakeref.h"
+
struct drm_i915_private;
-struct intel_engine_cs;
void intel_engine_pm_get(struct intel_engine_cs *engine);
void intel_engine_pm_put(struct intel_engine_cs *engine);
+static inline bool
+intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+{
+ return intel_wakeref_get_if_active(&engine->wakeref);
+}
+
void intel_engine_park(struct intel_engine_cs *engine);
void intel_engine_init__pm(struct intel_engine_cs *engine);
-int intel_engines_resume(struct drm_i915_private *i915);
-
#endif /* INTEL_ENGINE_PM_H */
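Note: intel_engine_pm_get_if_awake() is a conditional get: it succeeds only when the engine already holds a wakeref, letting callers skip idle engines without waking them. Condensed from the i915_reset_engine() hunk later in this series:

    if (!intel_engine_pm_get_if_awake(engine))
            return 0;       /* engine is idle, nothing to reset */

    /* ... reset the engine ... */

    intel_engine_pm_put(engine);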
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 868b220214f8..43e975a26016 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -70,6 +70,18 @@ struct intel_ring {
struct list_head request_list;
struct list_head active_link;
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
u32 head;
u32 tail;
u32 emit;
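Note: the atomic pin_count turns intel_ring_pin()/intel_ring_unpin() into nest-counted operations: only the first pin maps the ring, only the last unpin tears it down. Condensed from the intel_ringbuffer.c hunks below:

    int intel_ring_pin(struct intel_ring *ring)
    {
            if (atomic_fetch_inc(&ring->pin_count))
                    return 0;       /* already pinned, just count the nesting */
            /* ... pin the timeline, the vma, and map the ring ... */
    }

    void intel_ring_unpin(struct intel_ring *ring)
    {
            if (!atomic_dec_and_test(&ring->pin_count))
                    return;         /* still pinned by another user */
            /* ... unmap and unpin ... */
    }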
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 7b5967751762..9f8f7f54191f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
+#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"
@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
intel_engine_reset(engine, false);
}
-void intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int err = 0;
/*
* After resume, we may need to poke into the pinned kernel
@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
+ intel_gt_pm_get(i915);
for_each_engine(engine, i915, id) {
struct intel_context *ce;
+ intel_engine_pm_get(engine);
+
ce = engine->kernel_context;
if (ce)
ce->ops->reset(ce);
@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
ce = engine->preempt_context;
if (ce)
ce->ops->reset(ce);
+
+ engine->serial++; /* kernel context lost */
+ err = engine->resume(engine);
+
+ intel_engine_pm_put(engine);
+ if (err) {
+ dev_err(i915->drm.dev,
+ "Failed to restart %s (%d)\n",
+ engine->name, err);
+ break;
+ }
}
+ intel_gt_pm_put(i915);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 7dd1130a19a4..53f342b20181 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
void intel_gt_pm_init(struct drm_i915_private *i915);
void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+int intel_gt_resume(struct drm_i915_private *i915);
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b42b5f158295..82b7ace62d97 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+ GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
@@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce)
{
i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
- intel_ring_unpin(ce->ring);
}
static void
@@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce,
goto unpin_active;
}
- ret = intel_ring_pin(ce->ring);
- if (ret)
- goto unpin_map;
-
ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
- goto unpin_ring;
+ goto unpin_map;
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce,
return 0;
-unpin_ring:
- intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4c478b38e420..3f907701ef4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
- intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
}
}
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
+ if (intel_engine_pm_get_if_awake(engine))
+ awake |= engine->mask;
reset_prepare_engine(engine);
+ }
intel_uc_reset_prepare(i915);
+
+ return awake;
}
static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915,
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
- intel_engine_pm_put(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_signal_breadcrumbs(engine);
}
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+ intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
- intel_engine_signal_breadcrumbs(engine);
+ if (awake & engine->mask)
+ intel_engine_pm_put(engine);
}
- intel_gt_pm_put(i915);
}
static void nop_submit_request(struct i915_request *request)
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- reset_prepare(i915);
+ awake = reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
- reset_finish(i915);
+ reset_finish(i915, awake);
GEM_TRACE("end\n");
}
@@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915,
return gt_reset(i915, stalled_mask);
}
+static int resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int ret;
+
+ for_each_engine(engine, i915, id) {
+ ret = engine->resume(engine);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* i915_reset - reset chip after a hang
* @i915: #drm_i915_private to reset
@@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
+ intel_engine_mask_t awake;
int ret;
GEM_TRACE("flags=%lx\n", error->flags);
@@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915,
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
- reset_prepare(i915);
+ awake = reset_prepare(i915);
if (!intel_has_gpu_reset(i915)) {
if (i915_modparams.reset)
@@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915,
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
- goto error;
+ goto taint;
}
+ ret = resume(i915);
+ if (ret)
+ goto taint;
+
i915_queue_hangcheck(i915);
finish:
- reset_finish(i915);
+ reset_finish(i915, awake);
unlock:
mutex_unlock(&error->wedge_mutex);
return;
@@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
- if (!intel_wakeref_active(&engine->wakeref))
+ if (!intel_engine_pm_get_if_awake(engine))
return 0;
reset_prepare_engine(engine);
@@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* process to program RING_MODE, HWSP and re-enable submission.
*/
ret = engine->resume(engine);
- if (ret)
- goto out;
out:
intel_engine_cancel_stop_cs(engine);
reset_finish_engine(engine);
+ intel_engine_pm_put(engine);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index c6023bc9452d..12010e798868 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq,
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
- enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
- GEM_BUG_ON(ring->vaddr);
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
ret = i915_timeline_pin(ring->timeline);
if (ret)
- return ret;
+ goto err_unpin;
flags = PIN_GLOBAL;
@@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring)
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- goto unpin_timeline;
+ goto err_timeline;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
- addr = i915_gem_object_pin_map(vma->obj, map);
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
- goto unpin_ring;
+ goto err_ring;
}
vma->obj->pin_global++;
+ GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
+
return 0;
-unpin_ring:
+err_ring:
i915_vma_unpin(vma);
-unpin_timeline:
+err_timeline:
i915_timeline_unpin(ring->timeline);
+err_unpin:
+ atomic_dec(&ring->pin_count);
return ret;
}
@@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
void intel_ring_unpin(struct intel_ring *ring)
{
- GEM_BUG_ON(!ring->vma);
- GEM_BUG_ON(!ring->vaddr);
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
+ GEM_BUG_ON(!ring->vma);
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
i915_gem_object_unpin_map(ring->vma->obj);
+
+ GEM_BUG_ON(!ring->vaddr);
ring->vaddr = NULL;
ring->vma->obj->pin_global--;
@@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ intel_engine_cleanup_common(engine);
+
intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
- intel_engine_cleanup_common(engine);
kfree(engine);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 15e90fd2cfdc..98dfb086320f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
if (engine->class != RENDER_CLASS)
return;
- gen9_whitelist_build(&engine->whitelist);
+ gen9_whitelist_build(w);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
}
static void cnl_whitelist_build(struct intel_engine_cs *engine)
@@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* WaEnableStateCacheRedirectToCS:icl */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
@@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
GEN7_SARCHKMD,
- GEN7_DISABLE_DEMAND_PREFETCH |
- GEN7_DISABLE_SAMPLER_PREFETCH);
+ GEN7_DISABLE_DEMAND_PREFETCH);
+
+ /* Wa_1606682166:icl */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
}
if (IS_GEN_RANGE(i915, 9, 11)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 086801b51441..486c6953dcb1 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
ring->base.timeline = &ring->timeline;
+ atomic_set(&ring->base.pin_count, 1);
INIT_LIST_HEAD(&ring->base.request_list);
intel_ring_update_space(&ring->base);
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 89da9e7cc1ba..b5c590c9ccba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg)
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
+ intel_engine_mask_t awake;
+
GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+ awake = reset_prepare(i915);
p->critical_section_begin();
reset_prepare(i915);
err = intel_gpu_reset(i915, ALL_ENGINES);
- reset_finish(i915);
p->critical_section_end();
+ reset_finish(i915, awake);
if (err) {
pr_err("intel_gpu_reset failed under %s\n", p->name);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 9eaf030affd0..44becd9538be 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
err = 0;
for (i = 0; i < engine->whitelist.count; i++) {
- if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+ const struct i915_wa *wa = &engine->whitelist.list[i];
+
+ if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+ continue;
+
+ if (!fn(engine, a[i], b[i], wa->reg))
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6ea88270c818..b09dc315e2da 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.workload = workload;
s.is_ctx_wa = true;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 65e847392aea..8bb292b01271 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53115bdae12b..4b04af569c05 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
unsigned long index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
struct intel_gvt_gtt_entry e;
if (bytes != 4 && bytes != 8)
return -EINVAL;
+ gma = index << I915_GTT_PAGE_SHIFT;
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ gma, 1 << I915_GTT_PAGE_SHIFT)) {
+ gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
+ memset(p_data, 0, bytes);
+ return 0;
+ }
+
ggtt_get_guest_entry(ggtt_mm, &e, index);
memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
bytes);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 144301b778df..23aa3e50cbf8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1911,6 +1911,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
+ } else if (entry->size != size) {
+ /* the same gfn with different size: unmap and re-map */
+ gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
+
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+ if (ret)
+ goto err_unlock;
+
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ if (ret)
+ goto err_unmap;
} else {
kref_get(&entry->ref);
*dma_addr = entry->dma_addr;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 2144fb46d0e1..9f3fd7d96a69 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.shadow_va = NULL;
}
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
- struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
int i = 0;
- if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
- return -EINVAL;
-
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
-
- return 0;
}
static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return ret;
}
+ if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+ !workload->shadow_mm->ppgtt_mm.shadowed) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return -EINVAL;
+ }
+
update_shadow_pdps(workload);
+ set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = set_context_ppgtt_from_shadow(workload,
- s->shadow[ring_id]->gem_context);
- if (ret < 0) {
- gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
- goto err_req;
- }
-
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -990,6 +987,7 @@ static int workload_thread(void *priv)
int ret;
bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
kfree(p);
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv)
workload->ring_id, workload,
workload->vgpu->id);
+ intel_runtime_pm_get(rpm);
+
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
@@ -1042,6 +1042,7 @@ complete:
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
+ intel_runtime_pm_put_unchecked(rpm);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+ if (!intel_gvt_ggtt_validate_range(vgpu, start,
+ _RING_CTL_BUF_SIZE(ctl))) {
+ gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+ return ERR_PTR(-EINVAL);
+ }
+
workload = alloc_workload(vgpu);
if (IS_ERR(workload))
return workload;
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
+
+ if (workload->wa_ctx.indirect_ctx.size != 0) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.indirect_ctx.guest_gma,
+ workload->wa_ctx.indirect_ctx.size)) {
+ kmem_cache_free(s->workloads, workload);
+ gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+ workload->wa_ctx.indirect_ctx.guest_gma);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ if (workload->wa_ctx.per_ctx.valid) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.per_ctx.guest_gma,
+ CACHELINE_BYTES)) {
+ kmem_cache_free(s->workloads, workload);
+ gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+ workload->wa_ctx.per_ctx.guest_gma);
+ return ERR_PTR(-EINVAL);
+ }
+ }
}
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c
index a3deed692b9c..fe552e877e09 100644
--- a/drivers/gpu/drm/i915/gvt/trace_points.c
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -28,8 +28,6 @@
*
*/
-#include "trace.h"
-
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bc909ec5d9c3..fe7a6ec2c199 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1674,8 +1674,9 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
- int num_planes;
- int deratedbw[3];
+ unsigned int deratedbw[3]; /* for each QGV point */
+ u8 num_qgv_points;
+ u8 num_planes;
} max_bw[6];
struct drm_private_obj bw_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 190ad54fb072..8a659d3d7435 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
intel_mocs_init_l3cc_table(dev_priv);
- /* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_engines_resume(dev_priv);
- if (ret)
- goto cleanup_uc;
-
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_engines_set_scheduler_caps(dev_priv);
return 0;
-cleanup_uc:
- intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
return ret;
}
@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_uc_init;
+ /* Only when the HW is re-initialised, can we replay the requests */
+ ret = intel_gt_resume(dev_priv);
+ if (ret)
+ goto err_init_hw;
+
/*
* Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
ret = intel_engines_verify_workarounds(dev_priv);
if (ret)
- goto err_init_hw;
+ goto err_gt;
ret = __intel_engines_record_defaults(dev_priv);
if (ret)
- goto err_init_hw;
+ goto err_gt;
if (i915_inject_load_failure()) {
ret = -ENODEV;
- goto err_init_hw;
+ goto err_gt;
}
if (i915_inject_load_failure()) {
ret = -EIO;
- goto err_init_hw;
+ goto err_gt;
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* HW as irreversibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
-err_init_hw:
+err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_set_wedged(dev_priv);
@@ -1630,6 +1626,7 @@ err_init_hw:
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
+err_init_hw:
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8ab820145ea6..7015a97b1097 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1444,9 +1444,11 @@ unwind_pd:
spin_lock(&pdp->lock);
if (atomic_dec_and_test(&pd->used)) {
gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+ pdp->entry[pdpe] = vm->scratch_pd;
GEM_BUG_ON(!atomic_read(&pdp->used));
atomic_dec(&pdp->used);
- free_pd(vm, pd);
+ GEM_BUG_ON(alloc);
+ alloc = pd; /* defer the free to after the lock */
}
spin_unlock(&pdp->lock);
unwind:
@@ -1515,7 +1517,9 @@ unwind_pdp:
spin_lock(&pml4->lock);
if (atomic_dec_and_test(&pdp->used)) {
gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pd(vm, pdp);
+ pml4->entry[pml4e] = vm->scratch_pdp;
+ GEM_BUG_ON(alloc);
+ alloc = pdp; /* defer the free until after the lock */
}
spin_unlock(&pml4->lock);
unwind:
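Note: both unwind hunks use the same idiom: the page directory cannot be freed while the spinlock protecting the tree is held, so the pointer is parked in alloc and presumably released once the lock is dropped later in the function. The general shape (a sketch, not the exact i915 code):

    void *to_free = NULL;

    spin_lock(&lock);
    if (atomic_dec_and_test(&obj->used)) {
            /* ... detach obj from the tree ... */
            to_free = obj;  /* must not call the allocator under the lock */
    }
    spin_unlock(&lock);

    kfree(to_free);         /* kfree(NULL) is a no-op */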
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 41a511d5267f..8bc76fcff70d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1418,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct intel_engine_cs *engine = i915->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct i915_request *request;
+ unsigned long flags;
ee->engine_id = -1;
@@ -1429,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
+ spin_lock_irqsave(&engine->active.lock, flags);
request = intel_engine_find_active_request(engine);
if (request) {
struct i915_gem_context *ctx = request->gem_context;
- struct intel_ring *ring;
+ struct intel_ring *ring = request->ring;
ee->vm = ctx->vm ?: &ggtt->vm;
@@ -1462,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->rq_post = request->postfix;
ee->rq_tail = request->tail;
- ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
ee->ringbuffer =
@@ -1470,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
engine_record_requests(engine, request, ee);
}
+ spin_unlock_irqrestore(&engine->active.lock, flags);
ee->hws_page =
i915_error_object_create(i915,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a700c5c3d167..5140017f9a39 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static void delay_after_mux(void)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
-
- /* PRM:
- *
- * OA unit is using “crclk” for its functionality. When trunk
- * level clock gating takes place, OA clock would be gated,
- * unable to count the events from non-render clock domain.
- * Render clock gating must be disabled when OA is enabled to
- * count the events from non-render domain. Unit level clock
- * gating for RCS should also be disabled.
- */
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
-
- /* It apparently takes a fairly long time for a new MUX
+ /*
+ * It apparently takes a fairly long time for a new MUX
* configuration to be applied after these register writes.
* This delay duration was derived empirically based on the
* render_basic config but hopefully it covers the maximum
@@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* a delay at this location would mitigate any invalid reports.
*/
usleep_range(15000, 20000);
+}
+
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
+
+ /*
+ * PRM:
+ *
+ * OA unit is using “crclk” for its functionality. When trunk
+ * level clock gating takes place, OA clock would be gated,
+ * unable to count the events from non-render clock domain.
+ * Render clock gating must be disabled when OA is enabled to
+ * count the events from non-render domain. Unit level clock
+ * gating for RCS should also be disabled.
+ */
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN7_DOP_CLOCK_GATE_ENABLE));
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+ config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ delay_after_mux();
config_oa_regs(dev_priv, oa_config->b_counter_regs,
oa_config->b_counter_regs_len);
@@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
return ret;
config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ delay_after_mux();
config_oa_regs(dev_priv, oa_config->b_counter_regs,
oa_config->b_counter_regs_len);
@@ -2515,6 +2522,9 @@ static int i915_perf_release(struct inode *inode, struct file *file)
i915_perf_destroy_locked(stream);
mutex_unlock(&dev_priv->perf.lock);
+ /* Release the reference the perf stream kept on the driver. */
+ drm_dev_put(&dev_priv->drm);
+
return 0;
}
@@ -2650,6 +2660,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
if (!(param->flags & I915_PERF_FLAG_DISABLED))
i915_perf_enable_locked(stream);
+ /* Take a reference on the driver that will be kept with stream_fd
+ * until its release.
+ */
+ drm_dev_get(&dev_priv->drm);
+
return stream_fd;
err_open:
@@ -3477,9 +3492,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
-
+ if (IS_GEN(dev_priv, 10)) {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ } else {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+ }
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f4ce643b3bc3..cce426b23a24 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -21,24 +21,22 @@
/* watermark/fifo updates */
TRACE_EVENT(intel_pipe_enable,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
TP_STRUCT__entry(
__array(u32, frame, 3)
__array(u32, scanline, 3)
__field(enum pipe, pipe)
),
-
TP_fast_assign(
- enum pipe _pipe;
- for_each_pipe(dev_priv, _pipe) {
- __entry->frame[_pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
- __entry->scanline[_pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = pipe;
+ __entry->pipe = crtc->pipe;
),
TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable,
);
TRACE_EVENT(intel_pipe_disable,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
TP_STRUCT__entry(
__array(u32, frame, 3)
@@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable,
),
TP_fast_assign(
- enum pipe _pipe;
- for_each_pipe(dev_priv, _pipe) {
- __entry->frame[_pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
- __entry->scanline[_pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = pipe;
+ __entry->pipe = crtc->pipe;
),
TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
),
@@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
),
TP_fast_assign(
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
__entry->pipe = pipe;
- __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun,
TP_fast_assign(
enum pipe pipe = pch_transcoder;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
__entry->pipe = pipe;
- __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pch transcoder %c, frame=%u, scanline=%u",
@@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr,
),
TP_fast_assign(
- enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
- __entry->frame[pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline[pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ struct intel_crtc *crtc;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
}
__entry->old = old;
__entry->new = new;
@@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
__entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
@@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->level = wm->level;
__entry->cxsr = wm->cxsr;
@@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->sprite0_start = sprite0_start;
__entry->sprite1_start = sprite1_start;
@@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane,
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
@@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane,
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
@@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = crtc->debug.min_vbl;
__entry->max = crtc->debug.max_vbl;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 502c54428570..8d1aebc3e857 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
- struct drm_printer p;
+ if (debug->count) {
+ struct drm_printer p = drm_debug_printer("i915");
- if (!debug->count)
- return;
-
- p = drm_debug_printer("i915");
- __print_intel_runtime_pm_wakeref(&p, debug);
+ __print_intel_runtime_pm_wakeref(&p, debug);
+ }
kfree(debug->owners);
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 9cbb2ebf575b..38275310b196 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -66,6 +66,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm,
}
/**
+ * intel_wakeref_get_if_active: Acquire the wakeref if already active
+ * @wf: the wakeref
+ *
+ * Acquire a hold on the wakeref, but only if the wakeref is already
+ * active.
+ *
+ * Returns: true if the wakeref was acquired, false otherwise.
+ */
+static inline bool
+intel_wakeref_get_if_active(struct intel_wakeref *wf)
+{
+ return atomic_inc_not_zero(&wf->count);
+}
+
+/**
* intel_wakeref_put: Release the wakeref
* @i915: the drm_i915_private device
* @wf: the wakeref
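
intel_wakeref_get_if_active() expresses "take a reference only if someone else already holds one": atomic_inc_not_zero() fails once the count has dropped to zero, so the caller never triggers the expensive wake-up path. A hedged sketch of a typical caller (do_deferred_flush() is hypothetical, and the put side's exact signature varies across kernel versions):

	if (intel_wakeref_get_if_active(wf)) {
		do_deferred_flush();		/* work that needs the device awake */
		intel_wakeref_put(i915, wf);	/* balance the conditional get */
	}
	/* else: the device is asleep, so there is nothing pinned to flush */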
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 1671db47aa57..e9c55d1d6c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index be39cf01e51e..dc8ec2c94301 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9acbbc0f3232..048c8be426f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index ff14555372d0..78d5fa230c16 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp5_crtc->enabled = false;
}
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ u32 count;
+
+ count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+ drm_crtc_set_max_vblank_count(crtc, count);
+
+ drm_crtc_vblank_on(crtc);
+}
+
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
}
/* Restore vblank irq handling after power is enabled */
- drm_crtc_vblank_on(crtc);
+ mdp5_crtc_vblank_on(crtc);
mdp5_crtc_mode_set_nofb(crtc);
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
mdp5_crtc_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+ drm_crtc_vblank_reset(crtc);
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 4a60f5fca6b0..fec6ef1ae3b9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- dev->max_vblank_count = 0xffffffff;
+ dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
return kms;
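
Setting dev->max_vblank_count to zero tells the DRM core that each CRTC declares its own counter range via drm_crtc_set_max_vblank_count(), which mdp5_crtc_vblank_on() above does per interface: command-mode DSI has no free-running hardware frame counter, so it gets a max count of 0 and the core counts vblank events in software, while everything else keeps the full 32-bit hardware counter. A hedged sketch of the split, with has_hw_frame_counter() hypothetical:

	dev->max_vblank_count = 0;	/* opt in to per-CRTC counter ranges */

	if (has_hw_frame_counter(crtc))
		drm_crtc_set_max_vblank_count(crtc, 0xffffffff);
	else
		drm_crtc_set_max_vblank_count(crtc, 0);	/* no hw counter */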
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c226156f2dea..c356f5ccf253 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- drm_of_component_match_add(dev, matchptr, compare_of, np);
+ if (of_device_is_available(np))
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c2114c748c2f..8cf6362e64bf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}
+/*
+ * Cache sync: this is a bit over-complicated in order to fit the
+ * dma-mapping API. Really the GPU cache is out of scope here (it is
+ * handled on the cmdstream) and all we need to do is invalidate newly
+ * allocated pages before mapping them to the CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_map_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sync_for_device(msm_obj);
}
return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
+ sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8497768f1b41..126703816794 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
connector->display_info.bpc * 3);
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (crtc_state->mode_changed) {
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
mstc->port,
asyh->dp.pbn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..a835cebb6d90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+ bool ret = hmm_range_valid(range);
+
+ hmm_range_unregister(range);
+ return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+ long ret;
+
+ range->default_flags = 0;
+ range->pfn_flags_mask = -1UL;
+
+ ret = hmm_range_register(range, mirror,
+ range->start, range->end,
+ PAGE_SHIFT);
+ if (ret) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return (int)ret;
+ }
+
+ if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return -EAGAIN;
+ }
+
+ ret = hmm_range_fault(range, true);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -EBUSY;
+ up_read(&range->vma->vm_mm->mmap_sem);
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
+
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
- if (!hmm_vma_range_done(&range)) {
+ if (!nouveau_range_done(&range)) {
mutex_unlock(&svmm->mutex);
goto again;
}
@@ -666,8 +707,8 @@ again:
NULL);
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
+ up_read(&svmm->mm->mmap_sem);
}
- up_read(&svmm->mm->mmap_sem);
/* Cancel any faults in the window whose pages didn't manage
* to keep their valid bit, or stay writeable when required.
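
nouveau_range_fault() open-codes what hmm_vma_fault() used to bundle: register the range against the mirror, wait for it to become valid, fault it in, and drop mmap_sem on every failure path. The caller then revalidates the snapshot under its own lock before consuming it, retrying if an invalidation raced in. A hedged sketch of that snapshot/retry idiom (program_device_ptes() and driver_lock are hypothetical, and mmap_sem handling on the success path is elided):

	again:
		ret = nouveau_range_fault(mirror, &range);
		if (ret)
			return ret;		/* mmap_sem already dropped */

		mutex_lock(&driver_lock);
		if (!nouveau_range_done(&range)) {
			mutex_unlock(&driver_lock);
			goto again;		/* snapshot went stale */
		}
		program_device_ptes(&range);	/* consume range->pfns */
		mutex_unlock(&driver_lock);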
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 888d89ce81df..beee7b7e0d9a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
struct ib_udata *udata,
struct ib_uobject *uobj)
{
+ enum ib_qp_type qp_type = attr->qp_type;
struct ib_qp *qp;
+ bool is_xrc;
if (!dev->ops.create_qp)
return ERR_PTR(-EOPNOTSUPP);
@@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
* and, more importantly, they are created internally by the driver;
* see mlx5 create_dev_resources() as an example.
*/
- if (attr->qp_type < IB_QPT_XRC_INI) {
+ is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
+ if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
qp->res.type = RDMA_RESTRACK_QP;
if (uobj)
rdma_restrack_uadd(&qp->res);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 01faef7bc061..45d5164e9574 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
u64 sum;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return 0;
+
sum = get_running_counters_hwstat_sum(dev, port, index);
sum += port_counter->hstats->value[index];
@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats || !dev->port_data)
+ if (!dev->port_data)
return;
rdma_for_each_port(dev, port) {
@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev)
port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
mutex_init(&port_counter->lock);
+ if (!dev->ops.alloc_hw_stats)
+ continue;
+
port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
if (!port_counter->hstats)
goto fail;
@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats)
- return;
-
rdma_for_each_port(dev, port) {
port_counter = &dev->port_data[port].port_counter;
kfree(port_counter->hstats);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9773145dee09..ea8661a00651 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1
-static LIST_HEAD(client_list);
+static u32 highest_client_id;
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);
+static void ib_client_put(struct ib_client *client)
+{
+ if (refcount_dec_and_test(&client->uses))
+ complete(&client->uses_zero);
+}
+
/*
* If client_data is registered then the corresponding client must also still
* be registered.
@@ -661,6 +667,14 @@ static int add_client_context(struct ib_device *device,
down_write(&device->client_data_rwsem);
/*
+ * So long as the client is registered, hold both the client and device
+ * unregistration locks.
+ */
+ if (!refcount_inc_not_zero(&client->uses))
+ goto out_unlock;
+ refcount_inc(&device->refcount);
+
+ /*
* Another caller to add_client_context got here first and has already
* completely initialized context.
*/
@@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device,
return 0;
out:
+ ib_device_put(device);
+ ib_client_put(client);
+out_unlock:
up_write(&device->client_data_rwsem);
return ret;
}
@@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device,
client_data = xa_load(&device->client_data, client_id);
xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
client = xa_load(&clients, client_id);
- downgrade_write(&device->client_data_rwsem);
+ up_write(&device->client_data_rwsem);
/*
* Notice we cannot be holding any exclusive locks when calling the
@@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device,
*
* For this reason clients and drivers should not call the
* unregistration functions while holding any locks.
- *
- * It tempting to drop the client_data_rwsem too, but this is required
- * to ensure that unregister_client does not return until all clients
- * are completely unregistered, which is required to avoid module
- * unloading races.
*/
if (client->remove)
client->remove(device, client_data);
xa_erase(&device->client_data, client_id);
- up_read(&device->client_data_rwsem);
+ ib_device_put(device);
+ ib_client_put(client);
}
static int alloc_port_data(struct ib_device *device)
@@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device)
static void disable_device(struct ib_device *device)
{
- struct ib_client *client;
+ u32 cid;
WARN_ON(!refcount_read(&device->refcount));
@@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device)
xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
up_write(&devices_rwsem);
+ /*
+ * Remove clients in LIFO order, see assign_client_id. This could be
+ * more efficient if xarray learns to reverse iterate. Since no new
+ * clients can be added to this ib_device past this point we only need
+ * the maximum possible client_id value here.
+ */
down_read(&clients_rwsem);
- list_for_each_entry_reverse(client, &client_list, list)
- remove_client_context(device, client->client_id);
+ cid = highest_client_id;
up_read(&clients_rwsem);
+ while (cid) {
+ cid--;
+ remove_client_context(device, cid);
+ }
/* Pairs with refcount_set in enable_device */
ib_device_put(device);
@@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client)
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
- * registration order, and retain a linked list we can reverse iterate
- * to get the LIFO order. The extra linked list can go away if xarray
- * learns to reverse iterate.
+ * registration order.
*/
- if (list_empty(&client_list)) {
- client->client_id = 0;
- } else {
- struct ib_client *last;
-
- last = list_last_entry(&client_list, struct ib_client, list);
- client->client_id = last->client_id + 1;
- }
+ client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
goto out;
+ highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
- list_add_tail(&client->list, &client_list);
out:
up_write(&clients_rwsem);
return ret;
}
+static void remove_client_id(struct ib_client *client)
+{
+ down_write(&clients_rwsem);
+ xa_erase(&clients, client->client_id);
+ for (; highest_client_id; highest_client_id--)
+ if (xa_load(&clients, highest_client_id - 1))
+ break;
+ up_write(&clients_rwsem);
+}
+
/**
* ib_register_client - Register an IB client
* @client:Client to register
@@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client)
unsigned long index;
int ret;
+ refcount_set(&client->uses, 1);
+ init_completion(&client->uses_zero);
ret = assign_client_id(client);
if (ret)
return ret;
@@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client)
unsigned long index;
down_write(&clients_rwsem);
+ ib_client_put(client);
xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
up_write(&clients_rwsem);
- /*
- * Every device still known must be serialized to make sure we are
- * done with the client callbacks before we return.
- */
- down_read(&devices_rwsem);
- xa_for_each (&devices, index, device)
+
+ /* We do not want to have locks while calling client->remove() */
+ rcu_read_lock();
+ xa_for_each (&devices, index, device) {
+ if (!ib_device_try_get(device))
+ continue;
+ rcu_read_unlock();
+
remove_client_context(device, client->client_id);
- up_read(&devices_rwsem);
- down_write(&clients_rwsem);
- list_del(&client->list);
- xa_erase(&clients, client->client_id);
- up_write(&clients_rwsem);
+ ib_device_put(device);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ /*
+ * remove_client_context() is not a fence; it can return even though a
+ * removal is ongoing. Wait until all removals are completed.
+ */
+ wait_for_completion(&client->uses_zero);
+ remove_client_id(client);
}
EXPORT_SYMBOL(ib_unregister_client);
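
The client->uses refcount paired with the uses_zero completion is the standard kernel recipe for "wait until every outstanding user is gone" without holding locks across the remove callbacks: users take the count with refcount_inc_not_zero(), the final put fires the completion, and the unregistering thread drops its own reference and sleeps on it. A self-contained sketch of the pattern (obj and its helpers are illustrative):

	#include <linux/refcount.h>
	#include <linux/completion.h>

	struct obj {
		refcount_t uses;
		struct completion uses_zero;
	};

	static void obj_init(struct obj *o)
	{
		refcount_set(&o->uses, 1);		/* the owner's reference */
		init_completion(&o->uses_zero);
	}

	static bool obj_get(struct obj *o)
	{
		return refcount_inc_not_zero(&o->uses);	/* fails once teardown starts */
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->uses))
			complete(&o->uses_zero);
	}

	static void obj_unregister(struct obj *o)
	{
		obj_put(o);				/* drop the owner's reference */
		wait_for_completion(&o->uses_zero);	/* fence: all users done */
	}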
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cc99479b2c09..9947d16edef2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
if (has_smi)
cq_size *= 2;
+ port_priv->pd = ib_alloc_pd(device, 0);
+ if (IS_ERR(port_priv->pd)) {
+ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+ ret = PTR_ERR(port_priv->pd);
+ goto error3;
+ }
+
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
IB_POLL_UNBOUND_WORKQUEUE);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
- goto error3;
- }
-
- port_priv->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(port_priv->pd)) {
- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
- ret = PTR_ERR(port_priv->pd);
goto error4;
}
@@ -3278,11 +3278,11 @@ error8:
error7:
destroy_mad_qp(&port_priv->qp_info[0]);
error6:
- ib_dealloc_pd(port_priv->pd);
-error4:
ib_free_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+ ib_dealloc_pd(port_priv->pd);
error3:
kfree(port_priv);
@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
destroy_workqueue(port_priv->wq);
destroy_mad_qp(&port_priv->qp_info[1]);
destroy_mad_qp(&port_priv->qp_info[0]);
- ib_dealloc_pd(port_priv->pd);
ib_free_cq(port_priv->cq);
+ ib_dealloc_pd(port_priv->pd);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
/* XXX: Handle deallocation of MAD registration tables */
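
Allocating the PD before the CQ and freeing in the opposite order restores the invariant that teardown is the exact mirror of setup; the goto-unwind style then stays mechanical, because a failure at step N jumps to the label that releases steps N-1 down to 1. Sketched with hypothetical resources:

	static int port_open(struct port *p)
	{
		int ret;

		p->pd = alloc_pd();
		if (IS_ERR(p->pd))
			return PTR_ERR(p->pd);

		p->cq = alloc_cq();
		if (IS_ERR(p->cq)) {
			ret = PTR_ERR(p->cq);
			goto err_pd;
		}
		return 0;

	err_pd:
		free_pd(p->pd);		/* last acquired is first released */
		return ret;
	}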
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 9f8a48016b41..ffdeaf6e0b68 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
@@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
if (get_user(id, arg))
return -EFAULT;
+ if (id >= IB_UMAD_MAX_AGENTS)
+ return -EINVAL;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+ if (!__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
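
array_index_nospec() clamps the index with a data dependency, so the CPU cannot speculate past the bounds check into an out-of-bounds load (Spectre v1). The idiom is: check the range architecturally, sanitize, then index. A minimal sketch with assumed names (MAX_AGENTS, struct agent):

	#include <linux/nospec.h>

	static struct agent *lookup_agent(struct agent **table, u32 id)
	{
		if (id >= MAX_AGENTS)
			return NULL;			 /* architectural check */
		id = array_index_nospec(id, MAX_AGENTS); /* clamp under speculation */
		return table[id];
	}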
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a91653aabf38..098ab883733e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
struct bnxt_qplib_gid *gid_to_del;
+ u16 vlan_id = 0xFFFF;
/* Delete the entry from the hardware */
ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
if (sgid_tbl && sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
- gid_to_del = &sgid_tbl->tbl[ctx->idx];
+ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
/* DEL_GID is called in WQ context (netdevice_event_work_handler)
* or via the ib_unregister_device path. In the former case QP1
* may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
}
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+ vlan_id, true);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 37928b1111df..bdbde8e22420 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl,
u16 max)
{
- sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
if (!sgid_tbl->tbl)
return -ENOMEM;
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
for (i = 0; i < sgid_tbl->max; i++) {
if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)))
- bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+ sgid_tbl->tbl[i].vlan_id, true);
}
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct net_device *netdev)
{
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ u32 i;
+
+ for (i = 0; i < sgid_tbl->max; i++)
+ sgid_tbl->tbl[i].vlan_id = 0xffff;
+
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 30c42c92fac7..fbda11a7ab1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
};
struct bnxt_qplib_sgid_tbl {
- struct bnxt_qplib_gid *tbl;
+ struct bnxt_qplib_gid_info *tbl;
u16 *hw_id;
u16 max;
u16 active;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 48793d3512ac..40296b97d21e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
index, sgid_tbl->max);
return -EINVAL;
}
- memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+ memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
return 0;
}
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update)
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
- if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+ if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+ vlan_id == sgid_tbl->tbl[index].vlan_id)
break;
}
if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
if (rc)
return rc;
}
- memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+ memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->tbl[index].vlan_id = 0xFFFF;
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
- if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+ if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+ sgid_tbl->tbl[i].vlan_id == vlan_id) {
dev_dbg(&res->pdev->dev,
"SGID entry already exist in entry %d!\n", i);
*index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+ sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
sgid_tbl->active++;
if (vlan_id != 0xFFFF)
sgid_tbl->vlan[free_idx] = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 0ec3b12b0bcd..13d9432d5ce2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
u8 data[16];
};
+struct bnxt_qplib_gid_info {
+ struct bnxt_qplib_gid gid;
+ u16 vlan_id;
+};
+
struct bnxt_qplib_ah {
struct bnxt_qplib_gid dgid;
struct bnxt_qplib_pd *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update);
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index);
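
bnxt_qplib_gid_info turns the table key into the (GID, VLAN) pair, so two RoCE entries that share a GID but differ in VLAN no longer alias each other; every lookup, add, and delete above now compares both fields. The lookup reduces to a composite-key scan, sketched here with simplified types:

	#include <linux/string.h>
	#include <linux/types.h>

	struct gid { u8 data[16]; };

	struct gid_info {
		struct gid	gid;
		u16		vlan_id;	/* 0xffff == no VLAN */
	};

	static int find_gid(const struct gid_info *tbl, u16 max,
			    const struct gid *gid, u16 vlan_id)
	{
		u16 i;

		for (i = 0; i < max; i++)
			if (!memcmp(&tbl[i].gid, gid, sizeof(*gid)) &&
			    tbl[i].vlan_id == vlan_id)
				return i;	/* full composite key matched */
		return -1;
	}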
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d5b643a1d9fd..67052dc3100c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
{
struct rsm_map_table *rmt;
u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
write_csr(dd, RCV_ERR_MASK, ~0ull);
rmt = alloc_rsm_map_table(dd);
+ if (!rmt)
+ return -ENOMEM;
+
/* set up QOS, including the QPN map table */
init_qos(dd, rmt);
init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
RCV_BYPASS_HDR_SIZE_SHIFT);
write_csr(dd, RCV_BYPASS, val);
+ return 0;
}
static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
goto bail_cleanup;
/* set initial RXE CSRs */
- init_rxe(dd);
+ ret = init_rxe(dd);
+ if (ret)
+ goto bail_cleanup;
+
/* set initial TXE CSRs */
init_txe(dd);
/* set initial non-RXE, non-TXE CSRs */
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0477c14633ab..024a7c2b6124 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92acccaaaa86..996fc298207e 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
flows[i].req = req;
flows[i].npagesets = 0;
flows[i].pagesets[0].mapped = 0;
+ flows[i].resync_npkts = 0;
}
req->flows = flows;
return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
return NULL;
}
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
- u32 psn, u16 *fidx)
-{
- for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
- tail = CIRC_NEXT(tail, MAX_FLOWS)) {
- struct tid_rdma_flow *flow = &req->flows[tail];
- u32 spsn, lpsn;
-
- spsn = full_flow_psn(flow, flow->flow_state.spsn);
- lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
- if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
- if (fidx)
- *fidx = tail;
- return flow;
- }
- }
- return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
- u32 psn, u16 *fidx)
-{
- return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
- fidx);
-}
-
/* TID RDMA READ functions */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, u32 *bth1,
@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
* to prevent continuous Flow Sequence errors for any
* packets that could still be in the fabric.
*/
- flow = find_flow(req, psn, NULL);
- if (!flow) {
- /*
- * We can't find the IB PSN matching the
- * received KDETH PSN. The only thing we can
- * do at this point is report the error to
- * the QP.
- */
- hfi1_kern_read_tid_flow_free(qp);
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return ret;
- }
+ flow = &req->flows[req->clear_tail];
if (priv->s_flags & HFI1_R_TID_SW_PSN) {
diff = cmp_psn(psn,
flow->flow_state.r_next_psn);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index c4b243f50c76..646f61545ed6 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -54,6 +54,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <rdma/opa_addr.h>
+#include <linux/nospec.h>
#include "hfi.h"
#include "common.h"
@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
sl = rdma_ah_get_sl(ah_attr);
if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
return -EINVAL;
+ sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 8bf847bcd8d3..54782197c717 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
- tristate "HNS RoCE Driver"
+ bool "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
@@ -11,7 +11,7 @@ config INFINIBAND_HNS
To compile HIP06 or HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP06
- bool "Hisilicon Hip06 Family RoCE support"
+ tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
@@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06
module will be called hns-roce-hw-v1
config INFINIBAND_HNS_HIP08
- bool "Hisilicon Hip08 Family RoCE support"
+ tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e105945b94a1..449a2d81319d 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
-ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
-ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 627aa46ef683..c00714c2f16a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db)
{
+ unsigned long page_addr = virt & PAGE_MASK;
struct hns_roce_user_db_page *page;
+ unsigned int offset;
int ret = 0;
mutex_lock(&context->page_mutex);
list_for_each_entry(page, &context->page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if (page->user_virt == page_addr)
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
}
refcount_set(&page->refcount, 1);
- page->user_virt = (virt & PAGE_MASK);
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->user_virt = page_addr;
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
list_add(&page->list, &context->page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) +
- (virt & ~PAGE_MASK);
- page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
- db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+ offset = virt - page_addr;
+ db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+ db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
db->u.user_page = page;
refcount_inc(&page->refcount);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 81e6dedb1e02..c07e387a07a3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
- if (!pd)
+ if (!pd) {
+ ret = -ENOMEM;
goto alloc_mem_failed;
+ }
pd->device = ibdev;
ret = hns_roce_alloc_pd(pd, NULL);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c2a5780cb394..e12a4404096b 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
return;
}
- if (mpi->mdev_events.notifier_call)
- mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
- mpi->mdev_events.notifier_call = NULL;
-
mpi->ibdev = NULL;
spin_unlock(&port->mp.mpi_lock);
+ if (mpi->mdev_events.notifier_call)
+ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+ mpi->mdev_events.notifier_call = NULL;
mlx5_remove_netdev_notifier(ibdev, port_num);
spin_lock(&port->mp.mpi_lock);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index c482f19958b3..f6a53455bf8b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
u64 length;
int access_flags;
u32 mkey;
+ u8 ignore_free_state:1;
};
static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 20ece6e0b2fc..b74fad08412f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
- return order <= mr_cache_max_order(dev) &&
- umr_can_modify_entity_size(dev);
-}
-
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return;
c = order2idx(dev, mr->order);
- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
- return;
- }
+ WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr))
+ if (unreg_umr(dev, mr)) {
+ mr->allocated_from_cache = false;
+ destroy_mkey(dev, mr);
+ ent = &cache->ent[c];
+ if (ent->cur < ent->limit)
+ queue_work(cache->wq, &ent->work);
return;
+ }
ent = &cache->ent[c];
spin_lock_irq(&ent->lock);
@@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
- bool populate_mtts = false;
+ bool use_umr;
struct ib_umem *umem;
int page_shift;
int npages;
@@ -1300,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- if (use_umr(dev, order)) {
+ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+ (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+ !MLX5_CAP_GEN(dev->mdev, atomic));
+
+ if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL;
}
- populate_mtts = false;
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL;
pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error;
}
- populate_mtts = true;
+ use_umr = false;
}
if (!mr) {
- if (!umr_can_modify_entity_size(dev))
- populate_mtts = true;
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, populate_mtts);
+ page_shift, access_flags, !use_umr);
mutex_unlock(&dev->slow_path_mutex);
}
@@ -1338,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
update_odp_mr(mr);
- if (!populate_mtts) {
+ if (use_umr) {
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1373,9 +1367,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
- MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
+ umrwr.pd = dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
+ umrwr.ignore_free_state = 1;
return mlx5_ib_post_send_wait(dev, &umrwr);
}
@@ -1577,10 +1573,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig = NULL;
}
- mlx5_free_priv_descs(mr);
-
- if (!allocated_from_cache)
+ if (!allocated_from_cache) {
destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
+ }
}
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5b642d81e617..81da82050d05 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
* overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
-
+ mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&umem_odp->umem_mutex);
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
w->num_sge, 0);
- kfree(w);
+ kvfree(w);
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (valid_req)
queue_work(system_unbound_wq, &work->work);
else
- kfree(work);
+ kvfree(work);
srcu_read_unlock(&dev->mr_srcu, srcu_key);
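
kvfree() is the matching release for kvzalloc()/kvmalloc(): those helpers fall back from kmalloc to vmalloc for large sizes, and only kvfree() picks the correct free path for both; plain kfree() on vmalloc-backed memory corrupts the allocator. A self-contained sketch (struct big_req is illustrative; the actual work struct here is allocated elsewhere in this file):

	#include <linux/mm.h>		/* kvzalloc(), kvfree() */
	#include <linux/overflow.h>	/* struct_size() */

	struct big_req {
		u32 n;
		u64 entries[];
	};

	static struct big_req *big_req_alloc(u32 n)
	{
		struct big_req *r;

		r = kvzalloc(struct_size(r, entries, n), GFP_KERNEL);
		if (r)
			r->n = n;
		return r;	/* release with kvfree(r), never kfree(r) */
	}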
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2a97619ed603..379328b2598f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len);
break;
}
@@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
memset(umr, 0, sizeof(*umr));
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
- else
- umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 533157a2a3be..f97b3d65b30c 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct qedr_dev *dev =
rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+ return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
+ dev->pdev->device,
+ rdma_protocol_iwarp(&dev->ibdev, 1) ?
+ "iWARP" : "RoCE");
}
static DEVICE_ATTR_RO(hca_type);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index a7cde98e73e8..9ce8a1b925d2 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
static void siw_cep_set_inuse(struct siw_cep *cep)
{
unsigned long flags;
- int rv;
retry:
spin_lock_irqsave(&cep->lock, flags);
if (cep->in_use) {
spin_unlock_irqrestore(&cep->lock, flags);
- rv = wait_event_interruptible(cep->waitq, !cep->in_use);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
if (signal_pending(current))
flush_signals(current);
goto retry;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index f55c4e80aea4..d0f140daf659 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -612,6 +612,7 @@ static __init int siw_init_module(void)
if (!siw_create_tx_threads()) {
pr_info("siw: Could not start any TX thread\n");
+ rv = -ENOMEM;
goto out_error;
}
/*
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 11383d9f95ef..e27bd5b35b96 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp)
{
struct siw_rx_stream *c_rx = &qp->rx_stream;
struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
- int size = crypto_shash_descsize(siw_crypto_shash) +
- sizeof(struct shash_desc);
+ int size;
if (siw_crypto_shash == NULL)
return -ENOENT;
+ size = crypto_shash_descsize(siw_crypto_shash) +
+ sizeof(struct shash_desc);
+
c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 433f4d2ee956..80a740df0737 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -2,7 +2,7 @@
/*
* Virtio driver for the paravirtualized IOMMU
*
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
/* Device configuration */
struct iommu_domain_geometry geometry;
u64 pgsize_bitmap;
- u8 domain_bits;
+ u32 first_domain;
+ u32 last_domain;
+ /* Supported MAP flags */
+ u32 map_flags;
u32 probe_size;
};
@@ -62,6 +65,7 @@ struct viommu_domain {
struct viommu_dev *viommu;
struct mutex mutex; /* protects viommu pointer */
unsigned int id;
+ u32 map_flags;
spinlock_t mappings_lock;
struct rb_root_cached mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
return -ENOENT;
case VIRTIO_IOMMU_S_FAULT:
return -EFAULT;
+ case VIRTIO_IOMMU_S_NOMEM:
+ return -ENOMEM;
case VIRTIO_IOMMU_S_IOERR:
case VIRTIO_IOMMU_S_DEVERR:
default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
{
int ret;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
- (1U << viommu->domain_bits) - 1;
vdomain->viommu = viommu;
+ vdomain->map_flags = viommu->map_flags;
domain->pgsize_bitmap = viommu->pgsize_bitmap;
domain->geometry = viommu->geometry;
- ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
if (ret >= 0)
vdomain->id = (unsigned int)ret;
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
int ret;
- int flags;
+ u32 flags;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
+ if (flags & ~vdomain->map_flags)
+ return -EINVAL;
+
ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
if (ret)
return ret;
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
goto err_free_vqs;
}
- viommu->domain_bits = 32;
+ viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+ viommu->last_domain = ~0U;
/* Optional features */
virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
struct virtio_iommu_config, input_range.end,
&input_end);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
- struct virtio_iommu_config, domain_bits,
- &viommu->domain_bits);
+ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.start,
+ &viommu->first_domain);
+
+ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.end,
+ &viommu->last_domain);
virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
.force_aperture = true,
};
+ if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+ viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
static unsigned int features[] = {
VIRTIO_IOMMU_F_MAP_UNMAP,
- VIRTIO_IOMMU_F_DOMAIN_BITS,
VIRTIO_IOMMU_F_INPUT_RANGE,
+ VIRTIO_IOMMU_F_DOMAIN_RANGE,
VIRTIO_IOMMU_F_PROBE,
+ VIRTIO_IOMMU_F_MMIO,
};
static struct virtio_device_id id_table[] = {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 730fbe0e2a9d..1b5c3672aea2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe)
if (!its_alloc_vpe_table(vpe_id)) {
its_vpe_id_free(vpe_id);
- its_free_pending_table(vpe->vpt_page);
+ its_free_pending_table(vpt_page);
return -ENOMEM;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9bca4896fa6f..96d927f0f91a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -771,8 +771,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP0R3_EL1);
write_gicreg(0, ICC_AP0R2_EL1);
+ /* Fall through */
case 6:
write_gicreg(0, ICC_AP0R1_EL1);
+ /* Fall through */
case 5:
case 4:
write_gicreg(0, ICC_AP0R0_EL1);
@@ -786,8 +788,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP1R3_EL1);
write_gicreg(0, ICC_AP1R2_EL1);
+ /* Fall through */
case 6:
write_gicreg(0, ICC_AP1R1_EL1);
+ /* Fall through */
case 5:
case 4:
write_gicreg(0, ICC_AP1R0_EL1);
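The comments added above mark each case as a deliberate fall-through, which keeps GCC's -Wimplicit-fallthrough warning quiet while documenting that the register writes are meant to cascade. A minimal illustration of the pattern, with a hypothetical write_reg() stub:

    static void write_reg(int idx, unsigned int val)
    {
            (void)idx; (void)val; /* stub for the sketch */
    }

    static void clear_active_priority_regs(int nr_regs)
    {
            switch (nr_regs) {
            case 3:
                    write_reg(2, 0);
                    /* Fall through */
            case 2:
                    write_reg(1, 0);
                    /* Fall through */
            case 1:
                    write_reg(0, 0);
                    break;
            }
    }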
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index bf2237ac5d09..4f74c15c4755 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
.irq_unmask = imx_gpcv2_irq_unmask,
.irq_set_wake = imx_gpcv2_irq_set_wake,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 3dd28382d5f5..3f09f658e8e2 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -241,12 +241,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
parent = platform_bus_type.dev_root;
child = of_platform_device_create(np, NULL, parent);
- if (!child)
+ if (!child) {
+ of_node_put(np);
return -ENOMEM;
+ }
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
+ of_node_put(np);
return -EINVAL;
}
@@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
mbigen_write_msg,
&mbigen_domain_ops,
mgn_chip);
- if (!domain)
+ if (!domain) {
+ of_node_put(np);
return -ENOMEM;
+ }
}
return 0;
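Each early return above now drops the reference the device-tree iterator holds on np; forgetting of_node_put() on an error path leaks the node. The same discipline in compact form, sketched under the assumption of a for_each_child_of_node()-style loop and a hypothetical setup_child() helper:

    for_each_child_of_node(pdev->dev.of_node, np) {
            err = setup_child(np);   /* hypothetical per-child setup */
            if (err) {
                    of_node_put(np); /* the iterator holds a ref on np */
                    return err;
            }
    }
    return 0;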
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index caaee8032afe..7b6c3ee9e755 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
/* validate the dax capability of the target device span */
int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+ sector_t start, sector_t len, void *data)
{
int blocksize = *(int *) data;
return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
- start, len);
+ start, len);
}
/* Check devices support synchronous DAX */
-static int device_synchronous(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- return dax_synchronous(dev->dax_dev);
+ return dev->dax_dev && dax_synchronous(dev->dax_dev);
}
bool dm_table_supports_dax(struct dm_table *t,
- iterate_devices_callout_fn iterate_fn, int *blocksize)
+ iterate_devices_callout_fn iterate_fn, int *blocksize)
{
struct dm_target *ti;
unsigned i;
@@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ !ti->type->iterate_devices(ti, iterate_fn, blocksize))
return false;
}
@@ -1921,7 +1921,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax(t, device_synchronous, NULL))
+ if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
set_dax_synchronous(t->md->dax_dev);
}
else
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index e327f80ebe70..7102e2ebc614 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -10,6 +10,7 @@
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
goto free_tag_set;
}
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ mq->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
+
mq->queue->queuedata = mq;
blk_queue_rq_timeout(mq->queue, 60 * HZ);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index faaaf52a46d2..eea52e2c5a0c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if ((err != -ETIMEDOUT) &&
- (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ if (err != -ETIMEDOUT) {
state = STATE_SENDING_DATA;
continue;
}
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2d736e416775..ba9a63db73da 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -73,7 +73,7 @@
#define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
- #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
#define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
#define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
#define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
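The one-line fix above swaps the arguments into GENMASK's expected (high, low) order: GENMASK(13, 10) sets bits 10 through 13 (0x3c00), whereas GENMASK(10, 13) yields an empty or wrong mask. A user-space re-implementation that demonstrates the correct ordering, assuming 32-bit masks:

    #include <assert.h>

    /* User-space equivalent of the kernel's GENMASK(h, l): bits l..h set. */
    #define MY_GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

    int main(void)
    {
            assert(MY_GENMASK(13, 10) == 0x3c00u); /* DAT mask, bits 10-13 */
            assert(MY_GENMASK(7, 6) == 0xc0u);     /* IF_CONFIG mask */
            return 0;
    }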
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 6ee340a3fb3a..603a5d9f045a 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -624,6 +624,7 @@ err_cleanup_host:
sdhci_cleanup_host(host);
pm_runtime_disable:
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d06b8095a19..df352b334ea7 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
cpu_pm_pmu_setup(armpmu, cmd);
break;
case CPU_PM_EXIT:
- cpu_pm_pmu_setup(armpmu, cmd);
case CPU_PM_ENTER_FAILED:
+ cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu);
break;
default:
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 48d6f0d87583..83ed1fbf73cf 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = {
};
MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
+static const struct spi_device_id olpc_xo175_ec_id_table[] = {
+ { "xo1.75-ec", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
+
static struct spi_driver olpc_xo175_ec_spi_driver = {
.driver = {
.name = "olpc-xo175-ec",
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 235c0b89f824..c510d0d72475 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
+ INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
{}
};
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index b0d3110ae378..e4c68efac0c2 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = {
static struct gpio_keys_button apu2_keys_buttons[] = {
{
- .code = KEY_SETUP,
+ .code = KEY_RESTART,
.active_low = 1,
.desc = "front button",
.type = EV_KEY,
@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
MODULE_ALIAS("platform:pcengines-apuv2");
-MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME);
-MODULE_SOFTDEP("pre: platform:leds-gpio");
-MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
+MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b9ce93e9df89..99f86612f775 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
char msg_format;
char msg_no;
+ /*
+ * intrc values ENODEV, ENOLINK and EPERM
+ * will be obtained from sleep_on to indicate that no
+ * IO operation can be started
+ */
+ if (cqr->intrc == -ENODEV)
+ return 1;
+
+ if (cqr->intrc == -ENOLINK)
+ return 1;
+
+ if (cqr->intrc == -EPERM)
+ return 1;
+
sense = dasd_get_sense(&cqr->irb);
if (!sense)
return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
- do {
- rc = dasd_sleep_on(cqr);
- if (rc && suborder_not_supported(cqr))
- return -EOPNOTSUPP;
- } while (rc && (cqr->retries > 0));
- if (rc) {
+ rc = dasd_sleep_on(cqr);
+ if (rc && !suborder_not_supported(cqr)) {
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 8c9d412b6d33..e7cf0a1d4f71 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
}
if (dstat == 0x08)
break;
+ /* else, fall through */
case 0x04:
/* Device end interrupt. */
if ((raw = req->info) == NULL)
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 8d3370da2dfc..3e0b2f63a9d2 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev)
switch (device->tape_state) {
case TS_INIT:
tape_state_set(device, TS_NOT_OPER);
+ /* fallthrough */
case TS_NOT_OPER:
/*
* Nothing to do.
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
break;
if (device->tape_state == TS_UNUSED)
break;
+ /* fallthrough */
default:
if (device->tape_state == TS_BLKUSE)
break;
@@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case -ETIMEDOUT:
DBF_LH(1, "(%08x): Request timed out\n",
device->cdev_id);
+ /* fallthrough */
case -EIO:
__tape_end_request(device, request, -EIO);
break;
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 8c1d2357ef5b..7a838e3d7c0f 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
}
-const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+static const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
.read = vfio_ccw_async_region_read,
.write = vfio_ccw_async_region_write,
.release = vfio_ccw_async_region_release,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 5ea83dc4f1d7..dad2be333d82 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
ap_msg->receive(aq, ap_msg, aq->reply);
break;
}
+ /* fall through */
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 12fe9deb265e..a36251d138fb 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zq, reply, xcRB);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
@@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
return convert_type86_ep11_xcrb(zq, reply, xcRB);
- /* Fall through, no break, incorrect cprb version is an unknown resp.*/
+ /* fall through - wrong cprb version is an unknown resp */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 1a85fe9e4b7b..1791a393795d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
*/
static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
{
- return (struct fcoe_rport *)(rdata + 1);
+ return container_of(rdata, struct fcoe_rport, rdata);
}
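The change above stops assuming that the fcoe_rport lives at rdata + 1 and instead derives it with container_of(), which works because struct fcoe_rport now embeds the fc_rport_priv as a named member. A standalone sketch of the pattern with simplified, illustrative type names:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rport_priv { int port_id; };

    struct fcoe_rport_sketch {
            struct rport_priv rdata;  /* embedded libfc rport data */
            unsigned char vn_mac[6];
    };

    static struct fcoe_rport_sketch *to_fcoe_rport(struct rport_priv *rdata)
    {
            /* Valid for any rdata embedded in a fcoe_rport_sketch. */
            return container_of(rdata, struct fcoe_rport_sketch, rdata);
    }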
/**
@@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
*/
static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
@@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct fip_wwn_desc *wwn = NULL;
struct fip_vn_desc *vn = NULL;
struct fip_size_desc *size = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
case FIP_DT_VN_ID:
if (dlen != sizeof(struct fip_vn_desc))
goto len_err;
vn = (struct fip_vn_desc *)desc;
memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+ frport->rdata.ids.port_name =
+ get_unaligned_be64(&vn->fd_wwpn);
break;
case FIP_DT_FC4F:
if (dlen != sizeof(struct fip_fc4_feat))
@@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip)
/**
* fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
* @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
-
- if (rdata->ids.port_id != fip->port_id)
+ if (frport->rdata.ids.port_id != fip->port_id)
return;
switch (fip->state) {
@@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
* Probe's REC bit is not set.
* If we don't reply, we will change our address.
*/
- if (fip->lp->wwpn > rdata->ids.port_name &&
+ if (fip->lp->wwpn > frport->rdata.ids.port_name &&
!(frport->flags & FIP_FL_REC_OR_P2P)) {
LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
"port_id collision\n");
@@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
* @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- if (rdata->ids.port_id != fip->port_id)
+ if (frport->rdata.ids.port_id != fip->port_id)
return;
switch (fip->state) {
case FIP_ST_VNMP_START:
@@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
* @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
*
* Called with ctlr_mutex held.
*/
-static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
+static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new)
{
struct fc_lport *lport = fip->lp;
struct fc_rport_priv *rdata;
@@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
struct fcoe_rport *frport;
u32 port_id;
- port_id = new->ids.port_id;
+ port_id = new->rdata.ids.port_id;
if (port_id == fip->port_id)
return;
@@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
rdata->disc_id = lport->disc.disc_id;
ids = &rdata->ids;
- if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
- (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+ if ((ids->port_name != -1 &&
+ ids->port_name != new->rdata.ids.port_name) ||
+ (ids->node_name != -1 &&
+ ids->node_name != new->rdata.ids.node_name)) {
mutex_unlock(&rdata->rp_mutex);
LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
fc_rport_logoff(rdata);
mutex_lock(&rdata->rp_mutex);
}
- ids->port_name = new->ids.port_name;
- ids->node_name = new->ids.node_name;
+ ids->port_name = new->rdata.ids.port_name;
+ ids->node_name = new->rdata.ids.node_name;
mutex_unlock(&rdata->rp_mutex);
frport = fcoe_ctlr_rport(rdata);
LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
port_id, frport->fcoe_len ? "old" : "new",
rdata->rp_state);
- *frport = *fcoe_ctlr_rport(new);
+ frport->fcoe_len = new->fcoe_len;
+ frport->flags = new->flags;
+ frport->login_count = new->login_count;
+ memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN);
+ memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN);
frport->time = 0;
}
@@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
/**
* fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
* @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(new);
-
- if (frport->flags & FIP_FL_REC_OR_P2P) {
+ if (new->flags & FIP_FL_REC_OR_P2P) {
LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
@@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_START:
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
- if (new->ids.port_id == fip->port_id) {
+ if (new->rdata.ids.port_id == fip->port_id) {
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
"restart, state %d\n",
fip->state);
@@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
break;
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
- if (new->ids.port_id == fip->port_id) {
- if (new->ids.port_name > fip->lp->wwpn) {
+ if (new->rdata.ids.port_id == fip->port_id) {
+ if (new->rdata.ids.port_name > fip->lp->wwpn) {
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
"restart, port_id collision\n");
fcoe_ctlr_vn_restart(fip);
@@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
break;
}
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
- new->ids.port_id);
- fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
- min((u32)frport->fcoe_len,
+ new->rdata.ids.port_id);
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac,
+ min((u32)new->fcoe_len,
fcoe_ctlr_fcoe_size(fip)));
fcoe_ctlr_vn_add(fip, new);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
- "ignoring claim from %x\n", new->ids.port_id);
+ "ignoring claim from %x\n",
+ new->rdata.ids.port_id);
break;
}
}
@@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_claim_resp() - handle received Claim Response
* @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Claim Response
+ * @new: newly-parsed FCoE rport from the Claim Response
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
LIBFCOE_FIP_DBG(fip, "claim resp from rport %x - state %s\n",
- new->ids.port_id, fcoe_ctlr_state(fip->state));
+ new->rdata.ids.port_id, fcoe_ctlr_state(fip->state));
if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
fcoe_ctlr_vn_add(fip, new);
}
@@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_beacon() - handle received beacon.
* @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Beacon
+ * @new: newly-parsed FCoE rport from the Beacon
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
struct fc_lport *lport = fip->lp;
struct fc_rport_priv *rdata;
struct fcoe_rport *frport;
- frport = fcoe_ctlr_rport(new);
- if (frport->flags & FIP_FL_REC_OR_P2P) {
+ if (new->flags & FIP_FL_REC_OR_P2P) {
LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- rdata = fc_rport_lookup(lport, new->ids.port_id);
+ rdata = fc_rport_lookup(lport, new->rdata.ids.port_id);
if (rdata) {
- if (rdata->ids.node_name == new->ids.node_name &&
- rdata->ids.port_name == new->ids.port_name) {
+ if (rdata->ids.node_name == new->rdata.ids.node_name &&
+ rdata->ids.port_name == new->rdata.ids.port_name) {
frport = fcoe_ctlr_rport(rdata);
+
LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
rdata->ids.port_id);
if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
@@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
* Don't add the neighbor yet.
*/
LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
- new->ids.port_id);
+ new->rdata.ids.port_id);
if (time_after(jiffies,
fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
fcoe_ctlr_vn_send_claim(fip);
@@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vn2vn_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
@@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop;
}
- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
goto drop;
@@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mutex_lock(&fip->ctlr_mutex);
switch (sub) {
case FIP_SC_VN_PROBE_REQ:
- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_req(fip, &frport);
break;
case FIP_SC_VN_PROBE_REP:
- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_reply(fip, &frport);
break;
case FIP_SC_VN_CLAIM_NOTIFY:
- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_notify(fip, &frport);
break;
case FIP_SC_VN_CLAIM_REP:
- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_resp(fip, &frport);
break;
case FIP_SC_VN_BEACON:
- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+ fcoe_ctlr_vn_beacon(fip, &frport);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2802,22 +2800,18 @@ drop:
*/
static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
struct fip_mac_desc *macd = NULL;
struct fip_wwn_desc *wwn = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
default:
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification.
* @fip: The FCoE controller
+ * @frport: The newly-parsed FCoE rport from the Discovery Request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
enum fip_vlan_subcode sub = FIP_SC_VL_NOTE;
if (fip->mode == FIP_MODE_VN2VN)
@@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vlan_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
goto drop;
}
mutex_lock(&fip->ctlr_mutex);
if (sub == FIP_SC_VL_REQ)
- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+ fcoe_ctlr_vlan_disc_reply(fip, &frport);
mutex_unlock(&fip->ctlr_mutex);
drop:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index eaf6177ac9ee..1bb6aada93fa 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
case IOACCEL2_SERV_RESPONSE_COMPLETE:
switch (c2->error_data.status) {
case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ if (cmd)
+ cmd->result = 0;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
- c2->error_data.status == 0))
+ c2->error_data.status == 0)) {
+ cmd->result = 0;
return hpsa_cmd_free_and_done(h, c, cmd);
+ }
/*
* Any RAID offload error results in retry which will use
@@ -5654,6 +5658,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return SCSI_MLQUEUE_DEVICE_BUSY;
/*
+ * This is necessary because the SML doesn't zero out this field during
+ * error recovery.
+ */
+ cmd->result = 0;
+
+ /*
* Call alternate submit routine for I/O accelerated commands.
* Retries always go down the normal I/O path.
*/
@@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
if (idx != h->last_collision_tag) { /* Print once per tag */
dev_warn(&h->pdev->dev,
"%s: tag collision (tag=%d)\n", __func__, idx);
- if (c->scsi_cmd != NULL)
- scsi_print_command(c->scsi_cmd);
if (scmd)
scsi_print_command(scmd);
h->last_collision_tag = idx;
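The hpsa hunks above all address the same gap: the SCSI midlayer can resubmit a command after error recovery without resetting ->result, so the driver now clears it explicitly at queuecommand time and on good ioaccel2 completions. The defensive shape of the fix, sketched with a hypothetical dispatch helper:

    /* Sketch: clear stale completion status before reusing a request. */
    static int queue_command_sketch(struct scsi_cmnd *cmd)
    {
            cmd->result = 0;       /* may still hold the previous attempt's status */
            return dispatch(cmd);  /* hypothetical driver-specific submit path */
    }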
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index e0f3852fdad1..da6e97d8dc3b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
+ size_t rport_priv_size = sizeof(*rdata);
lockdep_assert_held(&lport->disc.disc_mutex);
@@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
if (rdata)
return rdata;
- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+ if (lport->rport_priv_size > 0)
+ rport_priv_size = lport->rport_priv_size;
+ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
if (!rdata)
return NULL;
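With struct fcoe_rport now embedding fc_rport_priv, a transport that sets rport_priv_size means the size of the whole container rather than extra trailing bytes, so fc_rport_create() allocates whichever size is declared. A minimal user-space model of that allocation rule:

    #include <stdlib.h>

    struct rport_priv { int port_id; };

    /* Allocate the transport's container if it declared one, else the bare priv. */
    static struct rport_priv *rport_alloc(size_t transport_priv_size)
    {
            size_t sz = sizeof(struct rport_priv);

            if (transport_priv_size > 0)
                    sz = transport_priv_size; /* full container, priv embedded */
            return calloc(1, sz);
    }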
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 684662888792..050c0f029ef9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
u64 required_mask, coherent_mask;
struct sysinfo s;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
@@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
goto try_32bit;
if (ioc->dma_mask)
- coherent_mask = DMA_BIT_MASK(64);
+ coherent_mask = DMA_BIT_MASK(dma_mask);
else
coherent_mask = DMA_BIT_MASK(32);
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
dma_set_coherent_mask(&pdev->dev, coherent_mask))
goto try_32bit;
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
+ ioc->dma_mask = dma_mask;
goto out;
try_32bit:
@@ -2744,7 +2746,7 @@ static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
struct pci_dev *pdev)
{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
return -ENODEV;
}
@@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
- if (ioc->dma_mask == 64) {
+ if (ioc->dma_mask > 32) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
pci_name(ioc->pdev));
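The mpt3sas change derives the DMA mask from the controller generation instead of hard-coding 64 bits, since SAS3/SAS35 parts are limited to 63-bit addressing here, and later checks test for "wider than 32" rather than exactly 64. A small sketch of the selection, mirroring the kernel's DMA_BIT_MASK() convention in user space:

    #include <stdbool.h>
    #include <stdint.h>

    /* Same shape as the kernel's DMA_BIT_MASK(n). */
    #define DMA_BIT_MASK_SKETCH(n) \
            (((n) == 64) ? ~0ull : ((1ull << (n)) - 1))

    static uint64_t pick_dma_mask(bool is_sas3_or_later)
    {
            int bits = is_sas3_or_later ? 63 : 64; /* Gen3+: 63-bit limit */
            return DMA_BIT_MASK_SKETCH(bits);
    }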
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4059655639d9..da83034d4759 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
ql_log(ql_log_warn, vha, 0xd049,
"Failed to allocate ct_sns request.\n");
kfree(fcport);
- fcport = NULL;
+ return NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 819296332913..42a8c2a13ab1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -96,7 +96,7 @@ struct vhost_uaddr {
};
#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#else
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#endif
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4c339c7e66e5..a446a7221e13 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map;
if (!use_ptemod) {
- err = vm_map_pages(vma, map->pages, map->count);
+ err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
} else {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 2f5ce7230a43..c6070e70dd73 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
-struct remap_pfn {
- struct mm_struct *mm;
- struct page **pages;
- pgprot_t prot;
- unsigned long i;
-};
-
-static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
- struct page *page = r->pages[r->i];
- pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
-
- set_pte_at(r->mm, addr, ptep, pte);
- r->i++;
-
- return 0;
-}
-
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
struct privcmd_data *data = file->private_data;
@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
goto out;
}
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+ xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
struct page **pages;
unsigned int i;
@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (rc)
goto out;
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- struct remap_pfn r = {
- .mm = vma->vm_mm,
- .pages = vma->vm_private_data,
- .prot = vma->vm_page_prot,
- };
-
- rc = apply_to_page_range(r.mm, kdata.addr,
- kdata.num << PAGE_SHIFT,
- remap_pfn_fn, &r);
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+ xen_feature(XENFEAT_auto_translated_physmap)) {
+ rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
} else {
unsigned int domid =
(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index cfbe46785a3b..ae1df496bf38 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
return xen_phys_to_bus(virt_to_phys(address));
}
-static int check_pages_physically_contiguous(unsigned long xen_pfn,
- unsigned int offset,
- size_t length)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
- unsigned long next_bfn;
- int i;
- int nr_pages;
+ unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
next_bfn = pfn_to_bfn(xen_pfn);
- nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
- for (i = 1; i < nr_pages; i++) {
+ for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
- return 0;
- }
- return 1;
-}
+ return 1;
-static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
-{
- unsigned long xen_pfn = XEN_PFN_DOWN(p);
- unsigned int offset = p & ~XEN_PAGE_MASK;
-
- if (offset + size <= XEN_PAGE_SIZE)
- return 0;
- if (check_pages_physically_contiguous(xen_pfn, offset, size))
- return 0;
- return 1;
+ return 0;
}
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
+ SetPageXenRemapped(virt_to_page(ret));
}
memset(ret, 0, size);
return ret;
@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (((dev_addr + size - 1 <= dma_mask)) ||
- range_straddles_page_boundary(phys, size))
+ if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+ range_straddles_page_boundary(phys, size)) &&
+ TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
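The rewritten range_straddles_page_boundary() above folds the old helper into one loop: a buffer needs Xen's contiguous-region treatment only if some Xen page it covers maps to a non-consecutive machine frame. A standalone model of the walk, assuming 4 KiB Xen pages and a caller-supplied pfn-to-bfn lookup:

    #include <stddef.h>
    #include <stdint.h>

    #define XEN_PAGE_SHIFT 12
    #define XEN_PAGE_SIZE  (1ul << XEN_PAGE_SHIFT)

    typedef unsigned long (*pfn_to_bfn_fn)(unsigned long pfn);

    /* Returns 1 if [p, p + size) covers non-contiguous machine frames. */
    static int straddles(uint64_t p, size_t size, pfn_to_bfn_fn pfn_to_bfn)
    {
            unsigned long pfn = p >> XEN_PAGE_SHIFT;
            unsigned long offset = p & (XEN_PAGE_SIZE - 1);
            unsigned long nr_pages =
                    (offset + size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;
            unsigned long next_bfn = pfn_to_bfn(pfn);

            for (unsigned long i = 1; i < nr_pages; i++)
                    if (pfn_to_bfn(++pfn) != ++next_bfn)
                            return 1;
            return 0;
    }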
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e0116..e5694133ebe5 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
{
int err;
u16 old_value;
- pci_power_t new_state, old_state;
+ pci_power_t new_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index ba883a80b3c0..7b1077f0abcb 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
+
+struct remap_pfn {
+ struct mm_struct *mm;
+ struct page **pages;
+ pgprot_t prot;
+ unsigned long i;
+};
+
+static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+ struct page *page = r->pages[r->i];
+ pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
+
+ set_pte_at(r->mm, addr, ptep, pte);
+ r->i++;
+
+ return 0;
+}
+
+/* Used by the privcmd module, but has to be built-in on ARM */
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
+{
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .pages = vma->vm_private_data,
+ .prot = vma->vm_page_prot,
+ };
+
+ return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
+}
+EXPORT_SYMBOL_GPL(xen_remap_vma_range);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c2a85b587922..a6f7c892cb4a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -439,6 +439,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
ret = -EAGAIN;
goto error;
}
+ ret = dio->size;
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
@@ -465,7 +466,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
ret = -EAGAIN;
goto error;
}
- ret += bio->bi_iter.bi_size;
+ ret = dio->size;
bio = bio_alloc(gfp, nr_pages);
if (!bio) {
@@ -1181,8 +1182,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
* Pointer to the block device containing @bdev on success, ERR_PTR()
* value on failure.
*/
-static struct block_device *bd_start_claiming(struct block_device *bdev,
- void *holder)
+struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
{
struct gendisk *disk;
struct block_device *whole;
@@ -1229,6 +1229,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
return ERR_PTR(err);
}
}
+EXPORT_SYMBOL(bd_start_claiming);
+
+static void bd_clear_claiming(struct block_device *whole, void *holder)
+{
+ lockdep_assert_held(&bdev_lock);
+ /* tell others that we're done */
+ BUG_ON(whole->bd_claiming != holder);
+ whole->bd_claiming = NULL;
+ wake_up_bit(&whole->bd_claiming, 0);
+}
+
+/**
+ * bd_finish_claiming - finish claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Finish exclusive open of a block device. Mark the device as exclusively
+ * open by the holder and wake up all waiters for exclusive open to finish.
+ */
+void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder)
+{
+ spin_lock(&bdev_lock);
+ BUG_ON(!bd_may_claim(bdev, whole, holder));
+ /*
+ * Note that for a whole device bd_holders will be incremented twice,
+ * and bd_holder will be set to bd_may_claim before being set to holder
+ */
+ whole->bd_holders++;
+ whole->bd_holder = bd_may_claim;
+ bdev->bd_holders++;
+ bdev->bd_holder = holder;
+ bd_clear_claiming(whole, holder);
+ spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_finish_claiming);
+
+/**
+ * bd_abort_claiming - abort claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Abort claiming of a block device when the exclusive open failed. This can
+ * also be used when exclusive open is not actually desired and we just needed
+ * to block other exclusive openers for a while.
+ */
+void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder)
+{
+ spin_lock(&bdev_lock);
+ bd_clear_claiming(whole, holder);
+ spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_abort_claiming);
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
@@ -1698,29 +1754,7 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
/* finish claiming */
mutex_lock(&bdev->bd_mutex);
- spin_lock(&bdev_lock);
-
- if (!res) {
- BUG_ON(!bd_may_claim(bdev, whole, holder));
- /*
- * Note that for a whole device bd_holders
- * will be incremented twice, and bd_holder
- * will be set to bd_may_claim before being
- * set to holder
- */
- whole->bd_holders++;
- whole->bd_holder = bd_may_claim;
- bdev->bd_holders++;
- bdev->bd_holder = holder;
- }
-
- /* tell others that we're done */
- BUG_ON(whole->bd_claiming != holder);
- whole->bd_claiming = NULL;
- wake_up_bit(&whole->bd_claiming, 0);
-
- spin_unlock(&bdev_lock);
-
+ bd_finish_claiming(bdev, whole, holder);
/*
* Block event polling for write claims if requested. Any
* write holder makes the write_holder state stick until
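Factoring the claim-finish logic out of blkdev_get() turns claiming into a three-step public API: bd_start_claiming() blocks rival exclusive openers, and the claim then ends in either bd_finish_claiming() or bd_abort_claiming(). A hedged sketch of the intended calling convention (the open step is a hypothetical helper):

    static int open_exclusive(struct block_device *bdev, void *holder)
    {
            struct block_device *whole;
            int err;

            whole = bd_start_claiming(bdev, holder); /* blocks rival claimers */
            if (IS_ERR(whole))
                    return PTR_ERR(whole);

            err = do_device_open(bdev);              /* hypothetical open step */
            if (err) {
                    bd_abort_claiming(bdev, whole, holder);
                    return err;
            }
            bd_finish_claiming(bdev, whole, holder); /* publish the holder */
            return 0;
    }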
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 89116afda7a2..e5d85311d5d5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
ulist_init(roots);
ulist_init(tmp);
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
ret = PTR_ERR(trans);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 69b59bf75882..c3c0c064c25d 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
- if (result == BTRFS_COMPARE_TREE_CHANGED) {
- struct extent_buffer *leaf_l;
- struct extent_buffer *leaf_r;
- struct btrfs_file_extent_item *ei_l;
- struct btrfs_file_extent_item *ei_r;
-
- leaf_l = sctx->left_path->nodes[0];
- leaf_r = sctx->right_path->nodes[0];
- ei_l = btrfs_item_ptr(leaf_l,
- sctx->left_path->slots[0],
- struct btrfs_file_extent_item);
- ei_r = btrfs_item_ptr(leaf_r,
- sctx->right_path->slots[0],
- struct btrfs_file_extent_item);
-
- /*
- * We may have found an extent item that has changed
- * only its disk_bytenr field and the corresponding
- * inode item was not updated. This case happens due to
- * very specific timings during relocation when a leaf
- * that contains file extent items is COWed while
- * relocation is ongoing and its in the stage where it
- * updates data pointers. So when this happens we can
- * safely ignore it since we know it's the same extent,
- * but just at different logical and physical locations
- * (when an extent is fully replaced with a new one, we
- * know the generation number must have changed too,
- * since snapshot creation implies committing the current
- * transaction, and the inode item must have been updated
- * as well).
- * This replacement of the disk_bytenr happens at
- * relocation.c:replace_file_extents() through
- * relocation.c:btrfs_reloc_cow_block().
- */
- if (btrfs_file_extent_generation(leaf_l, ei_l) ==
- btrfs_file_extent_generation(leaf_r, ei_r) &&
- btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_compression(leaf_l, ei_l) ==
- btrfs_file_extent_compression(leaf_r, ei_r) &&
- btrfs_file_extent_encryption(leaf_l, ei_l) ==
- btrfs_file_extent_encryption(leaf_r, ei_r) &&
- btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
- btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
- btrfs_file_extent_type(leaf_l, ei_l) ==
- btrfs_file_extent_type(leaf_r, ei_r) &&
- btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
- btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
- btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_offset(leaf_l, ei_l) ==
- btrfs_file_extent_offset(leaf_r, ei_r) &&
- btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_num_bytes(leaf_r, ei_r))
- return 0;
- }
-
- inconsistent_snapshot_error(sctx, result, "extent");
- return -EIO;
- }
+ /*
+ * We have found an extent item that changed without the inode item
+ * having changed. This can happen either after relocation (where the
+ * disk_bytenr of an extent item is replaced at
+ * relocation.c:replace_file_extents()) or after deduplication into a
+ * file in both the parent and send snapshots (where an extent item can
+ * get modified or replaced with a new one). Note that deduplication
+ * updates the inode item, but it only changes the iversion (sequence
+ * field in the inode item) of the inode, so if a file is deduplicated
+ * the same number of times in both the parent and send snapshots, its
+ * iversion becomes the same in both snapshots, and hence the inode item is
+ * the same on both snapshots.
+ */
+ if (sctx->cur_ino != sctx->cmp_key->objectid)
+ return 0;
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3b8ae1a8f02d..e3adb714c04b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
- __TRANS_JOIN),
+ __TRANS_JOIN |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_UNBLOCKED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_COMPLETED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
};
void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -543,7 +546,8 @@ again:
ret = join_transaction(fs_info, type);
if (ret == -EBUSY) {
wait_current_trans(fs_info);
- if (unlikely(type == TRANS_ATTACH))
+ if (unlikely(type == TRANS_ATTACH ||
+ type == TRANS_JOIN_NOSTART))
ret = -ENOENT;
}
} while (ret == -EBUSY);
@@ -660,6 +664,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
}
/*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+ return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+ BTRFS_RESERVE_NO_FLUSH, true);
+}
+
+/*
* btrfs_attach_transaction() - catch the running transaction
*
* It is used when we want to commit the current the transaction, but
@@ -2037,6 +2051,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
} else {
spin_unlock(&fs_info->trans_lock);
+ /*
+ * The previous transaction was aborted and was already removed
+ * from the list of transactions at fs_info->trans_list. So we
+ * abort to prevent writing a new superblock that reflects a
+ * corrupt state (pointing to trees with unwritten nodes/leaves).
+ */
+ if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+ ret = -EROFS;
+ goto cleanup_transaction;
+ }
}
extwriter_counter_dec(cur_trans, trans->type);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 527ea94b57d9..2c5a6f6e5bb0 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -94,11 +94,13 @@ struct btrfs_transaction {
#define __TRANS_JOIN (1U << 11)
#define __TRANS_JOIN_NOLOCK (1U << 12)
#define __TRANS_DUMMY (1U << 13)
+#define __TRANS_JOIN_NOSTART (1U << 14)
#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
@@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
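TRANS_JOIN_NOSTART gives callers such as btrfs_check_shared() a way to piggy-back on a transaction that is already running without ever starting one; like attach, it converts -EBUSY into -ENOENT when nothing is running. A sketch of the intended use, loosely modeled on the backref change above:

    static int walk_without_starting_transaction(struct btrfs_root *root)
    {
            struct btrfs_trans_handle *trans;

            trans = btrfs_join_transaction_nostart(root);
            if (IS_ERR(trans)) {
                    if (PTR_ERR(trans) != -ENOENT)
                            return PTR_ERR(trans);
                    trans = NULL; /* nothing running: take the read-only path */
            }

            /* ... walk backrefs with or without a handle ... */

            if (trans)
                    btrfs_end_transaction(trans);
            return 0;
    }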
diff --git a/fs/coredump.c b/fs/coredump.c
index e42e17e55bfd..b1ea7dfbd149 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -7,6 +7,7 @@
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
+#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
@@ -187,11 +188,13 @@ put_exe_file:
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
-static int format_corename(struct core_name *cn, struct coredump_params *cprm)
+static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+ size_t **argv, int *argc)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
int ispipe = (*pat_ptr == '|');
+ bool was_space = false;
int pid_in_pattern = 0;
int err = 0;
@@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
return -ENOMEM;
cn->corename[0] = '\0';
- if (ispipe)
+ if (ispipe) {
+ int argvs = sizeof(core_pattern) / 2;
+ (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
+ if (!(*argv))
+ return -ENOMEM;
+ (*argv)[(*argc)++] = 0;
++pat_ptr;
+ }
/* Repeat as long as we have more pattern to process and more output
space */
while (*pat_ptr) {
+ /*
+ * Split on spaces before doing template expansion so that
+ * %e and %E don't get split if they have spaces in them
+ */
+ if (ispipe) {
+ if (isspace(*pat_ptr)) {
+ was_space = true;
+ pat_ptr++;
+ continue;
+ } else if (was_space) {
+ was_space = false;
+ err = cn_printf(cn, "%c", '\0');
+ if (err)
+ return err;
+ (*argv)[(*argc)++] = cn->used;
+ }
+ }
if (*pat_ptr != '%') {
err = cn_printf(cn, "%c", *pat_ptr++);
} else {
@@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
struct cred *cred;
int retval = 0;
int ispipe;
+ size_t *argv = NULL;
+ int argc = 0;
struct files_struct *displaced;
/* require nonrelative corefile path and be extra careful */
bool need_suid_safe = false;
@@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo)
old_cred = override_creds(cred);
- ispipe = format_corename(&cn, &cprm);
+ ispipe = format_corename(&cn, &cprm, &argv, &argc);
if (ispipe) {
+ int argi;
int dump_count;
char **helper_argv;
struct subprocess_info *sub_info;
@@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
goto fail_dropcount;
}
- helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
+ helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
+ GFP_KERNEL);
if (!helper_argv) {
printk(KERN_WARNING "%s failed to allocate memory\n",
__func__);
goto fail_dropcount;
}
+ for (argi = 0; argi < argc; argi++)
+ helper_argv[argi] = cn.corename + argv[argi];
+ helper_argv[argi] = NULL;
retval = -ENOMEM;
sub_info = call_usermodehelper_setup(helper_argv[0],
@@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
retval = call_usermodehelper_exec(sub_info,
UMH_WAIT_EXEC);
- argv_free(helper_argv);
+ kfree(helper_argv);
if (retval) {
printk(KERN_INFO "Core dump to |%s pipe failed\n",
cn.corename);
@@ -766,6 +799,7 @@ fail_dropcount:
if (ispipe)
atomic_dec(&core_dump_count);
fail_unlock:
+ kfree(argv);
kfree(cn.corename);
coredump_finish(mm, core_dumped);
revert_creds(old_cred);
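The coredump change splits a core_pattern pipe command on spaces before template expansion, so a %e or %E that expands to a name containing spaces stays a single argv entry. A small user-space model of the splitter (the kernel interleaves expansion with the split; this model handles only the split):

    #include <ctype.h>
    #include <stdio.h>

    /*
     * Split a pipe pattern on whitespace into NUL-separated arguments,
     * recording where each argument starts, mirroring the kernel's
     * "emit a '\0' when a run of spaces ends" logic.
     */
    static int split_pattern(const char *pat, char *out, size_t *argv, int max)
    {
            int argc = 0, was_space = 0;
            size_t used = 0;

            argv[argc++] = 0;
            for (; *pat; pat++) {
                    if (isspace((unsigned char)*pat)) {
                            was_space = 1;
                            continue;
                    }
                    if (was_space) {
                            was_space = 0;
                            out[used++] = '\0';
                            if (argc < max)
                                    argv[argc++] = used;
                    }
                    out[used++] = *pat;
            }
            out[used] = '\0';
            return argc;
    }

    int main(void)
    {
            char buf[64];
            size_t argv[8];
            int n = split_pattern("/bin/dumper   -o  %e", buf, argv, 8);

            for (int i = 0; i < n; i++)
                    printf("arg[%d] = %s\n", i, buf + argv[i]);
            return 0;
    }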
diff --git a/fs/dax.c b/fs/dax.c
index a237141d8787..b64964ef44f6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
/* If we were the only waiter woken, wake the next one */
- if (entry && dax_is_conflict(entry))
+ if (entry && !dax_is_conflict(entry))
dax_wake_entry(xas, entry, false);
}
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f8d46df8fa9e..3e58a6f697dd 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- u32 oldflags;
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
return -EPERM;
- oldflags = fi->i_flags;
-
- if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
- if (!capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
-
- fi->i_flags = iflags | (oldflags & ~mask);
+ fi->i_flags = iflags | (fi->i_flags & ~mask);
if (fi->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- u32 fsflags;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ u32 fsflags, old_fsflags;
u32 iflags;
int ret;
@@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
inode_lock(inode);
+ old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
+ ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
+ if (ret)
+ goto out;
+
ret = f2fs_setflags_common(inode, iflags,
f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
+out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags)
return iflags;
}
-static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
+static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
- struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct fsxattr fa;
- memset(&fa, 0, sizeof(struct fsxattr));
- fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
+ simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
- fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
- fi->i_projid);
-
- if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
- return -EFAULT;
- return 0;
+ fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}
-static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
- /*
- * Project Quota ID state is only allowed to change from within the init
- * namespace. Enforce that restriction only if we are trying to change
- * the quota ID state. Everything else is allowed in user namespaces.
- */
- if (current_user_ns() == &init_user_ns)
- return 0;
+ struct inode *inode = file_inode(filp);
+ struct fsxattr fa;
- if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
- return -EINVAL;
-
- if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
- if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
- return -EINVAL;
- } else {
- if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
- return -EINVAL;
- }
+ f2fs_fill_fsxattr(inode, &fa);
+ if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
+ return -EFAULT;
return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- struct fsxattr fa;
+ struct fsxattr fa, old_fa;
u32 iflags;
int err;
@@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return err;
inode_lock(inode);
- err = f2fs_ioctl_check_project(inode, &fa);
+
+ f2fs_fill_fsxattr(inode, &old_fa);
+ err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
if (err)
goto out;
+
err = f2fs_setflags_common(inode, iflags,
f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
if (err)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6691f526fa40..8974672db78f 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
if (lfs_mode)
down_write(&fio.sbi->io_order_lock);
+ mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, false);
+ if (!mpage)
+ goto up_out;
+
+ fio.encrypted_page = mpage;
+
+ /* read source block in mpage */
+ if (!PageUptodate(mpage)) {
+ err = f2fs_submit_page_bio(&fio);
+ if (err) {
+ f2fs_put_page(mpage, 1);
+ goto up_out;
+ }
+ lock_page(mpage);
+ if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
+ !PageUptodate(mpage))) {
+ err = -EIO;
+ f2fs_put_page(mpage, 1);
+ goto up_out;
+ }
+ }
+
f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
&sum, CURSEG_COLD_DATA, NULL, false);
@@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
if (!fio.encrypted_page) {
err = -ENOMEM;
- goto recover_block;
- }
-
- mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
- fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
- if (mpage) {
- bool updated = false;
-
- if (PageUptodate(mpage)) {
- memcpy(page_address(fio.encrypted_page),
- page_address(mpage), PAGE_SIZE);
- updated = true;
- }
f2fs_put_page(mpage, 1);
- invalidate_mapping_pages(META_MAPPING(fio.sbi),
- fio.old_blkaddr, fio.old_blkaddr);
- if (updated)
- goto write_page;
- }
-
- err = f2fs_submit_page_bio(&fio);
- if (err)
- goto put_page_out;
-
- /* write page */
- lock_page(fio.encrypted_page);
-
- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
- err = -EIO;
- goto put_page_out;
- }
- if (unlikely(!PageUptodate(fio.encrypted_page))) {
- err = -EIO;
- goto put_page_out;
+ goto recover_block;
}
-write_page:
+ /* write target block */
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
+ memcpy(page_address(fio.encrypted_page),
+ page_address(mpage), PAGE_SIZE);
+ f2fs_put_page(mpage, 1);
+ invalidate_mapping_pages(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, fio.old_blkaddr);
+
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -871,11 +868,12 @@ write_page:
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
recover_block:
- if (lfs_mode)
- up_write(&fio.sbi->io_order_lock);
if (err)
f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
true, true);
+up_out:
+ if (lfs_mode)
+ up_write(&fio.sbi->io_order_lock);
put_out:
f2fs_put_dnode(&dn);
out:
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 6de6cda44031..78a1b873e48a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
size_t crc_offset = 0;
__u32 crc = 0;
+ if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
+ f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
+ F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
+ return -EINVAL;
+ }
+
/* Check checksum_offset and crc in superblock */
if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
crc_offset = le32_to_cpu(raw_super->checksum_offset);
@@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
offsetof(struct f2fs_super_block, crc)) {
f2fs_info(sbi, "Invalid SB checksum offset: %zu",
crc_offset);
- return 1;
+ return -EFSCORRUPTED;
}
crc = le32_to_cpu(raw_super->crc);
if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
- return 1;
+ return -EFSCORRUPTED;
}
}
- if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
- f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
- F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
- return 1;
- }
-
/* Currently, support only 4KB page cache size */
if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
PAGE_SIZE);
- return 1;
+ return -EFSCORRUPTED;
}
/* Currently, support only 4KB block size */
@@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
if (blocksize != F2FS_BLKSIZE) {
f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
blocksize);
- return 1;
+ return -EFSCORRUPTED;
}
/* check log blocks per segment */
if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
f2fs_info(sbi, "Invalid log blocks per segment (%u)",
le32_to_cpu(raw_super->log_blocks_per_seg));
- return 1;
+ return -EFSCORRUPTED;
}
/* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
F2FS_MIN_LOG_SECTOR_SIZE) {
f2fs_info(sbi, "Invalid log sectorsize (%u)",
le32_to_cpu(raw_super->log_sectorsize));
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->log_sectors_per_block) +
le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
le32_to_cpu(raw_super->log_sectors_per_block),
le32_to_cpu(raw_super->log_sectorsize));
- return 1;
+ return -EFSCORRUPTED;
}
segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
if (segment_count > F2FS_MAX_SEGMENT ||
segment_count < F2FS_MIN_SEGMENTS) {
f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
- return 1;
+ return -EFSCORRUPTED;
}
if (total_sections > segment_count ||
@@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
segs_per_sec > segment_count || !segs_per_sec) {
f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
segment_count, total_sections, segs_per_sec);
- return 1;
+ return -EFSCORRUPTED;
}
if ((segment_count / segs_per_sec) < total_sections) {
f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
segment_count, segs_per_sec, total_sections);
- return 1;
+ return -EFSCORRUPTED;
}
if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
segment_count, le64_to_cpu(raw_super->block_count));
- return 1;
+ return -EFSCORRUPTED;
}
if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
le32_to_cpu(raw_super->extension_count),
raw_super->hot_ext_count,
F2FS_MAX_EXTENSION);
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->cp_payload) >
@@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
f2fs_info(sbi, "Insane cp_payload (%u > %u)",
le32_to_cpu(raw_super->cp_payload),
blocks_per_seg - F2FS_CP_PACKS);
- return 1;
+ return -EFSCORRUPTED;
}
/* check reserved ino info */
@@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
le32_to_cpu(raw_super->node_ino),
le32_to_cpu(raw_super->meta_ino),
le32_to_cpu(raw_super->root_ino));
- return 1;
+ return -EFSCORRUPTED;
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sbi, bh))
- return 1;
+ return -EFSCORRUPTED;
return 0;
}
@@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
}
/* sanity checking of raw super */
- if (sanity_check_raw_super(sbi, bh)) {
+ err = sanity_check_raw_super(sbi, bh);
+ if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
- err = -EFSCORRUPTED;
brelse(bh);
continue;
}
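
The pattern above replaces boolean-style "return 1" with negative errno values, so read_raw_super_block() can forward the cause directly instead of hardcoding one. An illustrative userspace reduction; only the magic number is the real F2FS constant:

    #include <errno.h>
    #include <stdio.h>

    /* Returns 0 on success or a negative errno, mirroring the
     * convention the hunk adopts. */
    static int sanity_check(unsigned int magic)
    {
        if (magic != 0xF2F52010)           /* F2FS_SUPER_MAGIC */
            return -EINVAL;                /* wrong fs, not corruption */
        return 0;
    }

    int main(void)
    {
        int err = sanity_check(0xdeadbeef);

        if (err)                           /* caller forwards the errno */
            fprintf(stderr, "superblock rejected: %d\n", err);
        return 0;
    }
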
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 79581b9bdebb..4df26ef2b2b1 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1002,11 +1002,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
unsigned copied, struct page *page,
struct iomap *iomap)
{
+ struct gfs2_trans *tr = current->journal_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
if (page && !gfs2_is_stuffed(ip))
gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
gfs2_trans_end(sdp);
}
@@ -1099,8 +1104,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
tr = current->journal_info;
if (tr->tr_num_buf_new)
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- else
- gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
gfs2_trans_end(sdp);
}
@@ -1181,10 +1184,16 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if (ip->i_qadata && ip->i_qadata->qa_qd_num)
gfs2_quota_unlock(ip);
+
+ if (unlikely(!written))
+ goto out_unlock;
+
if (iomap->flags & IOMAP_F_SIZE_CHANGED)
mark_inode_dirty(inode);
- gfs2_write_unlock(inode);
+ set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
+out_unlock:
+ gfs2_write_unlock(inode);
out:
return 0;
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 012bc0efb9d3..d542f1cf4428 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1838,6 +1838,7 @@ restart:
do {
struct sqe_submit *s = &req->submit;
const struct io_uring_sqe *sqe = s->sqe;
+ unsigned int flags = req->flags;
/* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT;
@@ -1883,7 +1884,7 @@ restart:
kfree(sqe);
/* req from defer and link list needn't decrease async cnt */
- if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+ if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
goto out;
if (!async_list)
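
The local 'flags' snapshot matters because the request may be freed once it is submitted; testing req->flags afterwards would be a use-after-free. A stand-alone C illustration of the idiom:

    #include <stdio.h>
    #include <stdlib.h>

    struct req { unsigned int flags; };

    int main(void)
    {
        struct req *req = malloc(sizeof(*req));

        if (!req)
            return 1;
        req->flags = 0x2;                  /* stand-in for REQ_F_* bits */

        /* Snapshot the field while the object is certainly alive... */
        unsigned int flags = req->flags;

        free(req);                         /* submission may free it */

        if (flags & 0x2)                   /* safe: local copy, not *req */
            puts("drained");
        return 0;
    }
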
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 385f3aaa2448..90c830e3758e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int low_bucket = 0, bucket, high_bucket;
struct ocfs2_xattr_bucket *search;
- u32 last_hash;
u64 blkno, lower_blkno = 0;
search = ocfs2_xattr_bucket_new(inode);
@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
if (xh->xh_count)
xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
- last_hash = le32_to_cpu(xe->xe_name_hash);
-
/* record lower_blkno which may be the insert place. */
lower_blkno = blkno;
diff --git a/fs/super.c b/fs/super.c
index 113c58f19425..5960578a4076 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super);
bool mount_capable(struct fs_context *fc)
{
- struct user_namespace *user_ns = fc->global ? &init_user_ns
- : fc->user_ns;
-
if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
return capable(CAP_SYS_ADMIN);
else
- return ns_capable(user_ns, CAP_SYS_ADMIN);
+ return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}
/**
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 94c4f1de1922..77ff9f97bcda 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -278,7 +278,11 @@ xchk_da_btree_block_check_sibling(
/* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling)
xchk_da_set_corrupt(ds, level);
- xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
+ if (ds->state->altpath.blk[level].bp) {
+ xfs_trans_brelse(ds->dargs.trans,
+ ds->state->altpath.blk[level].bp);
+ ds->state->altpath.blk[level].bp = NULL;
+ }
out:
return error;
}
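
Guarding the brelse() and clearing ->bp afterwards is the classic release-and-NULL idiom, which makes a second pass over the same path harmless. A sketch with plain malloc/free standing in for buffer heads:

    #include <stdlib.h>

    /* Release-and-NULL: once the resource is dropped the pointer is
     * cleared, so a repeated call is a no-op rather than a double free. */
    static void release(char **bp)
    {
        if (*bp) {
            free(*bp);
            *bp = NULL;
        }
    }

    int main(void)
    {
        char *bp = malloc(16);

        release(&bp);
        release(&bp);    /* harmless: pointer was cleared */
        return 0;
    }
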
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index a8a06bb78ea8..f5c955d35be4 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -272,6 +272,7 @@ xfs_bulkstat_to_bstat(
struct xfs_bstat *bs1,
const struct xfs_bulkstat *bstat)
{
+ /* memset is needed here because of padding holes in the structure. */
memset(bs1, 0, sizeof(struct xfs_bstat));
bs1->bs_ino = bstat->bs_ino;
bs1->bs_mode = bstat->bs_mode;
@@ -388,6 +389,8 @@ xfs_inumbers_to_inogrp(
struct xfs_inogrp *ig1,
const struct xfs_inumbers *ig)
{
+ /* memset is needed here because of padding holes in the structure. */
+ memset(ig1, 0, sizeof(struct xfs_inogrp));
ig1->xi_startino = ig->xi_startino;
ig1->xi_alloccount = ig->xi_alloccount;
ig1->xi_allocmask = ig->xi_allocmask;
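
Assigning the named fields one by one leaves any compiler-inserted padding holes uninitialized, and copying such a struct to userspace wholesale would leak stack bytes; the added memset closes that. A userspace demonstration with a deliberately padded struct (the hole size is an assumption about common ABIs):

    #include <stdio.h>
    #include <string.h>

    /* 'a' is typically followed by 3 bytes of padding that
     * field-by-field assignment never touches. */
    struct out { char a; int b; };

    int main(void)
    {
        struct out o;

        /* Without this memset the padding keeps whatever was on the
         * stack, and a wholesale copy of the struct would leak it. */
        memset(&o, 0, sizeof(o));
        o.a = 'x';
        o.b = 42;
        printf("struct is %zu bytes; holes zeroed\n", sizeof(o));
        return 0;
    }
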
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7a52be..e9f20b813a69 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
#include <linux/compiler.h>
#include <linux/log2.h>
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
- int order;
-
- size--;
- size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
- order = fls(size);
-#else
- order = fls64(size);
-#endif
- return order;
-}
-
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
*/
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? ( \
- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
- ilog2((n) - 1) - PAGE_SHIFT + 1) \
- ) : \
- __get_order(n) \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ if (__builtin_constant_p(size)) {
+ if (!size)
+ return BITS_PER_LONG - PAGE_SHIFT;
+
+ if (size < (1UL << PAGE_SHIFT))
+ return 0;
+
+ return ilog2((size) - 1) - PAGE_SHIFT + 1;
+ }
+
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ return fls(size);
+#else
+ return fls64(size);
+#endif
+}
#endif /* __ASSEMBLY__ */
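
Folding __get_order() into a single inline function keeps the compile-time branch (via __builtin_constant_p) and the runtime branch in one place. A runnable userspace replica of the runtime path, assuming 4 KiB pages and using a loop in place of fls()/fls64():

    #include <stdio.h>

    #define PAGE_SHIFT 12              /* assuming 4 KiB pages */

    /* Userspace replica; the constant-folding branch is omitted. */
    static int get_order(unsigned long size)
    {
        int order = 0;

        size--;
        size >>= PAGE_SHIFT;
        while (size) {                 /* stand-in for fls()/fls64() */
            size >>= 1;
            order++;
        }
        return order;
    }

    int main(void)
    {
        /* 4096 -> 0, 4097 -> 1, 16384 -> 2, 1 MiB -> 8 */
        printf("%d %d %d %d\n", get_order(4096), get_order(4097),
               get_order(16384), get_order(1UL << 20));
        return 0;
    }
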
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 72d51d1e9dd9..5cf2c5dd8b1e 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -149,6 +149,8 @@ struct drm_client_buffer {
struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
int drm_client_modeset_create(struct drm_client_dev *client);
void drm_client_modeset_free(struct drm_client_dev *client);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 759d462d028b..f57eea0481e0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -853,6 +853,13 @@ struct drm_mode_config {
uint32_t preferred_depth, prefer_shadow;
/**
+ * @prefer_shadow_fbdev:
+ *
+ * Hint to framebuffer emulation to prefer shadow-fb rendering.
+ */
+ bool prefer_shadow_fbdev;
+
+ /**
* @quirk_addfb_prefer_xbgr_30bpp:
*
* Special hack for legacy ADDFB to keep nouveau userspace happy. Should
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 3c096c7a51dc..853a8f181394 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
/**
* devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
* @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
* @clks: pointer to the clk_bulk_data table of consumer
*
* Behaves the same as devm_clk_bulk_get() except where there is no clock
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 56b8e358af5c..997a530ff4e9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
+extern struct block_device *bd_start_claiming(struct block_device *bdev,
+ void *holder);
+extern void bd_finish_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder);
+extern void bd_abort_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
extern int __blkdev_reread_part(struct block_device *bdev);
extern int blkdev_reread_part(struct block_device *bdev);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 9ddcf50a3c59..a7f08fb0f865 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void devm_gpiod_unhinge(struct device *dev,
@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline struct gpio_desc *__must_check
@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void devm_gpiod_put_array(struct device *dev,
@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_input(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_array_value(unsigned int array_size,
@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_to_irq(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
const char *name)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
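
Every stub above now warns only when handed a non-NULL descriptor: optional GPIOs legitimately return NULL, and accessors on them must stay quiet, while a real pointer reaching a stub means the consumer API was used without gpiolib support compiled in. A compact model of the semantics, with WARN_ON as a userspace stand-in:

    #include <stdio.h>

    /* Userspace stand-in for WARN_ON(): complain only if cond is true. */
    #define WARN_ON(cond) \
        ((cond) ? fprintf(stderr, "WARN at line %d\n", __LINE__) : 0)

    struct gpio_desc;

    /* Stub as patched: NULL (optional GPIO never wired up) passes
     * silently; a real pointer deserves the warning. */
    static int gpiod_get_value(const struct gpio_desc *desc)
    {
        WARN_ON(desc);
        return 0;
    }

    int main(void)
    {
        gpiod_get_value(NULL);                      /* silent */
        gpiod_get_value((struct gpio_desc *)0x1);   /* warns */
        return 0;
    }
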
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index b8a08b2a10ca..7ef56dc18050 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
- bool ret = hmm_range_valid(range);
-
- hmm_range_unregister(range);
- return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
- struct hmm_range *range, bool block)
-{
- long ret;
-
- /*
- * With the old API the driver must set each individual entries with
- * the requested flags (valid, write, ...). So here we set the mask to
- * keep intact the entries provided by the driver and zero out the
- * default_flags.
- */
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
-
- ret = hmm_range_register(range, mirror,
- range->start, range->end,
- PAGE_SHIFT);
- if (ret)
- return (int)ret;
-
- if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- /*
- * The mmap_sem was taken by driver we release it here and
- * returns -EAGAIN which correspond to mmap_sem have been
- * drop in the old API.
- */
- up_read(&range->vma->vm_mm->mmap_sem);
- return -EAGAIN;
- }
-
- ret = hmm_range_fault(range, block);
- if (ret <= 0) {
- if (ret == -EBUSY || !ret) {
- /* Same as above, drop mmap_sem to match old API. */
- up_read(&range->vma->vm_mm->mmap_sem);
- ret = -EBUSY;
- } else if (ret == -EAGAIN)
- ret = -EBUSY;
- hmm_range_unregister(range);
- return ret;
- }
- return 0;
-}
-
/* Below are for HMM internal use only! Not to be used by device driver! */
static inline void hmm_mm_init(struct mm_struct *mm)
{
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b2c1648f7e5d..5714fd35a83c 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -814,6 +814,7 @@ struct tee_client_device_id {
/**
* struct wmi_device_id - WMI device identifier
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @context: pointer to driver specific data
*/
struct wmi_device_id {
const char guid_string[UUID_STRING_LEN+1];
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 1dda31825ec4..71283739ffd2 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -32,6 +32,7 @@
#endif /* CONFIG_SPARSEMEM */
+#ifndef BUILD_VDSO32_64
/*
* page->flags layout:
*
@@ -76,20 +77,22 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif
-#ifdef CONFIG_KASAN_SW_TAGS
-#define KASAN_TAG_WIDTH 8
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
> BITS_PER_LONG - NR_PAGEFLAGS
-#error "KASAN: not enough bits in page flags for tag"
-#endif
-#else
-#define KASAN_TAG_WIDTH 0
+#error "Not enough bits in page flags"
#endif
/*
@@ -104,4 +107,5 @@
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
+#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
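
The rearrangement folds the KASAN tag width into one combined bit-budget check instead of a KASAN-only #error. The same budget can be expressed in plain C11; every width below is hypothetical, only the inequality mirrors the header:

    /* All numbers are made up for illustration. */
    #define SECTIONS_WIDTH     0
    #define ZONES_WIDTH        3
    #define NODES_SHIFT       10
    #define LAST_CPUPID_SHIFT 21
    #define KASAN_TAG_WIDTH    8
    #define NR_PAGEFLAGS      22
    #define BITS_PER_LONG     64

    _Static_assert(SECTIONS_WIDTH + ZONES_WIDTH + NODES_SHIFT +
                   LAST_CPUPID_SHIFT + KASAN_TAG_WIDTH
                   <= BITS_PER_LONG - NR_PAGEFLAGS,
                   "Not enough bits in page flags");

    int main(void) { return 0; }
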
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b848517da64c..f91cb8898ff0 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -152,6 +152,8 @@ enum pageflags {
PG_savepinned = PG_dirty,
/* Has a grant mapping of another (foreign) domain's page. */
PG_foreign = PG_owner_priv_1,
+ /* Remapped by swiotlb-xen. */
+ PG_xen_remapped = PG_owner_priv_1,
/* SLOB */
PG_slob_free = PG_private,
@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
+PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
+ TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c5f8a9f17063..4f225175cb91 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2647,7 +2647,9 @@ struct ib_client {
const union ib_gid *gid,
const struct sockaddr *addr,
void *client_data);
- struct list_head list;
+
+ refcount_t uses;
+ struct completion uses_zero;
u32 client_id;
/* kverbs are not required by the client */
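
Replacing the list linkage with a refcount and a completion lets unregistration block until the last user drops its reference. A pthread-based sketch of that wait-for-zero pairing; the kernel version would use refcount_t plus complete()/wait_for_completion():

    #include <pthread.h>
    #include <stdio.h>

    static int uses = 1;   /* registration itself holds one reference */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t uses_zero = PTHREAD_COND_INITIALIZER;

    static void put_ref(void)
    {
        pthread_mutex_lock(&lock);
        if (--uses == 0)
            pthread_cond_signal(&uses_zero);   /* complete() */
        pthread_mutex_unlock(&lock);
    }

    static void *user(void *arg)
    {
        (void)arg;
        put_ref();          /* client is done with the device */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);
        uses++;             /* a user takes a reference */
        pthread_mutex_unlock(&lock);
        pthread_create(&t, NULL, user, NULL);

        put_ref();          /* drop the registration reference */
        pthread_mutex_lock(&lock);
        while (uses)        /* wait_for_completion(&uses_zero) */
            pthread_cond_wait(&uses_zero, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("no users left; safe to unregister");
        return 0;
    }
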
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0eeea520a853..e06c77d76463 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
/**
* rvt_qp_wqe_unreserve - clean reserved operation
* @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
*
* This decrements the reserve use count.
*
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
* the compiler does not juggle the order of the s_last
* ring index and the decrementing of s_reserved_used.
*/
-static inline void rvt_qp_wqe_unreserve(
- struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+ if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
atomic_dec(&qp->s_reserved_used);
/* insure no compiler re-order up to s_last change */
smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
u32 byte_len, last;
int flags = wqe->wr.send_flags;
+ rvt_qp_wqe_unreserve(qp, flags);
rvt_put_qp_swqe(qp, wqe);
need_completion =
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 2d64b53f947c..9b87e1a1c646 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -115,7 +115,7 @@ struct fc_disc_port {
struct fc_lport *lp;
struct list_head peers;
struct work_struct rport_work;
- u32 port_id;
+ u32 port_id;
};
/**
@@ -155,14 +155,14 @@ struct fc_rport_operations {
*/
struct fc_rport_libfc_priv {
struct fc_lport *local_port;
- enum fc_rport_state rp_state;
+ enum fc_rport_state rp_state;
u16 flags;
#define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
#define FC_RP_FLAGS_RETRY (1 << 1)
#define FC_RP_STARTED (1 << 2)
#define FC_RP_FLAGS_CONF_REQ (1 << 3)
- unsigned int e_d_tov;
- unsigned int r_a_tov;
+ unsigned int e_d_tov;
+ unsigned int r_a_tov;
};
/**
@@ -191,24 +191,24 @@ struct fc_rport_priv {
struct fc_lport *local_port;
struct fc_rport *rport;
struct kref kref;
- enum fc_rport_state rp_state;
+ enum fc_rport_state rp_state;
struct fc_rport_identifiers ids;
u16 flags;
- u16 max_seq;
+ u16 max_seq;
u16 disc_id;
u16 maxframe_size;
- unsigned int retries;
- unsigned int major_retries;
- unsigned int e_d_tov;
- unsigned int r_a_tov;
- struct mutex rp_mutex;
+ unsigned int retries;
+ unsigned int major_retries;
+ unsigned int e_d_tov;
+ unsigned int r_a_tov;
+ struct mutex rp_mutex;
struct delayed_work retry_work;
- enum fc_rport_event event;
+ enum fc_rport_event event;
struct fc_rport_operations *ops;
- struct list_head peers;
- struct work_struct event_work;
+ struct list_head peers;
+ struct work_struct event_work;
u32 supported_classes;
- u16 prli_count;
+ u16 prli_count;
struct rcu_head rcu;
u16 sp_features;
u8 spp_type;
@@ -618,12 +618,12 @@ struct libfc_function_template {
* @disc_callback: Callback routine called when discovery completes
*/
struct fc_disc {
- unsigned char retry_count;
- unsigned char pending;
- unsigned char requested;
- unsigned short seq_count;
- unsigned char buf_len;
- u16 disc_id;
+ unsigned char retry_count;
+ unsigned char pending;
+ unsigned char requested;
+ unsigned short seq_count;
+ unsigned char buf_len;
+ u16 disc_id;
struct list_head rports;
void *priv;
@@ -697,7 +697,7 @@ struct fc_lport {
struct fc_rport_priv *ms_rdata;
struct fc_rport_priv *ptp_rdata;
void *scsi_priv;
- struct fc_disc disc;
+ struct fc_disc disc;
/* Virtual port information */
struct list_head vports;
@@ -715,7 +715,7 @@ struct fc_lport {
u8 retry_count;
/* Fabric information */
- u32 port_id;
+ u32 port_id;
u64 wwpn;
u64 wwnn;
unsigned int service_params;
@@ -743,11 +743,11 @@ struct fc_lport {
struct fc_ns_fts fcts;
/* Miscellaneous */
- struct mutex lp_mutex;
- struct list_head list;
+ struct mutex lp_mutex;
+ struct list_head list;
struct delayed_work retry_work;
void *prov[FC_FC4_PROV_SIZE];
- struct list_head lport_list;
+ struct list_head lport_list;
};
/**
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index dc14b52577f7..2568cb0627ec 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -229,6 +229,7 @@ struct fcoe_fcf {
* @vn_mac: VN_Node assigned MAC address for data
*/
struct fcoe_rport {
+ struct fc_rport_priv rdata;
unsigned long time;
u16 fcoe_len;
u16 flags;
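
Embedding struct fc_rport_priv as the first member of struct fcoe_rport lets one allocation serve both layers and lets the FCoE side recover its wrapper from a generic rport pointer. An illustrative reduction using container_of(); the struct names and fields are simplified stand-ins:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rdata { int port_id; };
    struct fcoe_rport_demo {
        struct rdata rdata;    /* embedded generic rport data */
        int fcoe_len;
    };

    int main(void)
    {
        struct fcoe_rport_demo fr = {
            .rdata = { .port_id = 7 }, .fcoe_len = 2112,
        };
        struct rdata *rd = &fr.rdata;     /* generic view */
        struct fcoe_rport_demo *back =
            container_of(rd, struct fcoe_rport_demo, rdata);

        printf("%d %d\n", back->rdata.port_id, back->fcoe_len);
        return 0;
    }
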
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 2212adda8f77..64e92d56c6a8 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -2,7 +2,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dma_fence
-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DMA_FENCE_H
#include <linux/tracepoint.h>
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index f3a12566bed0..6678cf8b235b 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -3,7 +3,7 @@
#define TRACE_SYSTEM napi
#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_NAPI_H_
+#define _TRACE_NAPI_H
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
@@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll,
#undef NO_DEV
-#endif /* _TRACE_NAPI_H_ */
+#endif /* _TRACE_NAPI_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 60d0d8bd336d..0d1a9ebf55ba 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -2,7 +2,7 @@
#define TRACE_SYSTEM qdisc
#if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_QDISC_H_
+#define _TRACE_QDISC_H
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue,
__entry->txq_state, __entry->packets, __entry->skbaddr )
);
-#endif /* _TRACE_QDISC_H_ */
+#endif /* _TRACE_QDISC_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h
index 0818f6286110..971cd02d2daf 100644
--- a/include/trace/events/tegra_apb_dma.h
+++ b/include/trace/events/tegra_apb_dma.h
@@ -1,5 +1,5 @@
#if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_TEGRA_APM_DMA_H
+#define _TRACE_TEGRA_APB_DMA_H
#include <linux/tracepoint.h>
#include <linux/dmaengine.h>
@@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr,
TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
);
-#endif /* _TRACE_TEGRADMA_H */
+#endif /* _TRACE_TEGRA_APB_DMA_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
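
All of the trace-header fixes above repair the same defect: the token tested by the guard and the token defined did not match, so the guard never armed. The corrected shape, with an obviously illustrative name:

    /* Tested token and defined token must be the same, otherwise the
     * body is re-seen on every inclusion instead of only when
     * TRACE_HEADER_MULTI_READ asks for it. */
    #if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_EXAMPLE_H       /* must match the token tested above */

    /* ... tracepoint definitions ... */

    #endif /* _TRACE_EXAMPLE_H */
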
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index ba1b460c9944..237e36a280cb 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
- * Virtio-iommu definition v0.9
+ * Virtio-iommu definition v0.12
*
- * Copyright (C) 2018 Arm Ltd.
+ * Copyright (C) 2019 Arm Ltd.
*/
#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
#define _UAPI_LINUX_VIRTIO_IOMMU_H
@@ -11,26 +11,31 @@
/* Feature bits */
#define VIRTIO_IOMMU_F_INPUT_RANGE 0
-#define VIRTIO_IOMMU_F_DOMAIN_BITS 1
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1
#define VIRTIO_IOMMU_F_MAP_UNMAP 2
#define VIRTIO_IOMMU_F_BYPASS 3
#define VIRTIO_IOMMU_F_PROBE 4
+#define VIRTIO_IOMMU_F_MMIO 5
-struct virtio_iommu_range {
- __u64 start;
- __u64 end;
+struct virtio_iommu_range_64 {
+ __le64 start;
+ __le64 end;
+};
+
+struct virtio_iommu_range_32 {
+ __le32 start;
+ __le32 end;
};
struct virtio_iommu_config {
/* Supported page sizes */
- __u64 page_size_mask;
+ __le64 page_size_mask;
/* Supported IOVA range */
- struct virtio_iommu_range input_range;
+ struct virtio_iommu_range_64 input_range;
/* Max domain ID size */
- __u8 domain_bits;
- __u8 padding[3];
+ struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
- __u32 probe_size;
+ __le32 probe_size;
};
/* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
#define VIRTIO_IOMMU_S_RANGE 0x05
#define VIRTIO_IOMMU_S_NOENT 0x06
#define VIRTIO_IOMMU_S_FAULT 0x07
+#define VIRTIO_IOMMU_S_NOMEM 0x08
struct virtio_iommu_req_head {
__u8 type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
#define VIRTIO_IOMMU_MAP_F_READ (1 << 0)
#define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1)
-#define VIRTIO_IOMMU_MAP_F_EXEC (1 << 2)
-#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 3)
+#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2)
#define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \
VIRTIO_IOMMU_MAP_F_WRITE | \
- VIRTIO_IOMMU_MAP_F_EXEC | \
VIRTIO_IOMMU_MAP_F_MMIO)
struct virtio_iommu_req_map {
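
Switching the config fields from __u64/__u8 to explicit __le64/__le32 types records that the device exposes them little-endian, so big-endian CPUs must byte-swap on access rather than read the integers raw. A userspace sketch of the accessor such annotations imply:

    #include <stdint.h>
    #include <stdio.h>

    /* Define the value bytewise so the CPU's own endianness cannot
     * leak into the interpretation - what le32_to_cpu() guarantees. */
    static uint32_t demo_le32_to_cpu(const uint8_t b[4])
    {
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    int main(void)
    {
        uint8_t wire[4] = { 0x00, 0x10, 0x00, 0x00 };  /* LE 0x1000 */

        printf("probe_size = 0x%x\n", demo_le32_to_cpu(wire));
        return 0;
    }
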
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 4969817124a8..98b30c1613b2 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
}
#endif
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long len);
+
/*
* xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
* @vma: VMA to map the pages into
diff --git a/kernel/Makefile b/kernel/Makefile
index a8d923b5481b..ef0d95a190b4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -111,7 +111,6 @@ obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
obj-$(CONFIG_TORTURE_TEST) += torture.o
obj-$(CONFIG_HAS_IOMEM) += iomem.o
-obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_RSEQ) += rseq.o
obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index bfc0c17f2a3d..2bd410f934b3 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -243,8 +243,9 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
/* CMA can be used only in the context which permits sleeping */
if (cma && gfpflags_allow_blocking(gfp)) {
- align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
- page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN);
+ size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
+
+ page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
}
/* Fallback allocation of normal pages */
@@ -266,7 +267,8 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
*/
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
- if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT))
+ if (!cma_release(dev_get_cma_area(dev), page,
+ PAGE_ALIGN(size) >> PAGE_SHIFT))
__free_pages(page, get_order(size));
}
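
Using PAGE_ALIGN(size) >> PAGE_SHIFT matters for sub-page allocations: a plain shift truncates to zero pages, so cma_release() would be asked to free nothing. The arithmetic, runnable, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* assuming 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long size = 100;          /* sub-page allocation */

        /* Truncation says "0 pages"; rounding up first frees the one
         * page the allocation actually consumed. */
        printf("truncated: %lu\n", size >> PAGE_SHIFT);
        printf("aligned:   %lu\n", PAGE_ALIGN(size) >> PAGE_SHIFT);
        return 0;
    }
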
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 1f628e7ac709..b945239621d8 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
int ret;
if (!dev_is_dma_coherent(dev)) {
+ unsigned long pfn;
+
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
return -ENXIO;
- page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
- dma_addr));
+ /* If the PFN is not valid, we do not have a struct page */
+ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+ page = pfn_to_page(pfn);
} else {
page = virt_to_page(cpu_addr);
}
@@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
if (!dev_is_dma_coherent(dev)) {
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
return -ENXIO;
+
+ /* If the PFN is not valid, we do not have a struct page */
pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+ if (!pfn_valid(pfn))
+ return -ENXIO;
} else {
pfn = page_to_pfn(virt_to_page(cpu_addr));
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 4436158a6d30..5b4a5dcce8f8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -734,9 +734,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
autoreap = true;
}
- tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
- if (tsk->exit_state == EXIT_DEAD)
+ if (autoreap) {
+ tsk->exit_state = EXIT_DEAD;
list_add(&tsk->ptrace_entry, &dead);
+ }
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
diff --git a/kernel/signal.c b/kernel/signal.c
index 91b789dd6e72..e667be6907d7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -349,7 +349,7 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
* @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
* Group stop states are cleared and the group stop count is consumed if
* %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
- * stop, the appropriate %SIGNAL_* flags are set.
+ * stop, the appropriate `SIGNAL_*` flags are set.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
@@ -1885,6 +1885,7 @@ static void do_notify_pidfd(struct task_struct *task)
{
struct pid *pid;
+ WARN_ON(task->exit_state == 0);
pid = task_pid(task);
wake_up_all(&pid->wait_pidfd);
}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 69ebf3c2f1b5..78af97163147 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
return 0;
+ /*
+ * Do not trace a function if it's filtered by set_graph_notrace.
+ * Make the index of ret stack negative to indicate that it should
+ * ignore further functions. But it needs its own ret stack entry
+ * to recover the original index in order to continue tracing after
+ * returning from the function.
+ */
if (ftrace_graph_notrace_addr(trace->func)) {
trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
/*
@@ -156,16 +163,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
return 0;
/*
- * Do not trace a function if it's filtered by set_graph_notrace.
- * Make the index of ret stack negative to indicate that it should
- * ignore further functions. But it needs its own ret stack entry
- * to recover the original index in order to continue tracing after
- * returning from the function.
- */
- if (ftrace_graph_notrace_addr(trace->func))
- return 1;
-
- /*
* Stop here if tracing_threshold is set. We only write function return
* events to the ring buffer.
*/
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fafba1a923b..7fa97a8b5717 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -106,7 +106,6 @@ endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- default !(CLANG_VERSION < 90000)
depends on KASAN
help
The LLVM stack address sanitizer has a known problem that
@@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE
Disabling asan-stack makes it safe to run kernels built
with clang-8 with KASAN enabled, though it loses some of
the functionality.
- This feature is always disabled when compile-testing with clang-8
- or earlier to avoid cluttering the output in stack overflow
- warnings, but clang-8 users can still enable it for builds without
- CONFIG_COMPILE_TEST. On gcc and later clang versions it is
- assumed to always be safe to use and enabled by default.
+ This feature is always disabled when compile-testing with clang
+ to avoid cluttering the output in stack overflow warnings,
+ but clang users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
+ to use and enabled by default.
config KASAN_STACK
int
diff --git a/lib/Makefile b/lib/Makefile
index 095601ce371d..29c02a924973 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -279,7 +279,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+KASAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 62d19f270cad..9729f271d150 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -222,7 +222,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor,
* Copy the buffer to check that it's not wiped on
* free().
*/
- buf_copy = kmalloc(size, GFP_KERNEL);
+ buf_copy = kmalloc(size, GFP_ATOMIC);
if (buf_copy)
memcpy(buf_copy, buf, size);
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 2d1c1f241fd9..e630e7ff57f1 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
ns = vdso_ts->nsec;
last = vd->cycle_last;
if (unlikely((s64)cycles < 0))
- return clock_gettime_fallback(clk, ts);
+ return -1;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
ns >>= vd->shift;
@@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
}
static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
/*
* Convert the clockid to a bitmask and use it to check which
@@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
} else if (msk & VDSO_RAW) {
return do_hres(&vd[CS_RAW], clock, ts);
}
+ return -1;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ int ret = __cvdso_clock_gettime_common(clock, ts);
-fallback:
- return clock_gettime_fallback(clock, ts);
+ if (unlikely(ret))
+ return clock_gettime_fallback(clock, ts);
+ return 0;
}
static __maybe_unused int
@@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_gettime_common(clock, &ts);
- ret = __cvdso_clock_gettime(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_gettime32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_gettime_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
}
static __maybe_unused int
@@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time)
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
const struct vdso_data *vd = __arch_get_vdso_data();
- u64 ns;
+ u64 hrtimer_res;
u32 msk;
- u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ u64 ns;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
+ hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
/*
* Convert the clockid to a bitmask and use it to check which
* clocks are handled in the VDSO directly.
@@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
*/
ns = hrtimer_res;
} else {
- goto fallback;
+ return -1;
}
- if (res) {
- res->tv_sec = 0;
- res->tv_nsec = ns;
- }
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
return 0;
+}
+
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ int ret = __cvdso_clock_getres_common(clock, res);
-fallback:
- return clock_getres_fallback(clock, res);
+ if (unlikely(ret))
+ return clock_getres_fallback(clock, res);
+ return 0;
}
static __maybe_unused int
@@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_getres_common(clock, &ts);
- ret = __cvdso_clock_getres(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_getres32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_getres_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
}
#endif /* VDSO_HAS_CLOCK_GETRES */
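
The refactor above splits each vDSO entry point into a common helper that merely reports success or failure and a thin wrapper that picks the fallback; that split is what lets the 32-bit variant call a native 32-bit syscall when VDSO_HAS_32BIT_FALLBACK is set. A schematic reduction, with all names and values illustrative:

    #include <stdio.h>

    static int common_gettime(int clock, long long *ns)
    {
        if (clock != 0)
            return -1;          /* not handled in the vDSO */
        *ns = 123456789;        /* pretend fast path */
        return 0;
    }

    static int fallback64(int clock, long long *ns) { *ns = 0; return 0; }
    static int fallback32(int clock, long *ns) { *ns = 0; return 0; }

    static int gettime64(int clock, long long *ns)
    {
        if (common_gettime(clock, ns))
            return fallback64(clock, ns);   /* 64-bit syscall */
        return 0;
    }

    static int gettime32(int clock, long *ns)
    {
        long long tmp;

        if (common_gettime(clock, &tmp))
            return fallback32(clock, ns);   /* native 32-bit syscall */
        *ns = (long)tmp;
        return 0;
    }

    int main(void)
    {
        long long a;
        long b;

        gettime64(0, &a);       /* fast path */
        gettime32(5, &b);       /* falls back */
        printf("%lld %ld\n", a, b);
        return 0;
    }
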
diff --git a/mm/Makefile b/mm/Makefile
index 338e528ad436..d0b295c3b764 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -102,5 +102,6 @@ obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
+obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 83a7b614061f..798275a51887 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
* memory corruption is possible and we should stop execution.
*/
BUG_ON(!trylock_page(page));
- list_del(&page->lru);
balloon_page_insert(b_dev_info, page);
unlock_page(page);
__count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
* @b_dev_info: balloon device descriptor where we will insert a new page to
* @pages: pages to enqueue - allocated using balloon_page_alloc.
*
- * Driver must call it to properly enqueue a balloon pages before definitively
- * removing it from the guest system.
+ * Driver must call this function to properly enqueue balloon pages before
+ * definitively removing them from the guest system.
*
* Return: number of pages that were enqueued.
*/
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
+ list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
* @n_req_pages: number of requested pages.
*
* Driver must call this function to properly de-allocate previously enlisted
- * balloon pages before definetively releasing it back to the guest system.
+ * balloon pages before definitively releasing them back to the guest system.
* This function tries to remove @n_req_pages from the ballooned pages and
* return them to the caller in the @pages list.
*
- * Note that this function may fail to dequeue some pages temporarily empty due
- * to compaction isolated pages.
+ * Note that this function may fail to dequeue some pages even if the balloon
+ * isn't empty - since the page list can be temporarily empty due to compaction
+ * of isolated pages.
*
* Return: number of pages that were added to the @pages list.
*/
@@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
/*
* balloon_page_alloc - allocates a new page for insertion into the balloon
- * page list.
+ * page list.
+ *
+ * Driver must call this function to properly allocate a new balloon page.
+ * Driver must call balloon_page_enqueue before definitively removing the page
+ * from the guest system.
*
- * Driver must call it to properly allocate a new enlisted balloon page.
- * Driver must call balloon_page_enqueue before definitively removing it from
- * the guest system. This function returns the page address for the recently
- * allocated page or NULL in the case we fail to allocate a new page this turn.
+ * Return: struct page for the allocated page or NULL on allocation failure.
*/
struct page *balloon_page_alloc(void)
{
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
EXPORT_SYMBOL_GPL(balloon_page_alloc);
/*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- * page list.
- * @b_dev_info: balloon device descriptor where we will insert a new page to
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
+ * @b_dev_info: balloon device descriptor where we will insert a new page
* @page: new page to enqueue - allocated using balloon_page_alloc.
*
- * Driver must call it to properly enqueue a new allocated balloon page
- * before definitively removing it from the guest system.
- * This function returns the page address for the recently enqueued page or
- * NULL in the case we fail to allocate a new page this turn.
+ * Drivers must call this function to properly enqueue a newly allocated balloon
+ * page before definitively removing the page from the guest system.
+ *
+ * Drivers must not call balloon_page_enqueue on pages that have been pushed to
+ * a list with balloon_page_push before removing them with balloon_page_pop. To
+ * enqueue a list of pages, use balloon_page_list_enqueue instead.
*/
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
/*
* balloon_page_dequeue - removes a page from balloon's page list and returns
- * the its address to allow the driver release the page.
+ * its address to allow the driver to release the page.
* @b_dev_info: balloon device descriptor where we will grab a page from.
*
- * Driver must call it to properly de-allocate a previous enlisted balloon page
- * before definetively releasing it back to the guest system.
- * This function returns the page address for the recently dequeued page or
- * NULL in the case we find balloon's page list temporarily empty due to
- * compaction isolated pages.
+ * Driver must call this function to properly dequeue a previously enqueued page
+ * before definitively releasing it back to the guest system.
+ *
+ * Caller must perform its own accounting to ensure that this
+ * function is called only if some pages are actually enqueued.
+ *
+ * Note that this function may fail to dequeue some pages even if there are
+ * some enqueued pages - since the page list can be temporarily empty due to
+ * the compaction of isolated pages.
+ *
+ * TODO: remove the caller accounting requirements, and allow caller to wait
+ * until all pages can be dequeued.
+ *
+ * Return: struct page for the dequeued page, or NULL if no page was dequeued.
*/
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
if (n_pages != 1) {
/*
* If we are unable to dequeue a balloon page because the page
- * list is empty and there is no isolated pages, then something
+ * list is empty and there are no isolated pages, then something
* went off track and some balloon pages are lost.
- * BUG() here, otherwise the balloon driver may get stuck into
+ * BUG() here, otherwise the balloon driver may get stuck in
* an infinite loop while attempting to release all its pages.
*/
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
/*
* We can not easily support the no copy case here so ignore it as it
- * is unlikely to be use with ballon pages. See include/linux/hmm.h for
- * user of the MIGRATE_SYNC_NO_COPY mode.
+ * is unlikely to be used with balloon pages. See include/linux/hmm.h
+ * for a user of the MIGRATE_SYNC_NO_COPY mode.
*/
if (mode == MIGRATE_SYNC_NO_COPY)
return -EINVAL;
diff --git a/mm/compaction.c b/mm/compaction.c
index 9e1b9acb116b..952dc2fb24e5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -842,13 +842,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/*
* Periodically drop the lock (if held) regardless of its
- * contention, to give chance to IRQs. Abort async compaction
- * if contended.
+ * contention, to give IRQs a chance. Abort completely if
+ * a fatal signal is pending.
*/
if (!(low_pfn % SWAP_CLUSTER_MAX)
&& compact_unlock_should_abort(&pgdat->lru_lock,
- flags, &locked, cc))
- break;
+ flags, &locked, cc)) {
+ low_pfn = 0;
+ goto fatal_pending;
+ }
if (!pfn_valid_within(low_pfn))
goto isolate_fail;
@@ -1060,6 +1062,7 @@ isolate_abort:
trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
nr_scanned, nr_isolated);
+fatal_pending:
cc->total_migrate_scanned += nr_scanned;
if (nr_isolated)
count_compact_events(COMPACTISOLATED, nr_isolated);
diff --git a/mm/hmm.c b/mm/hmm.c
index e1eedef129cf..16b6731a34db 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
* @range: range
* Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
* permission (for instance asking for write and range is read only),
- * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
+ * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
* vma or it is illegal to access that range), number of valid pages
* in range->pfns[] (from range start address).
*
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
do {
/* If range is no longer valid force retry. */
if (!range->valid)
- return -EAGAIN;
+ return -EBUSY;
vma = find_vma(hmm->mm, start);
if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
do {
/* If range is no longer valid force retry. */
- if (!range->valid) {
- up_read(&hmm->mm->mmap_sem);
- return -EAGAIN;
- }
+ if (!range->valid)
+ return -EBUSY;
vma = find_vma(hmm->mm, start);
if (vma == NULL || (vma->vm_flags & device_vma))
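
With both helpers now returning -EBUSY, and hmm_range_fault() no longer dropping mmap_sem on its own, a driver can keep one simple retry loop. A hedged sketch of such a caller (the surrounding loop is illustrative; real callers would also wait for the range to become valid again before retrying):

    /* Sketch: retry while the range is invalidated underneath us. */
    down_read(&mm->mmap_sem);
    do {
            ret = hmm_range_fault(range, false);
    } while (ret == -EBUSY);
    up_read(&mm->mmap_sem);
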
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index dbbd518fb6b3..6e9e8cca663e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -114,7 +114,7 @@
/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
- __GFP_NOWARN | __GFP_NOFAIL)
+ __GFP_NOWARN)
/* scanning area inside a memory block */
struct kmemleak_scan_area {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2a9bbddb0e55..c73f09913165 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -132,7 +132,6 @@ static void release_memory_resource(struct resource *res)
return;
release_resource(res);
kfree(res);
- return;
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
@@ -979,7 +978,6 @@ static void rollback_node_hotadd(int nid)
arch_refresh_nodedata(nid, NULL);
free_percpu(pgdat->per_cpu_nodestats);
arch_free_nodedata(pgdat);
- return;
}
diff --git a/kernel/memremap.c b/mm/memremap.c
index 6ee03a816d67..6ee03a816d67 100644
--- a/kernel/memremap.c
+++ b/mm/memremap.c
diff --git a/mm/migrate.c b/mm/migrate.c
index 8992741f10aa..a42858d8e00b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -767,12 +767,12 @@ recheck_buffers:
}
bh = bh->b_this_page;
} while (bh != head);
- spin_unlock(&mapping->private_lock);
if (busy) {
if (invalidated) {
rc = -EAGAIN;
goto unlock_buffers;
}
+ spin_unlock(&mapping->private_lock);
invalidate_bh_lrus();
invalidated = true;
goto recheck_buffers;
@@ -805,6 +805,8 @@ recheck_buffers:
rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
+ if (check_refs)
+ spin_unlock(&mapping->private_lock);
bh = head;
do {
unlock_buffer(bh);
@@ -2338,16 +2340,13 @@ next:
static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
- struct mm_walk mm_walk;
-
- mm_walk.pmd_entry = migrate_vma_collect_pmd;
- mm_walk.pte_entry = NULL;
- mm_walk.pte_hole = migrate_vma_collect_hole;
- mm_walk.hugetlb_entry = NULL;
- mm_walk.test_walk = NULL;
- mm_walk.vma = migrate->vma;
- mm_walk.mm = migrate->vma->vm_mm;
- mm_walk.private = migrate;
+ struct mm_walk mm_walk = {
+ .pmd_entry = migrate_vma_collect_pmd,
+ .pte_hole = migrate_vma_collect_hole,
+ .vma = migrate->vma,
+ .mm = migrate->vma->vm_mm,
+ .private = migrate,
+ };
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
migrate->start,
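
The conversion above is safe because of a C guarantee worth spelling out: in a designated initializer, every member that is not named is zero-initialized, so the explicit NULL stores for .pte_entry, .hugetlb_entry and .test_walk can simply be dropped. A standalone illustration (the struct here is illustrative, not the kernel's):

    #include <stdio.h>

    struct walk {
            int (*pmd_entry)(void);
            int (*pte_entry)(void);         /* not named below, so NULL */
            void *private;
    };

    static int pmd_cb(void) { return 0; }

    int main(void)
    {
            struct walk w = {
                    .pmd_entry = pmd_cb,
                    .private = NULL,
            };

            /* pte_entry was never mentioned: guaranteed to be NULL. */
            printf("%p\n", (void *)w.pte_entry);
            return 0;
    }
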
diff --git a/mm/slub.c b/mm/slub.c
index e6c030e47364..8834563cdb4b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1432,7 +1432,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void *old_tail = *tail ? *tail : *head;
int rsize;
- if (slab_want_init_on_free(s))
+ if (slab_want_init_on_free(s)) {
+ void *p = NULL;
+
do {
object = next;
next = get_freepointer(s, object);
@@ -1445,8 +1447,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
: 0;
memset((char *)object + s->inuse, 0,
s->size - s->inuse - rsize);
- set_freepointer(s, object, next);
+ set_freepointer(s, object, p);
+ p = object;
} while (object != old_tail);
+ }
/*
* Compiler cannot detect this function can be removed if slab_free_hook()
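
The subtlety in this hunk is that zeroing an object on free also wipes the free pointer stored inside it, so the loop must rebuild the freelist as it walks: each object is relinked to the previously visited one via p, which also reverses the list order. The generic shape of that rebuild, stripped of the SLUB specifics (the node layout is illustrative):

    #include <string.h>

    struct node { struct node *next; char payload[56]; };

    /* Sketch: zero each node's payload and relink the list as we go.
     * Returns the new head; the order ends up reversed. */
    static struct node *zero_and_relink(struct node *head)
    {
            struct node *p = NULL;  /* previously visited node */

            while (head) {
                    struct node *next = head->next;

                    memset(head->payload, 0, sizeof(head->payload));
                    head->next = p; /* point at the previous node */
                    p = head;
                    head = next;
            }
            return p;
    }
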
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 44df66a98f2a..dbdc46a84f63 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -699,7 +699,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
unsigned long ret, freed = 0;
struct shrinker *shrinker;
- if (!mem_cgroup_is_root(memcg))
+ /*
+ * The root memcg might be allocated even though memcg is disabled
+ * via "cgroup_disable=memory" boot parameter. This could make
+ * mem_cgroup_is_root() return false, then just run memcg slab
+ * shrink, but skip global shrink. This may result in premature
+ * oom.
+ */
+ if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
if (!down_read_trylock(&shrinker_rwsem))
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 624ccc6ac744..f8efaa9f647c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -272,6 +272,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
return v;
}
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
/*
* Initialize a policy database structure.
*/
@@ -319,8 +321,10 @@ static int policydb_init(struct policydb *p)
out:
hashtab_destroy(p->filename_trans);
hashtab_destroy(p->range_tr);
- for (i = 0; i < SYM_NUM; i++)
+ for (i = 0; i < SYM_NUM; i++) {
+ hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
hashtab_destroy(p->symtab[i].table);
+ }
return rc;
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 12dd9b318db1..703857aab00f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1873,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
if (!to_check)
break; /* all drained */
init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&to_check->sleep, &wait);
snd_pcm_stream_unlock_irq(substream);
if (runtime->no_period_wakeup)
@@ -1885,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
}
tout = msecs_to_jiffies(tout * 1000);
}
- tout = schedule_timeout_interruptible(tout);
+ tout = schedule_timeout(tout);
snd_pcm_stream_lock_irq(substream);
group = snd_pcm_stream_group_ref(substream);
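
The one-line set_current_state() addition matters because schedule_timeout(), unlike schedule_timeout_interruptible(), does not set the task state itself: the state must be TASK_INTERRUPTIBLE before the waiter becomes visible on the queue and before the lock protecting the wakeup condition is released, or a wakeup arriving in that window is lost and the task sleeps for the full timeout. The canonical shape of the sequence, mirroring the patched code (wq and lock are illustrative; the scheduler APIs are real):

    /* Sketch of a lost-wakeup-safe timed wait. */
    wait_queue_entry_t wait;

    init_waitqueue_entry(&wait, current);
    set_current_state(TASK_INTERRUPTIBLE); /* 1: mark ourselves sleeping */
    add_wait_queue(&wq, &wait);            /* 2: become visible to wakers */
    spin_unlock_irq(&lock);                /* 3: let the waker run */

    timeout = schedule_timeout(timeout);   /* a wakeup ends this early */

    spin_lock_irq(&lock);
    remove_wait_queue(&wq, &wait);
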
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 1192c7561d62..3c2db3816029 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
if (!acomp)
return -ENODEV;
if (!acomp->ops) {
- request_module("i915");
- /* 60s timeout */
- wait_for_completion_timeout(&bind_complete,
- msecs_to_jiffies(60 * 1000));
+ if (!IS_ENABLED(CONFIG_MODULES) ||
+ !request_module("i915")) {
+ /* 60s timeout */
+ wait_for_completion_timeout(&bind_complete,
+ msecs_to_jiffies(60 * 1000));
+ }
}
if (!acomp->ops) {
dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 71d5f540334a..4c12cc5b53fd 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -72,7 +72,7 @@ int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(dev, pipe);
- if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+ if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
return -EINVAL;
return 0;
}
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 4602464ebdfb..a4217c1a5d01 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -214,6 +214,18 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
+ /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
+ /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index d819a3e8b552..9a507716ae2f 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -229,6 +229,16 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
/* SVE registers */
#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
diff --git a/tools/arch/powerpc/include/uapi/asm/mman.h b/tools/arch/powerpc/include/uapi/asm/mman.h
index f33105bc5ca6..8601d824a9c6 100644
--- a/tools/arch/powerpc/include/uapi/asm/mman.h
+++ b/tools/arch/powerpc/include/uapi/asm/mman.h
@@ -4,12 +4,8 @@
#define MAP_DENYWRITE 0x0800
#define MAP_EXECUTABLE 0x1000
#define MAP_GROWSDOWN 0x0100
-#define MAP_HUGETLB 0x40000
#define MAP_LOCKED 0x80
-#define MAP_NONBLOCK 0x10000
#define MAP_NORESERVE 0x40
-#define MAP_POPULATE 0x8000
-#define MAP_STACK 0x20000
#include <uapi/asm-generic/mman-common.h>
/* MAP_32BIT is undefined on powerpc, fix it for perf */
#define MAP_32BIT 0
diff --git a/tools/arch/sparc/include/uapi/asm/mman.h b/tools/arch/sparc/include/uapi/asm/mman.h
index 38920eed8cbf..7b94dccc843d 100644
--- a/tools/arch/sparc/include/uapi/asm/mman.h
+++ b/tools/arch/sparc/include/uapi/asm/mman.h
@@ -4,12 +4,8 @@
#define MAP_DENYWRITE 0x0800
#define MAP_EXECUTABLE 0x1000
#define MAP_GROWSDOWN 0x0200
-#define MAP_HUGETLB 0x40000
#define MAP_LOCKED 0x100
-#define MAP_NONBLOCK 0x10000
#define MAP_NORESERVE 0x40
-#define MAP_POPULATE 0x8000
-#define MAP_STACK 0x20000
#include <uapi/asm-generic/mman-common.h>
/* MAP_32BIT is undefined on sparc, fix it for perf */
#define MAP_32BIT 0
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index d6ab5b4d15e5..503d3f42da16 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -378,10 +378,11 @@ struct kvm_sync_regs {
struct kvm_vcpu_events events;
};
-#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
-#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
-#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
-#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */
@@ -432,4 +433,17 @@ struct kvm_nested_state {
} data;
};
+/* for KVM_CAP_PMU_EVENT_FILTER */
+struct kvm_pmu_event_filter {
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[0];
+};
+
+#define KVM_PMU_EVENT_ALLOW 0
+#define KVM_PMU_EVENT_DENY 1
+
#endif /* _ASM_X86_KVM_H */
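
Together with the KVM_SET_PMU_EVENT_FILTER ioctl added to linux/kvm.h further down in this diff, the new structure would be used from userspace roughly as follows. A hedged sketch (the VM fd and the raw event encoding are assumed; events[] is a flexible array, so the filter is allocated with trailing space):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: deny one raw PMU event for a VM. */
    static int deny_pmu_event(int vm_fd, __u64 raw_event)
    {
            struct kvm_pmu_event_filter *f;
            int ret;

            f = calloc(1, sizeof(*f) + sizeof(__u64)); /* room for 1 event */
            if (!f)
                    return -1;
            f->action = KVM_PMU_EVENT_DENY;
            f->nevents = 1;
            f->events[0] = raw_event;
            ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
            free(f);
            return ret;
    }
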
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index d213ec5c3766..f0b0c90dd398 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -146,7 +146,6 @@
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
-#define VMX_ABORT_VMCS_CORRUPTED 3
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index abd238d0f7a4..63b1f506ea67 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -19,15 +19,18 @@
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
-#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
-# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
-#else
-# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
-#endif
-/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x010000 /* do not block on IO */
+#define MAP_STACK 0x020000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x040000 /* create a huge page mapping */
+#define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */
#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
+ * uninitialized */
+
/*
* Flags for mlock
*/
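
For context on how one of the relocated flags is consumed, here is a minimal userspace sketch of an anonymous MAP_HUGETLB mapping (a 2 MiB huge page size is assumed; success depends on the system's hugetlb pool being configured):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2UL * 1024 * 1024; /* one 2 MiB huge page */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap(MAP_HUGETLB)"); /* e.g. no pages reserved */
                    return 1;
            }
            munmap(p, len);
            return 0;
    }
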
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 36c197fc44a0..406f7718f9ad 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -9,13 +9,11 @@
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
-/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
+/*
+ * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h
+ * for MAP_HUGETLB usage
+ */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index a87904daf103..1be0e798e362 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -844,9 +844,15 @@ __SYSCALL(__NR_fsconfig, sys_fsconfig)
__SYSCALL(__NR_fsmount, sys_fsmount)
#define __NR_fspick 433
__SYSCALL(__NR_fspick, sys_fspick)
+#define __NR_pidfd_open 434
+__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+#ifdef __ARCH_WANT_SYS_CLONE3
+#define __NR_clone3 435
+__SYSCALL(__NR_clone3, sys_clone3)
+#endif
#undef __NR_syscalls
-#define __NR_syscalls 434
+#define __NR_syscalls 436
/*
* 32 bit systems traditionally used different
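
Neither new syscall has a glibc wrapper at this point, so they are exercised with syscall(2) directly. A hedged sketch for pidfd_open (the fallback define uses the number from the table above; flags must currently be 0):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_pidfd_open
    #define __NR_pidfd_open 434     /* from the syscall table above */
    #endif

    int main(void)
    {
            long fd = syscall(__NR_pidfd_open, getpid(), 0);

            if (fd < 0) {
                    perror("pidfd_open");
                    return 1;
            }
            close(fd);
            return 0;
    }
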
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 661d73f9a919..8a5b2f8f8eb9 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -50,6 +50,7 @@ typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
+#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 3a73f5316766..328d05e77d9f 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class {
struct i915_engine_class_instance {
__u16 engine_class; /* see enum drm_i915_gem_engine_class */
__u16 engine_instance;
+#define I915_ENGINE_CLASS_INVALID_NONE -1
+#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};
/**
@@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
+#define DRM_I915_GEM_VM_CREATE 0x3a
+#define DRM_I915_GEM_VM_DESTROY 0x3b
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -598,6 +604,12 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_MMAP_GTT_COHERENT 52
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
+ * execution through use of explicit fence support.
+ * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
+ */
+#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -1120,7 +1132,16 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_ARRAY (1<<19)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
+/*
+ * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_SUBMIT (1 << 20)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1464,8 +1485,9 @@ struct drm_i915_gem_context_create_ext {
__u32 ctx_id; /* output: id of new context*/
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
- (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+ (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
__u64 extensions;
};
@@ -1507,6 +1529,41 @@ struct drm_i915_gem_context_param {
* On creation, all new contexts are marked as recoverable.
*/
#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+
+ /*
+ * The id of the associated virtual memory address space (ppGTT) of
+ * this context. Can be retrieved and passed to another context
+ * (on the same fd) for both to use the same ppGTT and so share
+ * address layouts, and avoid reloading the page tables on context
+ * switches between themselves.
+ *
+ * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+ */
+#define I915_CONTEXT_PARAM_VM 0x9
+
+/*
+ * I915_CONTEXT_PARAM_ENGINES:
+ *
+ * Bind this context to operate on this subset of available engines. Henceforth,
+ * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
+ * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
+ * and upwards. Slots 0...N are filled in using the specified (class, instance).
+ * Use
+ * engine_class: I915_ENGINE_CLASS_INVALID,
+ * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
+ * to specify a gap in the array that can be filled in later, e.g. by a
+ * virtual engine used for load balancing.
+ *
+ * Setting the number of engines bound to the context to 0, by passing a
+ * zero-sized argument, will revert to the default settings.
+ *
+ * See struct i915_context_param_engines.
+ *
+ * Extensions:
+ * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
+ * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ */
+#define I915_CONTEXT_PARAM_ENGINES 0xa
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -1540,9 +1597,10 @@ struct drm_i915_gem_context_param_sseu {
struct i915_engine_class_instance engine;
/*
- * Unused for now. Must be cleared to zero.
+ * Unknown flags must be cleared to zero.
*/
__u32 flags;
+#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
/*
* Mask of slices to enable for the context. Valid values are a subset
@@ -1570,12 +1628,115 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/*
+ * i915_context_engines_load_balance:
+ *
+ * Enable load balancing across this set of engines.
+ *
+ * A virtual engine is created in the I915_EXEC_DEFAULT slot [0]; when used,
+ * it proxies the execbuffer request onto one engine of the set so as to
+ * distribute the load evenly across the set.
+ *
+ * The set of engines must be compatible (e.g. the same HW class) as they
+ * will share the same logical GPU context and ring.
+ *
+ * To intermix rendering with the virtual engine and direct rendering onto
+ * the backing engines (bypassing the load balancing proxy), the context must
+ * be defined to use a single timeline for all engines.
+ */
+struct i915_context_engines_load_balance {
+ struct i915_user_extension base;
+
+ __u16 engine_index;
+ __u16 num_siblings;
+ __u32 flags; /* all undefined flags must be zero */
+
+ __u64 mbz64; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 num_siblings; \
+ __u32 flags; \
+ __u64 mbz64; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/*
+ * i915_context_engines_bond:
+ *
+ * Constructed bonded pairs for execution within a virtual engine.
+ *
+ * All engines are equal, but some are more equal than others. Given
+ * the distribution of resources in the HW, it may be preferable to run
+ * a request on a given subset of engines in parallel to a request on a
+ * specific engine. We enable this selection of engines within a virtual
+ * engine by specifying bonding pairs: for any given master engine, we will
+ * only execute on one of the corresponding siblings within the virtual engine.
+ *
+ * Executing a request in parallel on the master engine and a sibling requires
+ * coordination via I915_EXEC_FENCE_SUBMIT.
+ */
+struct i915_context_engines_bond {
+ struct i915_user_extension base;
+
+ struct i915_engine_class_instance master;
+
+ __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
+ __u16 num_bonds;
+
+ __u64 flags; /* all undefined flags must be zero */
+ __u64 mbz64[4]; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
+ struct i915_user_extension base; \
+ struct i915_engine_class_instance master; \
+ __u16 virtual_index; \
+ __u16 num_bonds; \
+ __u64 flags; \
+ __u64 mbz64[4]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+struct i915_context_param_engines {
+ __u64 extensions; /* linked chain of extension blocks, 0 terminates */
+#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
+#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
+ __u64 extensions; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
struct i915_user_extension base;
struct drm_i915_gem_context_param param;
};
+struct drm_i915_gem_context_create_ext_clone {
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
+ struct i915_user_extension base;
+ __u32 clone_id;
+ __u32 flags;
+#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
+#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
+#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
+#define I915_CONTEXT_CLONE_SSEU (1u << 3)
+#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
+#define I915_CONTEXT_CLONE_VM (1u << 5)
+#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
+ __u64 rsvd;
+};
+
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
@@ -1821,6 +1982,7 @@ struct drm_i915_perf_oa_config {
struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
/* Must be kept compact -- no holes and well documented */
/*
@@ -1919,6 +2081,47 @@ struct drm_i915_query_topology_info {
__u8 data[];
};
+/**
+ * struct drm_i915_engine_info
+ *
+ * Describes one engine and its capabilities as known to the driver.
+ */
+struct drm_i915_engine_info {
+ /** Engine class and instance. */
+ struct i915_engine_class_instance engine;
+
+ /** Reserved field. */
+ __u32 rsvd0;
+
+ /** Engine flags. */
+ __u64 flags;
+
+ /** Capabilities of this engine. */
+ __u64 capabilities;
+#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
+#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+
+ /** Reserved fields. */
+ __u64 rsvd1[4];
+};
+
+/**
+ * struct drm_i915_query_engine_info
+ *
+ * Engine info query enumerates all engines known to the driver by filling in
+ * an array of struct drm_i915_engine_info structures.
+ */
+struct drm_i915_query_engine_info {
+ /** Number of struct drm_i915_engine_info structs following. */
+ __u32 num_engines;
+
+ /** MBZ */
+ __u32 rsvd[3];
+
+ /** Marker for drm_i915_engine_info structures. */
+ struct drm_i915_engine_info engines[];
+};
+
#if defined(__cplusplus)
}
#endif
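
As a hedged end-to-end sketch of how the engine-map pieces fit together from userspace: define a fixed-size map with the helper macro, then attach it to a context with I915_CONTEXT_PARAM_ENGINES through the existing context-setparam ioctl (DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM and struct drm_i915_gem_context_param are part of the established i915 uAPI; the DRM fd and context id are assumed to exist):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: bind a context to { rcs0, vcs0 }. Execbuffer ring index 0
     * then selects the render engine and index 1 the video engine. */
    static int set_two_engines(int drm_fd, uint32_t ctx_id)
    {
            I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
                    .extensions = 0,
                    .engines = {
                            { .engine_class = I915_ENGINE_CLASS_RENDER,
                              .engine_instance = 0 },
                            { .engine_class = I915_ENGINE_CLASS_VIDEO,
                              .engine_instance = 0 },
                    },
            };
            struct drm_i915_gem_context_param arg = {
                    .ctx_id = ctx_id,
                    .param = I915_CONTEXT_PARAM_ENGINES,
                    .size = sizeof(engines),
                    .value = (uintptr_t)&engines,
            };

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
    }
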
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 7d113a9602f0..4a8c02cafa9a 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -695,6 +695,7 @@ enum {
IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
+ IFLA_VF_BROADCAST, /* VF broadcast */
__IFLA_VF_MAX,
};
@@ -705,6 +706,10 @@ struct ifla_vf_mac {
__u8 mac[32]; /* MAX_ADDR_LEN */
};
+struct ifla_vf_broadcast {
+ __u8 broadcast[32];
+};
+
struct ifla_vf_vlan {
__u32 vf;
__u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index e7c67be7c15f..5e3f12d5359e 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -995,6 +995,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_SVE 170
#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
+#define KVM_CAP_PMU_EVENT_FILTER 173
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1329,6 +1330,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
+/* Available with KVM_CAP_PMU_EVENT_FILTER */
+#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index ed4ee170bee2..b3105ac1381a 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -2,6 +2,8 @@
#ifndef _UAPI_LINUX_SCHED_H
#define _UAPI_LINUX_SCHED_H
+#include <linux/types.h>
+
/*
* cloning flags:
*/
@@ -32,6 +34,20 @@
#define CLONE_IO 0x80000000 /* Clone io context */
/*
+ * Arguments for the clone3 syscall
+ */
+struct clone_args {
+ __aligned_u64 flags;
+ __aligned_u64 pidfd;
+ __aligned_u64 child_tid;
+ __aligned_u64 parent_tid;
+ __aligned_u64 exit_signal;
+ __aligned_u64 stack;
+ __aligned_u64 stack_size;
+ __aligned_u64 tls;
+};
+
+/*
* Scheduling policies
*/
#define SCHED_NORMAL 0
@@ -51,9 +67,21 @@
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
#define SCHED_FLAG_DL_OVERRUN 0x04
+#define SCHED_FLAG_KEEP_POLICY 0x08
+#define SCHED_FLAG_KEEP_PARAMS 0x10
+#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20
+#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40
+
+#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \
+ SCHED_FLAG_KEEP_PARAMS)
+
+#define SCHED_FLAG_UTIL_CLAMP (SCHED_FLAG_UTIL_CLAMP_MIN | \
+ SCHED_FLAG_UTIL_CLAMP_MAX)
#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
SCHED_FLAG_RECLAIM | \
- SCHED_FLAG_DL_OVERRUN)
+ SCHED_FLAG_DL_OVERRUN | \
+ SCHED_FLAG_KEEP_ALL | \
+ SCHED_FLAG_UTIL_CLAMP)
#endif /* _UAPI_LINUX_SCHED_H */
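
struct clone_args is consumed by the raw clone3 syscall, which likewise has no libc wrapper yet. A hedged sketch of the plain fork()-like case (the fallback define uses the x86_64 number from the syscall table earlier in this diff; unset fields must be zero):

    #include <stdio.h>
    #include <string.h>
    #include <signal.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/sched.h>

    #ifndef __NR_clone3
    #define __NR_clone3 435
    #endif

    int main(void)
    {
            struct clone_args args;
            long pid;

            memset(&args, 0, sizeof(args)); /* unset fields must be zero */
            args.exit_signal = SIGCHLD;     /* behave like fork() */

            pid = syscall(__NR_clone3, &args, sizeof(args));
            if (pid < 0) {
                    perror("clone3");
                    return 1;
            }
            if (pid == 0)
                    _exit(0);               /* child */
            printf("child pid %ld\n", pid);
            return 0;
    }
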
diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h
index 964e87217be4..78efe870c2b7 100644
--- a/tools/include/uapi/linux/usbdevice_fs.h
+++ b/tools/include/uapi/linux/usbdevice_fs.h
@@ -76,6 +76,26 @@ struct usbdevfs_connectinfo {
unsigned char slow;
};
+struct usbdevfs_conninfo_ex {
+ __u32 size; /* Size of the structure from the kernel's */
+ /* point of view. Can be used by userspace */
+ /* to determine how much data can be */
+ /* used/trusted. */
+ __u32 busnum; /* USB bus number, as enumerated by the */
+ /* kernel, the device is connected to. */
+ __u32 devnum; /* Device address on the bus. */
+ __u32 speed; /* USB_SPEED_* constants from ch9.h */
+ __u8 num_ports; /* Number of ports the device is connected */
+ /* to on the way to the root hub. It may */
+ /* be bigger than the size of the 'ports' */
+ /* array so userspace can detect overflows. */
+ __u8 ports[7]; /* List of ports on the way from the root */
+ /* hub to the device. Current limit in */
+ /* USB specification is 7 tiers (root hub, */
+ /* 5 intermediate hubs, device), which */
+ /* gives at most 6 port entries. */
+};
+
#define USBDEVFS_URB_SHORT_NOT_OK 0x01
#define USBDEVFS_URB_ISO_ASAP 0x02
#define USBDEVFS_URB_BULK_CONTINUATION 0x04
@@ -137,6 +157,7 @@ struct usbdevfs_hub_portinfo {
#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
#define USBDEVFS_CAP_MMAP 0x20
#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40
+#define USBDEVFS_CAP_CONNINFO_EX 0x80
/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
@@ -197,5 +218,10 @@ struct usbdevfs_streams {
#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams)
#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32)
#define USBDEVFS_GET_SPEED _IO('U', 31)
+/*
+ * Returns struct usbdevfs_conninfo_ex; length is variable to allow
+ * extending the size of the data returned.
+ */
+#define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
#endif /* _UAPI_LINUX_USBDEVICE_FS_H */
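
Because the buffer length is encoded in the ioctl number itself, the caller chooses how much of the structure to ask for, and the kernel reports its own view of the size in the `size` field, so both directions of mismatch are detectable. A hedged usage sketch (the device node path is illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/usbdevice_fs.h>

    int main(void)
    {
            struct usbdevfs_conninfo_ex ci;
            int fd = open("/dev/bus/usb/001/002", O_RDWR); /* illustrative */

            if (fd < 0)
                    return 1;
            memset(&ci, 0, sizeof(ci));
            if (ioctl(fd, USBDEVFS_CONNINFO_EX(sizeof(ci)), &ci) == 0)
                    printf("bus %u dev %u speed %u (kernel size %u)\n",
                           ci.busnum, ci.devnum, ci.speed, ci.size);
            close(fd);
            return 0;
    }
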
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index 03748a742146..bae8879cdf58 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -10,6 +10,11 @@
#include <stdbool.h>
#include <stddef.h>
+#ifdef __GLIBC__
+#include <bits/wordsize.h>
+#else
+#include <bits/reg.h>
+#endif
#include "libbpf_internal.h"
static inline size_t hash_bits(size_t h, int bits)
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index 5f54feb19977..d030c87ed9f5 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -126,7 +126,7 @@ vendor,family,model,stepping. For example: GenuineIntel,6,69,1
HEADER_TOTAL_MEM = 10,
-An uint64_t with the total memory in bytes.
+A uint64_t with the total memory in kilobytes.
HEADER_CMDLINE = 11,
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index b4e6f9e6204a..c29976eca4a8 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -355,6 +355,8 @@
431 common fsconfig __x64_sys_fsconfig
432 common fsmount __x64_sys_fsmount
433 common fspick __x64_sys_fspick
+434 common pidfd_open __x64_sys_pidfd_open
+435 common clone3 __x64_sys_clone3/ptregs
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
index 930b80f422e8..aa597ae53747 100755
--- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh
+++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
@@ -3,10 +3,13 @@
[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+# also as:
+# #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
+
printf "static const char *usbdevfs_ioctl_cmds[] = {\n"
-regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
-egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
- sed -r "s/$regex/\2 \1/g" | \
+regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
+egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
+ sed -r "s/$regex/\4 \1/g" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n\n"
printf "#if 0\n"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 20111f8da5cb..1903d7ec9797 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3559,6 +3559,13 @@ int perf_session__read_header(struct perf_session *session)
data->file.path);
}
+ if (f_header.attr_size == 0) {
+ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+ "Was the 'perf record' command properly terminated?\n",
+ data->file.path);
+ return -EINVAL;
+ }
+
nr_attrs = f_header.attrs.size / f_header.attr_size;
lseek(fd, f_header.attrs.offset, SEEK_SET);
@@ -3639,7 +3646,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
size += sizeof(struct perf_event_header);
size += ids * sizeof(u64);
- ev = malloc(size);
+ ev = zalloc(size);
if (ev == NULL)
return -ENOMEM;
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 4c223266299a..bdb69599c4bd 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, size_t len)
strtok(NULL, delim);
strtok(NULL, delim);
- if (strcmp(fs, "cgroup") == 0 &&
- strcmp(type, "cgroup2") == 0) {
+ if (strcmp(type, "cgroup2") == 0) {
strncpy(root, mount, len);
return 0;
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index 71231ad2dbfb..47315fe48d5a 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -262,7 +262,7 @@ test_mc_aware()
stop_traffic
- log_test "UC performace under MC overload"
+ log_test "UC performance under MC overload"
echo "UC-only throughput $(humanize $ucth1)"
echo "UC+MC throughput $(humanize $ucth2)"
@@ -316,7 +316,7 @@ test_uc_aware()
stop_traffic
- log_test "MC performace under UC overload"
+ log_test "MC performance under UC overload"
echo " ingress UC throughput $(humanize ${uc_ir})"
echo " egress UC throughput $(humanize ${uc_er})"
echo " sent $attempts BC ARPs, got $passes responses"
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
index 0a76314b4414..8b944cf042f6 100755
--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -28,7 +28,7 @@
# override by exporting to your environment prior running this script.
# For instance this script assumes you do not have xfs loaded upon boot.
# If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this
-# script if the filesyste module you don't have loaded upon bootup
+# script if the filesystem module you don't have loaded upon bootup
# is ext4 instead. Refer to allow_user_defaults() for a list of user
# override variables possible.
#
@@ -263,7 +263,7 @@ config_get_test_result()
config_reset()
{
if ! echo -n "1" >"$DIR"/reset; then
- echo "$0: reset shuld have worked" >&2
+ echo "$0: reset should have worked" >&2
exit 1
fi
}
@@ -488,7 +488,7 @@ usage()
echo Example uses:
echo
echo "${TEST_NAME}.sh -- executes all tests"
- echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recomended"
+ echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recommended"
echo "${TEST_NAME}.sh -w 0008 -- Watch test ID 0008 run until an error occurs"
echo "${TEST_NAME}.sh -s 0008 -- Run test ID 0008 once"
echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times"
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 30195449c63c..edcfeace4655 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -13,6 +13,14 @@ function log() {
echo "$1" > /dev/kmsg
}
+# skip(msg) - testing can't proceed
+# msg - explanation
+function skip() {
+ log "SKIP: $1"
+ echo "SKIP: $1" >&2
+ exit 4
+}
+
# die(msg) - game over, man
# msg - dying words
function die() {
@@ -43,6 +51,12 @@ function loop_until() {
done
}
+function assert_mod() {
+ local mod="$1"
+
+ modprobe --dry-run "$mod" &>/dev/null
+}
+
function is_livepatch_mod() {
local mod="$1"
@@ -75,6 +89,9 @@ function __load_mod() {
function load_mod() {
local mod="$1"; shift
+ assert_mod "$mod" ||
+ skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
is_livepatch_mod "$mod" &&
die "use load_lp() to load the livepatch module $mod"
@@ -88,6 +105,9 @@ function load_mod() {
function load_lp_nowait() {
local mod="$1"; shift
+ assert_mod "$mod" ||
+ skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
is_livepatch_mod "$mod" ||
die "module $mod is not a livepatch"
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 7eaa8a3de262..b632965e60eb 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -339,13 +339,9 @@ static int test_pidfd_send_signal_syscall_support(void)
ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
if (ret < 0) {
- /*
- * pidfd_send_signal() will currently return ENOSYS when
- * CONFIG_PROC_FS is not set.
- */
if (errno == ENOSYS)
ksft_exit_skip(
- "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n",
+ "%s test: pidfd_send_signal() syscall not supported\n",
test_name);
ksft_exit_fail_msg("%s test: Failed to send signal\n",
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 4602326b8f5b..a4f4d4cf22c3 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -451,7 +451,7 @@ static int test_vsys_x(void)
printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n",
segv_err);
} else {
- printf("[FAILT]\tExecution failed with the wrong error: #PF(0x%lx)\n",
+ printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n",
segv_err);
return 1;
}