-rw-r--r--.mailmap3
-rw-r--r--Documentation/RCU/Design/Data-Structures/Data-Structures.html118
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html22
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg123
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg16
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg56
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg237
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg12
-rw-r--r--Documentation/RCU/stallwarn.txt24
-rw-r--r--Documentation/RCU/whatisRCU.txt18
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/core-api/atomic_ops.rst2
-rw-r--r--Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt84
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/phy-ath79-usb.txt4
-rw-r--r--Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt34
-rw-r--r--Documentation/features/sched/membarrier-sync-core/arch-support.txt8
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/index.rst33
-rw-r--r--Documentation/filesystems/porting20
-rw-r--r--Documentation/filesystems/vfs.txt18
-rw-r--r--Documentation/hwmon/max3444016
-rw-r--r--Documentation/hwmon/mlxreg-fan60
-rw-r--r--Documentation/hwmon/npcm750-pwm-fan22
-rw-r--r--Documentation/hwmon/sysfs-interface48
-rw-r--r--Documentation/kprobes.txt35
-rw-r--r--Documentation/memory-barriers.txt43
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt22
-rw-r--r--Documentation/virtual/kvm/api.txt16
-rw-r--r--Documentation/x86/intel_rdt_ui.txt380
-rw-r--r--Documentation/x86/x86_64/boot-options.txt8
-rw-r--r--MAINTAINERS18
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/atomic.h64
-rw-r--r--arch/arc/include/asm/atomic.h86
-rw-r--r--arch/arc/include/asm/kprobes.h2
-rw-r--r--arch/arc/kernel/kprobes.c50
-rw-r--r--arch/arm/Kconfig20
-rw-r--r--arch/arm/Makefile4
-rw-r--r--arch/arm/include/asm/assembler.h4
-rw-r--r--arch/arm/include/asm/atomic.h55
-rw-r--r--arch/arm/include/asm/bitops.h92
-rw-r--r--arch/arm/include/asm/efi.h3
-rw-r--r--arch/arm/include/asm/hw_breakpoint.h7
-rw-r--r--arch/arm/include/asm/irq.h5
-rw-r--r--arch/arm/include/asm/kprobes.h2
-rw-r--r--arch/arm/include/asm/mach/arch.h2
-rw-r--r--arch/arm/include/asm/mach/time.h3
-rw-r--r--arch/arm/include/asm/probes.h1
-rw-r--r--arch/arm/include/asm/thread_info.h4
-rw-r--r--arch/arm/include/asm/tlb.h8
-rw-r--r--arch/arm/include/asm/uaccess.h26
-rw-r--r--arch/arm/kernel/entry-armv.S10
-rw-r--r--arch/arm/kernel/head-nommu.S12
-rw-r--r--arch/arm/kernel/hw_breakpoint.c78
-rw-r--r--arch/arm/kernel/irq.c10
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kernel/signal.c58
-rw-r--r--arch/arm/kernel/sys_oabi-compat.c8
-rw-r--r--arch/arm/kernel/time.c15
-rw-r--r--arch/arm/lib/copy_from_user.S9
-rw-r--r--arch/arm/mm/Kconfig1
-rw-r--r--arch/arm/mm/nommu.c3
-rw-r--r--arch/arm/mm/tcm.h2
-rw-r--r--arch/arm/plat-omap/counter_32k.c2
-rw-r--r--arch/arm/probes/kprobes/core.c139
-rw-r--r--arch/arm/probes/kprobes/test-core.c1
-rw-r--r--arch/arm/vfp/Makefile5
-rw-r--r--arch/arm/vfp/vfpmodule.c17
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S150
-rw-r--r--arch/arm64/crypto/ghash-ce-core.S76
-rw-r--r--arch/arm64/include/asm/atomic.h47
-rw-r--r--arch/arm64/include/asm/bitops.h21
-rw-r--r--arch/arm64/include/asm/efi.h3
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h7
-rw-r--r--arch/arm64/include/asm/irq.h2
-rw-r--r--arch/arm64/include/asm/kprobes.h1
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c86
-rw-r--r--arch/arm64/kernel/irq.c10
-rw-r--r--arch/arm64/kernel/probes/kprobes.c88
-rw-r--r--arch/arm64/lib/Makefile2
-rw-r--r--arch/arm64/lib/bitops.S76
-rw-r--r--arch/arm64/mm/mmu.c4
-rw-r--r--arch/h8300/include/asm/atomic.h19
-rw-r--r--arch/hexagon/include/asm/atomic.h18
-rw-r--r--arch/ia64/include/asm/atomic.h81
-rw-r--r--arch/ia64/include/asm/kprobes.h2
-rw-r--r--arch/ia64/include/uapi/asm/break.h1
-rw-r--r--arch/ia64/kernel/Makefile2
-rw-r--r--arch/ia64/kernel/jprobes.S90
-rw-r--r--arch/ia64/kernel/kprobes.c93
-rw-r--r--arch/m68k/Kconfig5
-rw-r--r--arch/m68k/apollo/config.c8
-rw-r--r--arch/m68k/atari/config.c5
-rw-r--r--arch/m68k/atari/time.c63
-rw-r--r--arch/m68k/bvme6000/config.c45
-rw-r--r--arch/m68k/configs/amiga_defconfig32
-rw-r--r--arch/m68k/configs/apollo_defconfig30
-rw-r--r--arch/m68k/configs/atari_defconfig29
-rw-r--r--arch/m68k/configs/bvme6000_defconfig30
-rw-r--r--arch/m68k/configs/hp300_defconfig30
-rw-r--r--arch/m68k/configs/mac_defconfig30
-rw-r--r--arch/m68k/configs/multi_defconfig32
-rw-r--r--arch/m68k/configs/mvme147_defconfig30
-rw-r--r--arch/m68k/configs/mvme16x_defconfig30
-rw-r--r--arch/m68k/configs/q40_defconfig30
-rw-r--r--arch/m68k/configs/sun3_defconfig28
-rw-r--r--arch/m68k/configs/sun3x_defconfig30
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/atomic.h24
-rw-r--r--arch/m68k/include/asm/bitops.h14
-rw-r--r--arch/m68k/include/asm/dma-mapping.h12
-rw-r--r--arch/m68k/include/asm/io.h7
-rw-r--r--arch/m68k/include/asm/io_mm.h42
-rw-r--r--arch/m68k/include/asm/io_no.h12
-rw-r--r--arch/m68k/include/asm/kmap.h9
-rw-r--r--arch/m68k/include/asm/machdep.h1
-rw-r--r--arch/m68k/include/asm/macintosh.h1
-rw-r--r--arch/m68k/include/asm/page_no.h2
-rw-r--r--arch/m68k/kernel/dma.c68
-rw-r--r--arch/m68k/kernel/setup_mm.c15
-rw-r--r--arch/m68k/kernel/setup_no.c21
-rw-r--r--arch/m68k/mac/config.c21
-rw-r--r--arch/m68k/mac/misc.c80
-rw-r--r--arch/m68k/mm/init.c1
-rw-r--r--arch/m68k/mm/mcfmmu.c13
-rw-r--r--arch/m68k/mm/motorola.c35
-rw-r--r--arch/m68k/mvme147/config.c7
-rw-r--r--arch/m68k/mvme16x/config.c8
-rw-r--r--arch/m68k/q40/config.c30
-rw-r--r--arch/m68k/sun3/config.c4
-rw-r--r--arch/mips/Kconfig56
-rw-r--r--arch/mips/Makefile22
-rw-r--r--arch/mips/alchemy/board-gpr.c3
-rw-r--r--arch/mips/alchemy/board-mtx1.c3
-rw-r--r--arch/mips/alchemy/board-xxs1500.c3
-rw-r--r--arch/mips/alchemy/devboards/platform.c3
-rw-r--r--arch/mips/ar7/clock.c29
-rw-r--r--arch/mips/ar7/prom.c4
-rw-r--r--arch/mips/ath25/Kconfig1
-rw-r--r--arch/mips/ath25/board.c6
-rw-r--r--arch/mips/ath25/early_printk.c5
-rw-r--r--arch/mips/ath79/clock.c193
-rw-r--r--arch/mips/ath79/common.c8
-rw-r--r--arch/mips/ath79/early_printk.c64
-rw-r--r--arch/mips/ath79/setup.c35
-rw-r--r--arch/mips/bcm63xx/early_printk.c1
-rw-r--r--arch/mips/bmips/dma.c32
-rw-r--r--arch/mips/bmips/setup.c7
-rw-r--r--arch/mips/boot/Makefile52
-rw-r--r--arch/mips/boot/compressed/uart-prom.c3
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi19
-rw-r--r--arch/mips/boot/dts/mscc/Makefile2
-rw-r--r--arch/mips/boot/dts/mscc/ocelot.dtsi32
-rw-r--r--arch/mips/boot/dts/mscc/ocelot_pcb123.dts10
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts3
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dpt_module.dts5
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts5
-rw-r--r--arch/mips/boot/dts/qca/ar9331_omega.dts5
-rw-r--r--arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts5
-rw-r--r--arch/mips/boot/ecoff.h61
-rw-r--r--arch/mips/boot/elf2ecoff.c31
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c191
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c5
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c7
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-spi.c8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c7
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c4
-rw-r--r--arch/mips/cavium-octeon/setup.c8
-rw-r--r--arch/mips/configs/ci20_defconfig2
-rw-r--r--arch/mips/configs/generic_defconfig3
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig1
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig1
-rw-r--r--arch/mips/configs/maltaaprp_defconfig1
-rw-r--r--arch/mips/configs/maltasmvp_defconfig1
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig1
-rw-r--r--arch/mips/configs/maltaup_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig1
-rw-r--r--arch/mips/fw/arc/arc_con.c1
-rw-r--r--arch/mips/fw/arc/promlib.c1
-rw-r--r--arch/mips/fw/sni/sniprom.c1
-rw-r--r--arch/mips/generic/Kconfig12
-rw-r--r--arch/mips/generic/Platform1
-rw-r--r--arch/mips/generic/board-ocelot_pcb123.its.S23
-rw-r--r--arch/mips/generic/init.c14
-rw-r--r--arch/mips/generic/yamon-dt.c4
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/atomic.h367
-rw-r--r--arch/mips/include/asm/bmips.h16
-rw-r--r--arch/mips/include/asm/cpu-features.h176
-rw-r--r--arch/mips/include/asm/cpu.h51
-rw-r--r--arch/mips/include/asm/dma-coherence.h6
-rw-r--r--arch/mips/include/asm/dma-direct.h17
-rw-r--r--arch/mips/include/asm/dma-mapping.h20
-rw-r--r--arch/mips/include/asm/io.h40
-rw-r--r--arch/mips/include/asm/kprobes.h13
-rw-r--r--arch/mips/include/asm/mach-ar7/spaces.h3
-rw-r--r--arch/mips/include/asm/mach-ath25/dma-coherence.h76
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h771
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h34
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h6
-rw-r--r--arch/mips/include/asm/mach-bmips/dma-coherence.h54
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h79
-rw-r--r--arch/mips/include/asm/mach-generic/dma-coherence.h73
-rw-r--r--arch/mips/include/asm/mach-generic/kmalloc.h3
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h10
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h70
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h92
-rw-r--r--arch/mips/include/asm/mach-jazz/dma-coherence.h60
-rw-r--r--arch/mips/include/asm/mach-loongson64/dma-coherence.h93
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h24
-rw-r--r--arch/mips/include/asm/mach-pic32/spaces.h1
-rw-r--r--arch/mips/include/asm/mipsregs.h29
-rw-r--r--arch/mips/include/asm/mmu_context.h2
-rw-r--r--arch/mips/include/asm/netlogic/xlr/fmn.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-asxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu-defs.h10062
-rw-r--r--arch/mips/include/asm/octeon/cvmx-gmxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-stxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/octeon.h9
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h3
-rw-r--r--arch/mips/include/asm/page.h11
-rw-r--r--arch/mips/include/asm/processor.h15
-rw-r--r--arch/mips/include/asm/setup.h2
-rw-r--r--arch/mips/include/asm/sgialib.h1
-rw-r--r--arch/mips/include/asm/sim.h12
-rw-r--r--arch/mips/include/asm/smp.h12
-rw-r--r--arch/mips/include/asm/txx9/generic.h1
-rw-r--r--arch/mips/include/asm/txx9/tx4939.h29
-rw-r--r--arch/mips/jazz/jazzdma.c141
-rw-r--r--arch/mips/jazz/setup.c17
-rw-r--r--arch/mips/jz4740/Platform2
-rw-r--r--arch/mips/kernel/cpu-probe.c3
-rw-r--r--arch/mips/kernel/early_printk.c2
-rw-r--r--arch/mips/kernel/early_printk_8250.c1
-rw-r--r--arch/mips/kernel/genex.S46
-rw-r--r--arch/mips/kernel/idle.c12
-rw-r--r--arch/mips/kernel/kprobes.c70
-rw-r--r--arch/mips/kernel/linux32.c29
-rw-r--r--arch/mips/kernel/process.c80
-rw-r--r--arch/mips/kernel/ptrace.c254
-rw-r--r--arch/mips/kernel/ptrace32.c2
-rw-r--r--arch/mips/kernel/relocate_kernel.S4
-rw-r--r--arch/mips/kernel/setup.c38
-rw-r--r--arch/mips/kernel/signal.c24
-rw-r--r--arch/mips/kernel/signal_n32.c12
-rw-r--r--arch/mips/kernel/signal_o32.c24
-rw-r--r--arch/mips/kernel/traps.c7
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/mips/lantiq/early_printk.c1
-rw-r--r--arch/mips/lantiq/prom.c8
-rw-r--r--arch/mips/lantiq/xway/dma.c3
-rw-r--r--arch/mips/lasat/prom.c1
-rw-r--r--arch/mips/lib/memset.S21
-rw-r--r--arch/mips/loongson32/Platform8
-rw-r--r--arch/mips/loongson64/Kconfig5
-rw-r--r--arch/mips/loongson64/common/Makefile6
-rw-r--r--arch/mips/loongson64/common/cs5536/cs5536_ohci.c2
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c109
-rw-r--r--arch/mips/loongson64/common/dma.c18
-rw-r--r--arch/mips/loongson64/common/early_printk.c1
-rw-r--r--arch/mips/loongson64/common/env.c3
-rw-r--r--arch/mips/loongson64/loongson-3/Makefile2
-rw-r--r--arch/mips/loongson64/loongson-3/dma.c25
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c3
-rw-r--r--arch/mips/mm/Makefile3
-rw-r--r--arch/mips/mm/c-r4k.c18
-rw-r--r--arch/mips/mm/cache.c4
-rw-r--r--arch/mips/mm/dma-default.c404
-rw-r--r--arch/mips/mm/dma-noncoherent.c208
-rw-r--r--arch/mips/mm/page.c15
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/mm/uasm-micromips.c1
-rw-r--r--arch/mips/mm/uasm-mips.c1
-rw-r--r--arch/mips/mti-malta/Makefile2
-rw-r--r--arch/mips/mti-malta/malta-pm.c96
-rw-r--r--arch/mips/mti-malta/malta-reset.c30
-rw-r--r--arch/mips/mti-malta/malta-setup.c39
-rw-r--r--arch/mips/netlogic/common/earlycons.c1
-rw-r--r--arch/mips/netlogic/xlp/dt.c14
-rw-r--r--arch/mips/paravirt/serial.c5
-rw-r--r--arch/mips/pci/ops-bridge.c4
-rw-r--r--arch/mips/pci/pci-ar2315.c24
-rw-r--r--arch/mips/pci/pci-ar724x.c42
-rw-r--r--arch/mips/pci/pci-ip27.c25
-rw-r--r--arch/mips/pci/pci-octeon.c4
-rw-r--r--arch/mips/pci/pcie-octeon.c6
-rw-r--r--arch/mips/pic32/pic32mzda/early_console.c5
-rw-r--r--arch/mips/ralink/early_printk.c7
-rw-r--r--arch/mips/sgi-ip27/ip27-console.c1
-rw-r--r--arch/mips/sgi-ip32/Makefile2
-rw-r--r--arch/mips/sgi-ip32/ip32-dma.c37
-rw-r--r--arch/mips/sibyte/Kconfig1
-rw-r--r--arch/mips/sibyte/common/cfe.c1
-rw-r--r--arch/mips/txx9/generic/setup.c1
-rw-r--r--arch/mips/vdso/Makefile9
-rw-r--r--arch/mips/vdso/genvdso.h51
-rw-r--r--arch/mips/vr41xx/common/pmu.c3
-rw-r--r--arch/openrisc/Kconfig5
-rw-r--r--arch/openrisc/include/asm/atomic.h4
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h3
-rw-r--r--arch/openrisc/include/asm/irq.h2
-rw-r--r--arch/openrisc/kernel/irq.c7
-rw-r--r--arch/parisc/Kconfig8
-rw-r--r--arch/parisc/include/asm/assembly.h2
-rw-r--r--arch/parisc/include/asm/atomic.h107
-rw-r--r--arch/parisc/include/asm/barrier.h32
-rw-r--r--arch/parisc/include/asm/dma-mapping.h5
-rw-r--r--arch/parisc/include/asm/linkage.h17
-rw-r--r--arch/parisc/include/asm/ptrace.h11
-rw-r--r--arch/parisc/include/asm/spinlock.h8
-rw-r--r--arch/parisc/include/asm/unwind.h3
-rw-r--r--arch/parisc/include/uapi/asm/errno.h1
-rw-r--r--arch/parisc/kernel/entry.S55
-rw-r--r--arch/parisc/kernel/pacache.S126
-rw-r--r--arch/parisc/kernel/pci-dma.c199
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/ptrace.c100
-rw-r--r--arch/parisc/kernel/real2.S6
-rw-r--r--arch/parisc/kernel/setup.c8
-rw-r--r--arch/parisc/kernel/syscall.S28
-rw-r--r--arch/parisc/kernel/traps.c2
-rw-r--r--arch/parisc/kernel/unwind.c93
-rw-r--r--arch/parisc/lib/lusercopy.S21
-rw-r--r--arch/parisc/mm/init.c11
-rw-r--r--arch/powerpc/include/asm/atomic.h69
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h7
-rw-r--r--arch/powerpc/include/asm/kprobes.h12
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c47
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c72
-rw-r--r--arch/powerpc/kernel/kprobes.c92
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S39
-rw-r--r--arch/powerpc/kvm/book3s_hv.c6
-rw-r--r--arch/powerpc/perf/core-book3s.c6
-rw-r--r--arch/riscv/include/asm/atomic.h166
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/Makefile42
-rw-r--r--arch/s390/appldata/appldata_base.c122
-rw-r--r--arch/s390/boot/Makefile45
-rw-r--r--arch/s390/boot/als.c (renamed from arch/s390/kernel/als.c)20
-rw-r--r--arch/s390/boot/compressed/.gitignore1
-rw-r--r--arch/s390/boot/compressed/Makefile55
-rw-r--r--arch/s390/boot/compressed/head.S8
-rw-r--r--arch/s390/boot/compressed/misc.c37
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S15
-rw-r--r--arch/s390/boot/compressed/vmlinux.scr.lds.S (renamed from arch/s390/boot/compressed/vmlinux.scr)4
-rw-r--r--arch/s390/boot/ebcdic.c2
-rw-r--r--arch/s390/boot/head.S (renamed from arch/s390/kernel/head.S)11
-rw-r--r--arch/s390/boot/head_kdump.S (renamed from arch/s390/kernel/head_kdump.S)0
-rw-r--r--arch/s390/boot/mem.S2
-rw-r--r--arch/s390/boot/sclp_early_core.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c4
-rw-r--r--arch/s390/hypfs/inode.c6
-rw-r--r--arch/s390/include/asm/ap.h284
-rw-r--r--arch/s390/include/asm/atomic.h65
-rw-r--r--arch/s390/include/asm/cpu_mf.h12
-rw-r--r--arch/s390/include/asm/gmap.h10
-rw-r--r--arch/s390/include/asm/hugetlb.h5
-rw-r--r--arch/s390/include/asm/kprobes.h2
-rw-r--r--arch/s390/include/asm/lowcore.h2
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/include/asm/nospec-insn.h4
-rw-r--r--arch/s390/include/asm/pci.h5
-rw-r--r--arch/s390/include/asm/pgtable.h13
-rw-r--r--arch/s390/include/asm/purgatory.h6
-rw-r--r--arch/s390/include/asm/qdio.h1
-rw-r--r--arch/s390/include/asm/sections.h2
-rw-r--r--arch/s390/include/asm/setup.h4
-rw-r--r--arch/s390/include/uapi/asm/chsc.h10
-rw-r--r--arch/s390/kernel/Makefile24
-rw-r--r--arch/s390/kernel/crash_dump.c104
-rw-r--r--arch/s390/kernel/early.c12
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/head64.S43
-rw-r--r--arch/s390/kernel/kprobes.c86
-rw-r--r--arch/s390/kernel/nospec-branch.c12
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c14
-rw-r--r--arch/s390/kernel/perf_regs.c6
-rw-r--r--arch/s390/kernel/setup.c4
-rw-r--r--arch/s390/kernel/sysinfo.c4
-rw-r--r--arch/s390/kernel/time.c15
-rw-r--r--arch/s390/kernel/topology.c44
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S13
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c82
-rw-r--r--arch/s390/kvm/priv.c105
-rw-r--r--arch/s390/lib/mem.S16
-rw-r--r--arch/s390/mm/cmm.c74
-rw-r--r--arch/s390/mm/extmem.c4
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/gmap.c454
-rw-r--r--arch/s390/mm/hugetlbpage.c24
-rw-r--r--arch/s390/mm/page-states.c2
-rw-r--r--arch/s390/mm/pageattr.c6
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/mm/pgtable.c159
-rw-r--r--arch/s390/net/bpf_jit_comp.c2
-rw-r--r--arch/s390/numa/numa.c16
-rw-r--r--arch/s390/pci/pci_debug.c8
-rw-r--r--arch/s390/purgatory/Makefile9
-rw-r--r--arch/s390/purgatory/head.S45
-rw-r--r--arch/s390/purgatory/purgatory.c9
-rw-r--r--arch/s390/scripts/Makefile.chkbss11
-rw-r--r--arch/s390/tools/gen_opcode_table.c4
-rw-r--r--arch/sh/include/asm/atomic.h35
-rw-r--r--arch/sh/include/asm/cmpxchg-xchg.h3
-rw-r--r--arch/sh/include/asm/hw_breakpoint.h8
-rw-r--r--arch/sh/include/asm/kprobes.h4
-rw-r--r--arch/sh/kernel/hw_breakpoint.c53
-rw-r--r--arch/sh/kernel/kprobes.c72
-rw-r--r--arch/sparc/include/asm/atomic_32.h24
-rw-r--r--arch/sparc/include/asm/atomic_64.h65
-rw-r--r--arch/sparc/include/asm/kprobes.h1
-rw-r--r--arch/sparc/kernel/kprobes.c65
-rw-r--r--arch/sparc/lib/atomic32.c4
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/boot/bitops.h3
-rw-r--r--arch/x86/boot/compressed/eboot.c545
-rw-r--r--arch/x86/boot/compressed/eboot.h12
-rw-r--r--arch/x86/boot/compressed/kaslr.c98
-rw-r--r--arch/x86/boot/string.c5
-rw-r--r--arch/x86/crypto/aegis128-aesni-asm.S2
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aegis128l-aesni-asm.S2
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aegis256-aesni-asm.S2
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S8
-rw-r--r--arch/x86/crypto/aesni-intel_avx-x86_64.S4
-rw-r--r--arch/x86/crypto/morus1280-avx2-asm.S2
-rw-r--r--arch/x86/crypto/morus1280-avx2-glue.c10
-rw-r--r--arch/x86/crypto/morus1280-sse2-asm.S2
-rw-r--r--arch/x86/crypto/morus1280-sse2-glue.c10
-rw-r--r--arch/x86/crypto/morus640-sse2-asm.S2
-rw-r--r--arch/x86/crypto/morus640-sse2-glue.c10
-rw-r--r--arch/x86/crypto/sha1_ssse3_asm.S2
-rw-r--r--arch/x86/entry/entry_32.S632
-rw-r--r--arch/x86/entry/entry_64.S5
-rw-r--r--arch/x86/entry/vdso/Makefile26
-rw-r--r--arch/x86/events/intel/core.c31
-rw-r--r--arch/x86/events/intel/ds.c49
-rw-r--r--arch/x86/events/intel/lbr.c56
-rw-r--r--arch/x86/events/perf_event.h6
-rw-r--r--arch/x86/hyperv/hv_apic.c59
-rw-r--r--arch/x86/hyperv/mmu.c80
-rw-r--r--arch/x86/include/asm/atomic.h32
-rw-r--r--arch/x86/include/asm/atomic64_32.h61
-rw-r--r--arch/x86/include/asm/atomic64_64.h50
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h4
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h7
-rw-r--r--arch/x86/include/asm/intel-family.h13
-rw-r--r--arch/x86/include/asm/intel-mid.h43
-rw-r--r--arch/x86/include/asm/intel_ds.h3
-rw-r--r--arch/x86/include/asm/irqflags.h2
-rw-r--r--arch/x86/include/asm/kprobes.h5
-rw-r--r--arch/x86/include/asm/kvm_guest.h7
-rw-r--r--arch/x86/include/asm/kvm_para.h1
-rw-r--r--arch/x86/include/asm/mmu_context.h5
-rw-r--r--arch/x86/include/asm/mshyperv.h34
-rw-r--r--arch/x86/include/asm/nospec-branch.h2
-rw-r--r--arch/x86/include/asm/orc_types.h2
-rw-r--r--arch/x86/include/asm/percpu.h7
-rw-r--r--arch/x86/include/asm/pgtable-2level.h9
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h3
-rw-r--r--arch/x86/include/asm/pgtable-3level.h7
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h6
-rw-r--r--arch/x86/include/asm/pgtable.h94
-rw-r--r--arch/x86/include/asm/pgtable_32.h2
-rw-r--r--arch/x86/include/asm/pgtable_32_types.h9
-rw-r--r--arch/x86/include/asm/pgtable_64.h89
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h3
-rw-r--r--arch/x86/include/asm/pgtable_types.h28
-rw-r--r--arch/x86/include/asm/processor-flags.h8
-rw-r--r--arch/x86/include/asm/processor.h1
-rw-r--r--arch/x86/include/asm/pti.h3
-rw-r--r--arch/x86/include/asm/refcount.h1
-rw-r--r--arch/x86/include/asm/sections.h1
-rw-r--r--arch/x86/include/asm/set_memory.h1
-rw-r--r--arch/x86/include/asm/switch_to.h16
-rw-r--r--arch/x86/include/asm/text-patching.h1
-rw-r--r--arch/x86/include/asm/tlbflush.h21
-rw-r--r--arch/x86/include/asm/trace/hyperv.h15
-rw-r--r--arch/x86/include/asm/tsc.h4
-rw-r--r--arch/x86/include/asm/unwind_hints.h16
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/vector.c19
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/asm-offsets.c5
-rw-r--r--arch/x86/kernel/asm-offsets_32.c10
-rw-r--r--arch/x86/kernel/asm-offsets_64.c2
-rw-r--r--arch/x86/kernel/cpu/Makefile4
-rw-r--r--arch/x86/kernel/cpu/amd.c13
-rw-r--r--arch/x86/kernel/cpu/bugs.c58
-rw-r--r--arch/x86/kernel/cpu/common.c48
-rw-r--r--arch/x86/kernel/cpu/intel.c10
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c11
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h143
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c129
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c1522
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h43
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c808
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c202
-rw-r--r--arch/x86/kernel/dumpstack.c28
-rw-r--r--arch/x86/kernel/head_32.S20
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c131
-rw-r--r--arch/x86/kernel/jump_label.c11
-rw-r--r--arch/x86/kernel/kprobes/common.h10
-rw-r--r--arch/x86/kernel/kprobes/core.c124
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c49
-rw-r--r--arch/x86/kernel/kprobes/opt.c1
-rw-r--r--arch/x86/kernel/kvm.c18
-rw-r--r--arch/x86/kernel/kvmclock.c258
-rw-r--r--arch/x86/kernel/ldt.c137
-rw-r--r--arch/x86/kernel/machine_kexec_32.c5
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c2
-rw-r--r--arch/x86/kernel/pci-iommu_table.c2
-rw-r--r--arch/x86/kernel/pcspeaker.c2
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/setup.c10
-rw-r--r--arch/x86/kernel/stacktrace.c42
-rw-r--r--arch/x86/kernel/tsc.c259
-rw-r--r--arch/x86/kernel/tsc_msr.c96
-rw-r--r--arch/x86/kernel/unwind_orc.c52
-rw-r--r--arch/x86/kernel/vm86_32.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S17
-rw-r--r--arch/x86/kernel/x86_init.c2
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/mm/dump_pagetables.c27
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/init.c37
-rw-r--r--arch/x86/mm/init_64.c14
-rw-r--r--arch/x86/mm/numa_emulation.c107
-rw-r--r--arch/x86/mm/pageattr.c19
-rw-r--r--arch/x86/mm/pgtable.c169
-rw-r--r--arch/x86/mm/pti.c261
-rw-r--r--arch/x86/mm/tlb.c224
-rw-r--r--arch/x86/platform/efi/efi_64.c101
-rw-r--r--arch/x86/platform/efi/quirks.c14
-rw-r--r--arch/x86/platform/intel-mid/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c23
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_weak_decls.h18
-rw-r--r--arch/x86/platform/intel-mid/mfld.c70
-rw-r--r--arch/x86/platform/intel-mid/mrfld.c105
-rw-r--r--arch/x86/platform/olpc/olpc.c4
-rw-r--r--arch/x86/platform/uv/tlb_uv.c2
-rw-r--r--arch/x86/power/hibernate_asm_64.S2
-rw-r--r--arch/x86/tools/relocs.c1
-rw-r--r--arch/x86/um/vdso/.gitignore1
-rw-r--r--arch/x86/um/vdso/Makefile16
-rw-r--r--arch/x86/xen/enlighten_pv.c51
-rw-r--r--arch/x86/xen/mmu_pv.c6
-rw-r--r--arch/x86/xen/suspend_pv.c5
-rw-r--r--arch/x86/xen/time.c18
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/include/asm/atomic.h98
-rw-r--r--arch/xtensa/include/asm/hw_breakpoint.h7
-rw-r--r--arch/xtensa/kernel/hw_breakpoint.c40
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/zram/zram_drv.c15
-rw-r--r--drivers/clk/clkdev.c5
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/mtk_timer.c268
-rw-r--r--drivers/clocksource/tegra20_timer.c4
-rw-r--r--drivers/clocksource/timer-atcpit100.c2
-rw-r--r--drivers/clocksource/timer-keystone.c2
-rw-r--r--drivers/clocksource/timer-mediatek.c328
-rw-r--r--drivers/clocksource/timer-sprd.c50
-rw-r--r--drivers/clocksource/timer-ti-32k.c3
-rw-r--r--drivers/clocksource/zevio-timer.c2
-rw-r--r--drivers/firmware/efi/Kconfig12
-rw-r--r--drivers/firmware/efi/cper.c19
-rw-r--r--drivers/firmware/efi/efi.c23
-rw-r--r--drivers/firmware/efi/esrt.c8
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c32
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c31
-rw-r--r--drivers/firmware/efi/libstub/efistub.h3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c202
-rw-r--r--drivers/gpio/gpiolib-acpi.c56
-rw-r--r--drivers/gpu/drm/drm_lease.c2
-rw-r--r--drivers/hwmon/Kconfig22
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adt7475.c340
-rw-r--r--drivers/hwmon/emc1403.c2
-rw-r--r--drivers/hwmon/iio_hwmon.c67
-rw-r--r--drivers/hwmon/k10temp.c8
-rw-r--r--drivers/hwmon/mlxreg-fan.c489
-rw-r--r--drivers/hwmon/nct6775.c6
-rw-r--r--drivers/hwmon/nct7904.c68
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c1057
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/hwmon/pmbus/max34440.c93
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c41
-rw-r--r--drivers/infiniband/core/rdma_core.c2
-rw-r--r--drivers/input/keyboard/hilkbd.c4
-rw-r--r--drivers/irqchip/Kconfig16
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c16
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c243
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-ingenic.c1
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/misc/cxl/api.c22
-rw-r--r--drivers/net/ethernet/8390/mac8390.c20
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c25
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c2
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/nubus/bus.c3
-rw-r--r--drivers/platform/mips/cpu_hwmon.c3
-rw-r--r--drivers/s390/block/dasd.c17
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c12
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/char/Makefile5
-rw-r--r--drivers/s390/char/keyboard.c28
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/sclp_async.c38
-rw-r--r--drivers/s390/char/tape_3590.c8
-rw-r--r--drivers/s390/char/tape_class.c6
-rw-r--r--drivers/s390/cio/chp.c21
-rw-r--r--drivers/s390/cio/chsc.c18
-rw-r--r--drivers/s390/cio/chsc.h18
-rw-r--r--drivers/s390/cio/cio.c77
-rw-r--r--drivers/s390/cio/cio.h1
-rw-r--r--drivers/s390/cio/css.c82
-rw-r--r--drivers/s390/cio/css.h3
-rw-r--r--drivers/s390/cio/qdio_main.c5
-rw-r--r--drivers/s390/cio/trace.h102
-rw-r--r--drivers/s390/crypto/ap_asm.h236
-rw-r--r--drivers/s390/crypto/ap_bus.c25
-rw-r--r--drivers/s390/crypto/ap_bus.h1
-rw-r--r--drivers/s390/crypto/ap_card.c1
-rw-r--r--drivers/s390/crypto/ap_queue.c20
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/zcrypt_card.c12
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h20
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c20
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c12
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c24
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c6
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c16
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c53
-rw-r--r--drivers/scsi/sr.c29
-rw-r--r--drivers/scsi/vmw_pvscsi.c11
-rw-r--r--drivers/vhost/vhost.c9
-rw-r--r--drivers/video/fbdev/efifb.c51
-rw-r--r--fs/9p/vfs_inode.c7
-rw-r--r--fs/9p/vfs_inode_dotl.c7
-rw-r--r--fs/adfs/inode.c2
-rw-r--r--fs/adfs/super.c1
-rw-r--r--fs/afs/dir.c45
-rw-r--r--fs/afs/dynroot.c25
-rw-r--r--fs/afs/rxrpc.c2
-rw-r--r--fs/aio.c232
-rw-r--r--fs/anon_inodes.c30
-rw-r--r--fs/attr.c5
-rw-r--r--fs/bad_inode.c2
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/btrfs/inode.c106
-rw-r--r--fs/ceph/file.c7
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/dir.c7
-rw-r--r--fs/dcache.c81
-rw-r--r--fs/efivarfs/inode.c4
-rw-r--r--fs/ext2/ialloc.c3
-rw-r--r--fs/ext2/namei.c9
-rw-r--r--fs/file_table.c85
-rw-r--r--fs/fuse/dir.c25
-rw-r--r--fs/gfs2/inode.c32
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hostfs/hostfs_kern.c28
-rw-r--r--fs/hpfs/dir.c23
-rw-r--r--fs/hugetlbfs/inode.c54
-rw-r--r--fs/inode.c53
-rw-r--r--fs/internal.h5
-rw-r--r--fs/jfs/jfs_imap.c8
-rw-r--r--fs/jfs/jfs_inode.c10
-rw-r--r--fs/jfs/namei.c12
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/locks.c20
-rw-r--r--fs/namei.c261
-rw-r--r--fs/namespace.c28
-rw-r--r--fs/nfs/dir.c23
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/open.c88
-rw-r--r--fs/pipe.c43
-rw-r--r--fs/timerfd.c14
-rw-r--r--fs/udf/namei.c12
-rw-r--r--fs/ufs/ialloc.c3
-rw-r--r--fs/ufs/namei.c9
-rw-r--r--fs/xfs/xfs_iops.c2
-rw-r--r--include/asm-generic/atomic-instrumented.h197
-rw-r--r--include/asm-generic/atomic.h33
-rw-r--r--include/asm-generic/atomic64.h15
-rw-r--r--include/asm-generic/bitops/atomic.h188
-rw-r--r--include/asm-generic/bitops/lock.h68
-rw-r--r--include/asm-generic/pgtable.h8
-rw-r--r--include/asm-generic/tlb.h10
-rw-r--r--include/linux/atomic.h453
-rw-r--r--include/linux/bitops.h22
-rw-r--r--include/linux/bits.h26
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h9
-rw-r--r--include/linux/compat_time.h9
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/dma-noncoherent.h8
-rw-r--r--include/linux/efi.h15
-rw-r--r--include/linux/file.h8
-rw-r--r--include/linux/fs.h41
-rw-r--r--include/linux/ima.h4
-rw-r--r--include/linux/irqchip/arm-gic-v3.h3
-rw-r--r--include/linux/kprobes.h53
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/lsm_hooks.h2
-rw-r--r--include/linux/mm_types.h241
-rw-r--r--include/linux/nmi.h10
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/posix-timers.h4
-rw-r--r--include/linux/pti.h1
-rw-r--r--include/linux/rculist.h19
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/refcount.h34
-rw-r--r--include/linux/sched.h5
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/sched_clock.h5
-rw-r--r--include/linux/security.h5
-rw-r--r--include/linux/smpboot.h15
-rw-r--r--include/linux/spinlock.h53
-rw-r--r--include/linux/srcu.h17
-rw-r--r--include/linux/swait.h36
-rw-r--r--include/linux/syscalls.h10
-rw-r--r--include/linux/time.h4
-rw-r--r--include/linux/time64.h1
-rw-r--r--include/linux/timekeeping.h5
-rw-r--r--include/linux/torture.h4
-rw-r--r--include/net/af_vsock.h4
-rw-r--r--include/net/llc.h5
-rw-r--r--include/trace/events/filelock.h5
-rw-r--r--include/trace/events/rcu.h112
-rw-r--r--include/uapi/linux/aio_abi.h6
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/time.h7
-rw-r--r--init/Kconfig15
-rw-r--r--init/main.c13
-rw-r--r--ipc/shm.c45
-rw-r--r--kernel/bpf/cpumap.c15
-rw-r--r--kernel/bpf/devmap.c14
-rw-r--r--kernel/bpf/sockmap.c9
-rw-r--r--kernel/bpf/syscall.c4
-rw-r--r--kernel/compat.c29
-rw-r--r--kernel/cpu.c9
-rw-r--r--kernel/dma/noncoherent.c8
-rw-r--r--kernel/events/core.c20
-rw-r--r--kernel/events/hw_breakpoint.c92
-rw-r--r--kernel/events/uprobes.c6
-rw-r--r--kernel/fail_function.c3
-rw-r--r--kernel/fork.c15
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/irq/irqdesc.c13
-rw-r--r--kernel/irq/manage.c47
-rw-r--r--kernel/irq/proc.c22
-rw-r--r--kernel/kprobes.c167
-rw-r--r--kernel/kthread.c6
-rw-r--r--kernel/locking/locktorture.c5
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/rcu/rcu.h104
-rw-r--r--kernel/rcu/rcuperf.c57
-rw-r--r--kernel/rcu/rcutorture.c462
-rw-r--r--kernel/rcu/srcutiny.c4
-rw-r--r--kernel/rcu/srcutree.c39
-rw-r--r--kernel/rcu/tiny.c4
-rw-r--r--kernel/rcu/tree.c1027
-rw-r--r--kernel/rcu/tree.h71
-rw-r--r--kernel/rcu/tree_exp.h18
-rw-r--r--kernel/rcu/tree_plugin.h188
-rw-r--r--kernel/rcu/update.c45
-rw-r--r--kernel/sched/Makefile2
-rw-r--r--kernel/sched/clock.c57
-rw-r--r--kernel/sched/completion.c8
-rw-r--r--kernel/sched/core.c144
-rw-r--r--kernel/sched/cpufreq_schedutil.c103
-rw-r--r--kernel/sched/deadline.c8
-rw-r--r--kernel/sched/debug.c37
-rw-r--r--kernel/sched/fair.c663
-rw-r--r--kernel/sched/pelt.c399
-rw-r--r--kernel/sched/pelt.h72
-rw-r--r--kernel/sched/rt.c15
-rw-r--r--kernel/sched/sched.h87
-rw-r--r--kernel/sched/swait.c32
-rw-r--r--kernel/sched/wait.c55
-rw-r--r--kernel/smpboot.c54
-rw-r--r--kernel/stop_machine.c43
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/test_kprobes.c94
-rw-r--r--kernel/time/alarmtimer.c7
-rw-r--r--kernel/time/clockevents.c6
-rw-r--r--kernel/time/clocksource.c149
-rw-r--r--kernel/time/hrtimer.c7
-rw-r--r--kernel/time/ntp.c23
-rw-r--r--kernel/time/ntp_internal.h4
-rw-r--r--kernel/time/posix-cpu-timers.c2
-rw-r--r--kernel/time/posix-stubs.c2
-rw-r--r--kernel/time/posix-timers.c68
-rw-r--r--kernel/time/posix-timers.h2
-rw-r--r--kernel/time/sched_clock.c2
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c2
-rw-r--r--kernel/time/time.c31
-rw-r--r--kernel/time/timekeeping.c183
-rw-r--r--kernel/time/timekeeping_debug.c2
-rw-r--r--kernel/time/timekeeping_internal.h2
-rw-r--r--kernel/time/timer.c31
-rw-r--r--kernel/torture.c15
-rw-r--r--kernel/trace/trace_kprobe.c11
-rw-r--r--kernel/watchdog.c147
-rw-r--r--kernel/watchdog_hld.c4
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Kconfig.ubsan11
-rw-r--r--lib/atomic64.c14
-rw-r--r--lib/debugobjects.c10
-rw-r--r--lib/ioremap.c4
-rw-r--r--lib/raid6/s390vx.uc34
-rw-r--r--lib/refcount.c55
-rw-r--r--mm/init-mm.c11
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory.c25
-rw-r--r--mm/page_alloc.c16
-rw-r--r--mm/shmem.c49
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/dccp/ccids/ccid2.c6
-rw-r--r--net/dsa/slave.c4
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/packet/af_packet.c10
-rw-r--r--net/rxrpc/ar-internal.h8
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/conn_event.c4
-rw-r--r--net/rxrpc/conn_object.c4
-rw-r--r--net/rxrpc/local_object.c2
-rw-r--r--net/rxrpc/net_ns.c6
-rw-r--r--net/rxrpc/output.c12
-rw-r--r--net/rxrpc/peer_event.c156
-rw-r--r--net/rxrpc/peer_object.c10
-rw-r--r--net/rxrpc/rxkad.c4
-rw-r--r--net/smc/af_smc.c15
-rw-r--r--net/socket.c29
-rw-r--r--net/tipc/net.c4
-rw-r--r--net/vmw_vsock/af_vsock.c15
-rw-r--r--net/vmw_vsock/vmci_transport.c3
-rw-r--r--samples/bpf/xdp_redirect_cpu_kern.c2
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c4
-rw-r--r--scripts/Makefile.ubsan4
-rw-r--r--security/Kconfig2
-rw-r--r--security/apparmor/lsm.c4
-rw-r--r--security/integrity/ima/ima.h4
-rw-r--r--security/integrity/ima/ima_appraise.c4
-rw-r--r--security/integrity/ima/ima_main.c16
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/smack/smack_lsm.c6
-rw-r--r--security/tomoyo/tomoyo.c2
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h20
-rw-r--r--tools/arch/parisc/include/uapi/asm/errno.h1
-rw-r--r--tools/include/uapi/asm-generic/unistd.h783
-rw-r--r--tools/include/uapi/linux/in.h301
-rw-r--r--tools/lib/bpf/btf.c2
-rw-r--r--tools/lib/bpf/btf.h2
-rw-r--r--tools/memory-model/Documentation/explanation.txt2
-rw-r--r--tools/memory-model/Documentation/recipes.txt12
-rw-r--r--tools/memory-model/README20
-rw-r--r--tools/memory-model/linux-kernel.bell2
-rw-r--r--tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus (renamed from tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus2
-rw-r--r--tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus (renamed from tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus (renamed from tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/R+fencembonceonces.litmus (renamed from tools/memory-model/litmus-tests/R+mbonceonces.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/README25
-rw-r--r--tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus (renamed from tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/SB+fencembonceonces.litmus (renamed from tools/memory-model/litmus-tests/SB+mbonceonces.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus32
-rw-r--r--tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus (renamed from tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus (renamed from tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus)2
-rwxr-xr-x[-rw-r--r--]tools/memory-model/scripts/checkalllitmus.sh2
-rwxr-xr-x[-rw-r--r--]tools/memory-model/scripts/checklitmus.sh2
-rw-r--r--tools/objtool/arch/x86/include/asm/orc_types.h2
-rw-r--r--tools/objtool/check.c1
-rw-r--r--tools/objtool/check.h2
-rw-r--r--tools/objtool/orc_dump.c3
-rw-r--r--tools/objtool/orc_gen.c2
-rw-r--r--tools/perf/Documentation/perf-list.txt10
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Makefile.config6
-rw-r--r--tools/perf/Makefile.perf10
-rw-r--r--tools/perf/arch/arm64/Makefile21
-rwxr-xr-xtools/perf/arch/arm64/entry/syscalls/mksyscalltbl62
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c10
-rw-r--r--tools/perf/arch/s390/util/kvm-stat.c2
-rw-r--r--tools/perf/builtin-c2c.c7
-rw-r--r--tools/perf/builtin-diff.c2
-rw-r--r--tools/perf/builtin-report.c4
-rw-r--r--tools/perf/builtin-stat.c60
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c19
-rwxr-xr-xtools/perf/check-headers.sh3
-rw-r--r--tools/perf/include/bpf/bpf.h3
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json87
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/extended.json18
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/extended.json56
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/transaction.json7
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/basic.json8
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json53
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/transaction.json7
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/extended.json24
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/extended.json35
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json7
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rw-r--r--tools/perf/tests/builtin-test.c2
-rw-r--r--tools/perf/tests/parse-events.c18
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh36
-rw-r--r--tools/perf/trace/beauty/Build1
-rw-r--r--tools/perf/trace/beauty/beauty.h3
-rwxr-xr-xtools/perf/trace/beauty/drm_ioctl.sh9
-rwxr-xr-xtools/perf/trace/beauty/kcmp_type.sh2
-rwxr-xr-xtools/perf/trace/beauty/kvm_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/madvise_behavior.sh2
-rwxr-xr-xtools/perf/trace/beauty/perf_ioctl.sh2
-rwxr-xr-xtools/perf/trace/beauty/pkey_alloc_access_rights.sh2
-rwxr-xr-xtools/perf/trace/beauty/sndrv_ctl_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/sndrv_pcm_ioctl.sh4
-rw-r--r--tools/perf/trace/beauty/socket.c28
-rwxr-xr-xtools/perf/trace/beauty/socket_ipproto.sh11
-rwxr-xr-xtools/perf/trace/beauty/vhost_virtio_ioctl.sh6
-rw-r--r--tools/perf/ui/stdio/hist.c8
-rw-r--r--tools/perf/util/bpf-loader.c4
-rw-r--r--tools/perf/util/comm.c16
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c10
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h1
-rw-r--r--tools/perf/util/cs-etm.c71
-rw-r--r--tools/perf/util/evsel.c25
-rw-r--r--tools/perf/util/evsel.h9
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/machine.c79
-rw-r--r--tools/perf/util/metricgroup.c26
-rw-r--r--tools/perf/util/metricgroup.h1
-rw-r--r--tools/perf/util/pmu.c6
-rw-r--r--tools/perf/util/stat-shadow.c5
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/unwind-libdw.c2
-rw-r--r--tools/perf/util/unwind-libunwind-local.c2
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh26
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh11
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh7
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh2
-rw-r--r--tools/testing/selftests/rseq/param_test.c24
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h513
-rw-r--r--tools/testing/selftests/rseq/rseq.h2
-rw-r--r--tools/testing/selftests/timers/raw_skew.c5
-rw-r--r--virt/kvm/arm/arm.c4
-rw-r--r--virt/kvm/arm/psci.c2
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/kvm_main.c4
1021 files changed, 23283 insertions, 24984 deletions
diff --git a/.mailmap b/.mailmap
index 29ddeb1bf015..d96147eb1a68 100644
--- a/.mailmap
+++ b/.mailmap
@@ -81,6 +81,9 @@ Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
+Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
+Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
+Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 6c06e10bd04b..f5120a00f511 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -380,31 +380,26 @@ and therefore need no protection.
as follows:
<pre>
- 1 unsigned long gpnum;
- 2 unsigned long completed;
+ 1 unsigned long gp_seq;
</pre>
<p>RCU grace periods are numbered, and
-the <tt>-&gt;gpnum</tt> field contains the number of the grace
-period that started most recently.
-The <tt>-&gt;completed</tt> field contains the number of the
-grace period that completed most recently.
-If the two fields are equal, the RCU grace period that most recently
-started has already completed, and therefore the corresponding
-flavor of RCU is idle.
-If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
-then <tt>-&gt;gpnum</tt> gives the number of the current RCU
-grace period, which has not yet completed.
-Any other combination of values indicates that something is broken.
-These two fields are protected by the root <tt>rcu_node</tt>'s
+the <tt>-&gt;gp_seq</tt> field contains the current grace-period
+sequence number.
+The bottom two bits are the state of the current grace period,
+which can be zero for not yet started or one for in progress.
+In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
+zero, the corresponding flavor of RCU is idle.
+Any other value in the bottom two bits indicates that something is broken.
+This field is protected by the root <tt>rcu_node</tt> structure's
<tt>-&gt;lock</tt> field.
-</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+</p><p>There are <tt>-&gt;gp_seq</tt> fields
in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
as well.
The fields in the <tt>rcu_state</tt> structure represent the
-most current values, and those of the other structures are compared
-in order to detect the start of a new grace period in a distributed
+most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a distributed
fashion.
The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
@@ -512,27 +507,47 @@ than to be heisenbugged out of existence.
as follows:
<pre>
- 1 unsigned long gpnum;
- 2 unsigned long completed;
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
</pre>
-<p>These fields are the counterparts of the fields of the same name in
-the <tt>rcu_state</tt> structure.
-They each may lag up to one behind their <tt>rcu_state</tt>
-counterparts.
-If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_node</tt>
+<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
+the counterparts of the field of the same name in the <tt>rcu_state</tt>
+structure.
+They each may lag up to one step behind their <tt>rcu_state</tt>
+counterpart.
+If the bottom two bits of a given <tt>rcu_node</tt> structure's
+<tt>-&gt;gp_seq</tt> field is zero, then this <tt>rcu_node</tt>
structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_node</tt> believes
-is still being waited for.
+</p><p>The <tt>&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning and the end
+of each grace period.
+
+<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
+furthest-in-the-future grace period request seen by the corresponding
+<tt>rcu_node</tt> structure. The request is considered fulfilled when
+the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
+the <tt>-&gt;gp_seq_needed</tt> field.
-</p><p>The <tt>&gt;gpnum</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning
-of each grace period, and the <tt>-&gt;completed</tt> fields are
-updated at the end of each grace period.
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ Suppose that this <tt>rcu_node</tt> structure doesn't see
+ a request for a very long time.
+ Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
+ problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
+ <tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
+ will be updated at the end of the grace period.
+ Modulo-arithmetic comparisons therefore will always get the
+ correct answer, even with wrapping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
<h5>Quiescent-State Tracking</h5>
@@ -626,9 +641,8 @@ normal and expedited grace periods, respectively.
</ol>
<p><font color="ffffff">So the locking is absolutely required in
- order to coordinate
- clearing of the bits with the grace-period numbers in
- <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+ order to coordinate clearing of the bits with updating of the
+ grace-period sequence number in <tt>-&gt;gp_seq</tt>.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
@@ -1038,15 +1052,15 @@ out any <tt>rcu_data</tt> structure for which this flag is not set.
as follows:
<pre>
- 1 unsigned long completed;
- 2 unsigned long gpnum;
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
3 bool cpu_no_qs;
4 bool core_needs_qs;
5 bool gpwrap;
6 unsigned long rcu_qs_ctr_snap;
</pre>
-<p>The <tt>completed</tt> and <tt>gpnum</tt>
+<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
fields are the counterparts of the fields of the same name
in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
They may each lag up to one behind their <tt>rcu_node</tt>
@@ -1054,15 +1068,9 @@ counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
will catch up upon exit from dyntick-idle mode).
-If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_data</tt>
+If the lower two bits of a given <tt>rcu_data</tt> structure's
+<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
-structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_data</tt> believes
-is still being waited for.
<table>
<tr><th>&nbsp;</th></tr>
@@ -1070,13 +1078,13 @@ is still being waited for.
<tr><td>
All this replication of the grace period numbers can only cause
massive confusion.
- Why not just keep a global pair of counters and be done with it???
+ Why not just keep a global sequence number and be done with it???
</td></tr>
<tr><th align="left">Answer:</th></tr>
<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because if there was only a single global pair of grace-period
+ Because if there was only a single global sequence
numbers, there would need to be a single global lock to allow
- safely accessing and updating them.
+ safely accessing and updating it.
And if we are not going to have a single global lock, we need
to carefully manage the numbers on a per-node basis.
Recall from the answer to a previous Quick Quiz that the consequences
@@ -1091,8 +1099,8 @@ CPU has not yet passed through a quiescent state,
while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
RCU core needs a quiescent state from the corresponding CPU.
The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the <tt>completed</tt>
-and <tt>gpnum</tt> counters are in danger of overflow, which
+CPU has remained idle for so long that the
+<tt>gp_seq</tt> counter is in danger of overflow, which
will cause the CPU to disregard the values of its counters on
its next exit from idle.
Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
@@ -1130,10 +1138,10 @@ The CPU advances the callbacks in its <tt>rcu_data</tt> structure
whenever it notices that another RCU grace period has completed.
The CPU detects the completion of an RCU grace period by noticing
that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>-&gt;gp_seq</tt> field differs from that of its leaf
<tt>rcu_node</tt> structure.
Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field is updated at the end of each
+<tt>-&gt;gp_seq</tt> field is updated at the beginnings and ends of each
grace period.
<p>
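
The ->gp_seq encoding that the documentation changes above describe can be
illustrated with a short stand-alone sketch. This is a simplified model of
the rcu_seq_*() helpers in kernel/rcu/rcu.h, not the kernel's exact code
(the in-kernel helpers differ in signature; seq_at_or_after() here is a
hypothetical name for the comparison the Quick Quiz answer describes): the
low two bits hold the grace-period state (zero for idle, one for in
progress) and the remaining bits hold the counter, so an unsigned modular
subtraction compares sequence numbers correctly even across wrap.

#include <limits.h>
#include <stdio.h>

/*
 * Sketch of the ->gp_seq encoding: the bottom RCU_SEQ_CTR_SHIFT bits
 * hold the grace-period state, the remaining bits hold the count.
 */
#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Bottom two bits: zero means idle, one means a grace period is in progress. */
static unsigned long rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/* Counter portion: how many grace periods have been started. */
static unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Wrap-safe "cur is at or after needed": the unsigned subtraction is
 * modular, so a request recorded in ->gp_seq_needed is still compared
 * correctly even after ->gp_seq wraps around.
 */
static int seq_at_or_after(unsigned long cur, unsigned long needed)
{
	return ULONG_MAX / 2 >= cur - needed;
}

int main(void)
{
	unsigned long gp_seq = 0;
	unsigned long needed = 1UL << RCU_SEQ_CTR_SHIFT;	/* value after GP #1 ends */

	gp_seq += 1;					/* start: state goes 0 -> 1 */
	printf("ctr=%lu state=%lu\n", rcu_seq_ctr(gp_seq), rcu_seq_state(gp_seq));
	gp_seq = (gp_seq | RCU_SEQ_STATE_MASK) + 1;	/* end: state back to 0 */
	printf("ctr=%lu state=%lu fulfilled=%d\n", rcu_seq_ctr(gp_seq),
	       rcu_seq_state(gp_seq), seq_at_or_after(gp_seq, needed));
	return 0;
}

With this encoding, "RCU is idle" is simply rcu_seq_state(gp_seq) == 0,
which is the check the rcu_state, rcu_node, and rcu_data text above now
describes in place of the old ->gpnum/->completed equality test.
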
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8651b0b4fd79..a346ce0116eb 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -357,7 +357,7 @@ parts, starting in this section with the various phases of
grace-period initialization.
<p>The first ordering-related grace-period initialization action is to
-increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
grace-period-number counter, as shown below:
</p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -388,7 +388,7 @@ its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
<p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
<tt>rcu_state</tt> structure, as shown in the following diagram.
</p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -398,9 +398,9 @@ tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
to notice that a new grace period has started, as described in the next
section.
But because the grace-period kthread started the grace period at the
-root (with the increment of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+root (with the advancing of the <tt>rcu_state</tt> structure's
+<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
the start of the grace period will happen after the actual start
of the grace period.
@@ -466,7 +466,7 @@ section that the grace period must wait on.
<tr><td>
But a RCU read-side critical section might have started
after the beginning of the grace period
- (the <tt>-&gt;gpnum++</tt> from earlier), so why should
+ (the advancing of <tt>-&gt;gp_seq</tt> from earlier), so why should
the grace period wait on such a critical section?
</td></tr>
<tr><th align="left">Answer:</th></tr>
@@ -609,10 +609,8 @@ states outstanding from other CPUs.
<h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
<p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first setting all the <tt>-&gt;completed</tt> fields equal
-to the number of the newly completed grace period, then it sets
-the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
-again to the number of the newly completed grace period.
+breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
+advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
The ordering effects are shown below:
</p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
@@ -634,7 +632,7 @@ grace-period cleanup is complete, the next grace period can begin.
CPU has reported its quiescent state, but it may be some
milliseconds before RCU becomes aware of this.
The latest reasonable candidate is once the <tt>rcu_state</tt>
- structure's <tt>-&gt;completed</tt> field has been updated,
+ structure's <tt>-&gt;gp_seq</tt> field has been updated,
but it is quite possible that some CPUs have already completed
phase two of their updates by that time.
In short, if you are going to work with RCU, you need to
@@ -647,7 +645,7 @@ grace-period cleanup is complete, the next grace period can begin.
<h4><a name="Callback Invocation">Callback Invocation</a></h4>
<p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
invoking its RCU callbacks that were waiting for this grace period
to end.
These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
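The rcu_seq_start() and rcu_seq_end() calls now named in these diagrams
replace the old ->gpnum/->completed pair with a single sequence counter.
A minimal sketch of their mechanics, modeled on this era's
kernel/rcu/rcu.h (WARN_ON_ONCE() sanity checks omitted); the low-order
bits of a gp_seq value carry the phase and the upper bits count grace
periods:

	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

	/* Begin a grace period: the low-order phase bits become nonzero. */
	static void rcu_seq_start(unsigned long *sp)
	{
		WRITE_ONCE(*sp, *sp + 1);
		smp_mb(); /* Order GP start before accesses that follow. */
	}

	/* End a grace period: clear the phase bits and bump the count. */
	static void rcu_seq_end(unsigned long *sp)
	{
		smp_mb(); /* Order prior quiescent states before GP end. */
		WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) +
			       (1UL << RCU_SEQ_CTR_SHIFT));
	}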
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
index 754f426b297a..bf84fbab27ee 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
@@ -384,11 +384,11 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="617.89017"
- inkscape:cy="542.52419"
- inkscape:window-x="86"
- inkscape:window-y="28"
+ inkscape:zoom="0.78716603"
+ inkscape:cx="513.06403"
+ inkscape:cy="623.1214"
+ inkscape:window-x="102"
+ inkscape:window-y="38"
inkscape:window-maximized="0"
inkscape:current-layer="g3188-3"
fit-margin-top="5"
@@ -417,13 +417,15 @@
id="g3188">
<text
xml:space="preserve"
- x="3199.1516"
+ x="3145.9592"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3143">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
@@ -502,13 +504,15 @@
</g>
<text
xml:space="preserve"
- x="5324.5371"
- y="15414.598"
+ x="5264.4731"
+ y="15428.84"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-753"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -547,15 +551,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-6-0">Leaf</tspan></text>
- <text
- xml:space="preserve"
- x="7479.5796"
- y="17699.943"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<path
sodipodi:nodetypes="cc"
inkscape:connector-curvature="0"
@@ -566,15 +561,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(-737.93887,7732.6672)"
id="g3188-3">
- <text
- xml:space="preserve"
- x="3225.7478"
- y="13175.802"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
<g
id="g3107-62"
transform="translate(947.90548,11584.029)">
@@ -607,15 +593,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7">Root</tspan></text>
- <text
- xml:space="preserve"
- x="3225.7478"
- y="13390.038"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
<flowRoot
xml:space="preserve"
id="flowRoot3356"
@@ -627,7 +604,18 @@
height="63.63961"
x="332.34018"
y="681.87292" /></flowRegion><flowPara
- id="flowPara3362" /></flowRoot> </g>
+ id="flowPara3362" /></flowRoot> <text
+ xml:space="preserve"
+ x="3156.6121"
+ y="13317.754"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+ </g>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(-858.40227,7769.0342)"
@@ -859,6 +847,17 @@
id="path3414-8-3-6-6"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ x="7418.769"
+ y="17646.104"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-70"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-93">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-1642.5377,-11611.245)"
@@ -887,13 +886,15 @@
</g>
<text
xml:space="preserve"
- x="5327.3057"
+ x="5274.1133"
y="15428.84"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-36"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-151.71746,-11647.612)"
@@ -972,13 +973,15 @@
id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7486.4907"
- y="17670.119"
+ x="7408.5918"
+ y="17619.504"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-6817.1997,-11647.612)"
@@ -1019,13 +1022,15 @@
id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7474.1382"
- y="17688.926"
+ x="7416.8003"
+ y="17619.504"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-56">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -1059,15 +1064,6 @@
id="path3414-8-3-6"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="7318.9653"
- y="6031.6353"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
style="fill:none;stroke-width:0.025in"
id="g4504-3-9"
@@ -1123,4 +1119,15 @@
id="path3134-9-0-3-5"
d="m 6875.6003,15833.906 1595.7755,0"
style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+ <text
+ xml:space="preserve"
+ x="7275.2612"
+ y="5971.8916"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-2">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
index 0161262904ec..8c207550818f 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
@@ -272,13 +272,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="617.89019"
- inkscape:cy="636.57143"
- inkscape:window-x="697"
+ inkscape:zoom="2.6330492"
+ inkscape:cx="524.82797"
+ inkscape:cy="519.31194"
+ inkscape:window-x="79"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g3188"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -305,13 +305,15 @@
id="g3188">
<text
xml:space="preserve"
- x="3305.5364"
+ x="3119.363"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
index de6ecc51b00e..d24d7d555dbc 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
@@ -19,7 +19,7 @@
id="svg2"
version="1.1"
inkscape:version="0.48.4 r9939"
- sodipodi:docname="TreeRCU-gp-init-2.svg">
+ sodipodi:docname="TreeRCU-gp-init-3.svg">
<metadata
id="metadata212">
<rdf:RDF>
@@ -257,18 +257,22 @@
inkscape:window-width="1087"
inkscape:window-height="1144"
id="namedview208"
- showgrid="false"
- inkscape:zoom="0.70710678"
+ showgrid="true"
+ inkscape:zoom="0.68224756"
inkscape:cx="617.89019"
inkscape:cy="625.84293"
- inkscape:window-x="697"
+ inkscape:window-x="54"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g3153"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
- fit-margin-bottom="5" />
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3090" />
+ </sodipodi:namedview>
<path
sodipodi:nodetypes="cccccccccccccccccccccccc"
inkscape:connector-curvature="0"
@@ -281,13 +285,13 @@
id="g3188">
<text
xml:space="preserve"
- x="3305.5364"
+ x="3145.9592"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
@@ -366,13 +370,13 @@
</g>
<text
xml:space="preserve"
- x="5392.3345"
- y="15407.104"
+ x="5253.6904"
+ y="15407.032"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -413,13 +417,13 @@
id="tspan3104-6-5-6-0">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7536.4883"
- y="17640.934"
+ x="7415.4365"
+ y="17670.572"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-1642.5375,-11610.962)"
@@ -448,13 +452,13 @@
</g>
<text
xml:space="preserve"
- x="5378.4146"
- y="15436.927"
+ x="5258.0688"
+ y="15412.313"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-151.71726,-11647.329)"
@@ -533,13 +537,13 @@
id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7520.1294"
- y="17673.639"
+ x="7405.2607"
+ y="17670.572"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-35"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-6817.1998,-11647.329)"
@@ -580,13 +584,13 @@
id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7521.4663"
- y="17666.062"
+ x="7413.4688"
+ y="17670.566"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-75"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -622,11 +626,11 @@
sodipodi:nodetypes="cc" />
<text
xml:space="preserve"
- x="7370.856"
- y="5997.5972"
+ x="7271.9297"
+ y="6023.2412"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-62"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index b13b7b01bb3a..acd73c7ad0f4 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -1070,13 +1070,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.6004608"
- inkscape:cx="826.65969"
- inkscape:cy="483.3047"
- inkscape:window-x="66"
- inkscape:window-y="28"
+ inkscape:zoom="0.81932583"
+ inkscape:cx="840.45848"
+ inkscape:cy="5052.4242"
+ inkscape:window-x="787"
+ inkscape:window-y="24"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g4"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -1543,15 +1543,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1749.0282,658.72243)"
id="g3188">
- <text
- xml:space="preserve"
- x="3305.5364"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
<g
id="g3107-62"
transform="translate(947.90548,11584.029)">
@@ -1584,6 +1575,17 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3137.9988"
+ y="13271.316"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-626"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
</g>
<rect
ry="0"
@@ -2318,15 +2320,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1739.0986,17188.625)"
id="g3188-6">
- <text
- xml:space="preserve"
- x="3305.5364"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
<g
id="g3107-5"
transform="translate(947.90548,11584.029)">
@@ -2359,6 +2352,15 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-1">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3147.9268"
+ y="13240.524"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -2387,13 +2389,13 @@
</g>
<text
xml:space="preserve"
- x="5392.3345"
- y="15407.104"
+ x="5263.1094"
+ y="15411.646"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6-7"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-92"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -2434,13 +2436,13 @@
id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7536.4883"
- y="17640.934"
+ x="7417.4053"
+ y="17655.502"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-759"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-2353.8462,17224.992)"
@@ -2469,13 +2471,13 @@
</g>
<text
xml:space="preserve"
- x="5378.4146"
- y="15436.927"
+ x="5246.1548"
+ y="15411.648"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-87"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-863.02613,17188.625)"
@@ -2554,13 +2556,13 @@
id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7520.1294"
- y="17673.639"
+ x="7433.8257"
+ y="17682.098"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-35"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-7528.5085,17188.625)"
@@ -2601,13 +2603,13 @@
id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7521.4663"
- y="17666.062"
+ x="7415.4404"
+ y="17682.098"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-75-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -2641,15 +2643,6 @@
id="path3414-8-3-6-4"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="6659.5469"
- y="34833.551"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-62"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
@@ -3844,7 +3837,7 @@
font-weight="bold"
font-size="192"
id="text202-6-6-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
<text
xml:space="preserve"
x="5035.4155"
@@ -4284,15 +4277,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1874.038,53203.538)"
id="g3188-7">
- <text
- xml:space="preserve"
- x="3199.1516"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-82"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
id="g3107-53"
transform="translate(947.90548,11584.029)">
@@ -4325,6 +4309,17 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-19">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3175.896"
+ y="13240.11"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<rect
ry="0"
@@ -4371,13 +4366,15 @@
</g>
<text
xml:space="preserve"
- x="5324.5371"
- y="15414.598"
+ x="5264.4829"
+ y="15411.231"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-753"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -4412,30 +4409,12 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
- <text
- xml:space="preserve"
- x="10084.225"
- y="70903.312"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-9-0"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="path3134-9-0-3-9"
d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
- <text
- xml:space="preserve"
- x="5092.4683"
- y="74111.672"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
<g
style="fill:none;stroke-width:0.025in"
id="g3107-62-6"
@@ -4469,15 +4448,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7-7">Root</tspan></text>
- <text
- xml:space="preserve"
- x="5092.4683"
- y="74325.906"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(1746.2528,60972.572)"
@@ -4736,13 +4706,15 @@
</g>
<text
xml:space="preserve"
- x="5327.3057"
- y="15428.84"
+ x="5274.1216"
+ y="15411.231"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-36"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-6">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-728.08545,53203.538)"
@@ -4821,13 +4793,15 @@
id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7486.4907"
- y="17670.119"
+ x="7435.1987"
+ y="17708.281"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-1">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-7393.5687,53203.538)"
@@ -4868,13 +4842,15 @@
id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7474.1382"
- y="17688.926"
+ x="7416.8125"
+ y="17708.281"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-5-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-35"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-62">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -4908,15 +4884,6 @@
id="path3414-8-3-6-67"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="6742.6001"
- y="70882.617"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
style="fill:none;stroke-width:0.025in"
id="g4504-3-9-6"
@@ -5131,5 +5098,47 @@
font-size="192"
id="text202-7-9-6-6-7"
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+ <text
+ xml:space="preserve"
+ x="6698.9019"
+ y="70885.211"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-7">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="10023.457"
+ y="70885.234"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="5023.3389"
+ y="74209.773"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-36"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="6562.5884"
+ y="34870.727"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index de3992f4cbe1..149bec2a4493 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -300,13 +300,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="616.47598"
- inkscape:cy="595.41964"
- inkscape:window-x="813"
+ inkscape:zoom="0.96484375"
+ inkscape:cx="507.0191"
+ inkscape:cy="885.62207"
+ inkscape:window-x="47"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="g4405"
+ inkscape:current-layer="g3115"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -710,7 +710,7 @@
font-weight="bold"
font-size="192"
id="text202-6-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
<text
xml:space="preserve"
x="5035.4155"
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4259f95c3261..f99cf11b314b 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -172,7 +172,7 @@ it will print a message similar to the following:
INFO: rcu_sched detected stalls on CPUs/tasks:
2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
- (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625)
+ (detected by 32, t=2603 jiffies, g=7075, q=625)
This message indicates that CPU 32 detected that CPUs 2 and 16 were both
causing stalls, and that the stall was affecting RCU-sched. This message
@@ -215,11 +215,10 @@ CPU since the last time that this CPU noted the beginning of a grace
period.
The "detected by" line indicates which CPU detected the stall (in this
-case, CPU 32), how many jiffies have elapsed since the start of the
-grace period (in this case 2603), the number of the last grace period
-to start and to complete (7073 and 7072, respectively), and an estimate
-of the total number of RCU callbacks queued across all CPUs (625 in
-this case).
+case, CPU 32), how many jiffies have elapsed since the start of the grace
+period (in this case 2603), the grace-period sequence number (7075), and
+an estimate of the total number of RCU callbacks queued across all CPUs
+(625 in this case).
In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
for each CPU:
@@ -266,15 +265,16 @@ If the relevant grace-period kthread has been unable to run prior to
the stall warning, as was the case in the "All QSes seen" line above,
the following additional line is printed:
- kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1
+ kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
Starving the grace-period kthreads of CPU time can of course result
in RCU CPU stall warnings even when all CPUs and tasks have passed
-through the required quiescent states. The "g" and "c" numbers flag the
-number of the last grace period started and completed, respectively,
-the "f" precedes the ->gp_flags command to the grace-period kthread,
-the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short
-timeout, and the "state" precedes value of the task_struct ->state field.
+through the required quiescent states. The "g" number shows the current
+grace-period sequence number, the "f" precedes the ->gp_flags command
+to the grace-period kthread, the "RCU_GP_WAIT_FQS" indicates that the
+kthread is waiting for a short timeout, the "state" precedes the value of the
+task_struct ->state field, and the "cpu" indicates that the grace-period
+kthread last ran on CPU 5.
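Since the warning now prints a single sequence number, the "g" value
folds together the grace-period count and the phase. A sketch of
decoding it by hand, assuming the two low-order state bits of the
kernel's rcu_seq format:

	/* Split a printed "g" value into its count and phase components. */
	static void decode_g(unsigned long g)
	{
		unsigned long count = g >> 2;	/* high bits: grace-period count */
		unsigned long phase = g & 0x3;	/* low bits: nonzero if GP in progress */

		pr_info("g=%lu: count=%lu phase=%lu\n", g, count, phase);
	}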
Multiple Warnings From One Stall
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 65eb856526b7..c2a7facf7ff9 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -588,6 +588,7 @@ It is extremely simple:
void synchronize_rcu(void)
{
write_lock(&rcu_gp_mutex);
+ smp_mb__after_spinlock();
write_unlock(&rcu_gp_mutex);
}
@@ -609,12 +610,15 @@ don't forget about them when submitting patches making use of RCU!]
The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
and release a global reader-writer lock. The synchronize_rcu()
-primitive write-acquires this same lock, then immediately releases
-it. This means that once synchronize_rcu() exits, all RCU read-side
-critical sections that were in progress before synchronize_rcu() was
-called are guaranteed to have completed -- there is no way that
-synchronize_rcu() would have been able to write-acquire the lock
-otherwise.
+primitive write-acquires this same lock, then releases it. This means
+that once synchronize_rcu() exits, all RCU read-side critical sections
+that were in progress before synchronize_rcu() was called are guaranteed
+to have completed -- there is no way that synchronize_rcu() would have
+been able to write-acquire the lock otherwise. The smp_mb__after_spinlock()
+promotes synchronize_rcu() to a full memory barrier in compliance with
+the "Memory-Barrier Guarantees" listed in:
+
+ Documentation/RCU/Design/Requirements/Requirements.html.
It is possible to nest rcu_read_lock(), since reader-writer locks may
be recursively acquired. Note also that rcu_read_lock() is immune
@@ -816,11 +820,13 @@ RCU list traversal:
list_next_rcu
list_for_each_entry_rcu
list_for_each_entry_continue_rcu
+ list_for_each_entry_from_rcu
hlist_first_rcu
hlist_next_rcu
hlist_pprev_rcu
hlist_for_each_entry_rcu
hlist_for_each_entry_rcu_bh
+ hlist_for_each_entry_from_rcu
hlist_for_each_entry_continue_rcu
hlist_for_each_entry_continue_rcu_bh
hlist_nulls_first_rcu
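The two newly listed _from_ iterators resume an RCU-protected traversal
from a known element rather than from the list head. A minimal sketch of
list_for_each_entry_from_rcu(); struct item and its fields are made up
for illustration:

	struct item {
		struct list_head node;
		int key;
	};

	/*
	 * Caller must be in an RCU read-side critical section; the
	 * traversal resumes at pos rather than at the list head.
	 */
	static struct item *find_from(struct item *pos,
				      struct list_head *head, int key)
	{
		list_for_each_entry_from_rcu(pos, head, node)
			if (pos->key == key)
				return pos;
		return NULL;
	}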
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 533ff5c68970..5cde1ff32ff3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2835,8 +2835,6 @@
nosync [HW,M68K] Disables sync negotiation for all devices.
- notsc [BUGS=X86-32] Disable Time Stamp Counter
-
nowatchdog [KNL] Disable both lockup detectors, i.e.
soft-lockup and NMI watchdog (hard-lockup).
@@ -3632,8 +3630,8 @@
Set time (s) after boot for CPU-hotplug testing.
rcutorture.onoff_interval= [KNL]
- Set time (s) between CPU-hotplug operations, or
- zero to disable CPU-hotplug testing.
+ Set time (jiffies) between CPU-hotplug operations,
+ or zero to disable CPU-hotplug testing.
rcutorture.shuffle_interval= [KNL]
Set task-shuffle interval (s). Shuffling tasks
diff --git a/Documentation/core-api/atomic_ops.rst b/Documentation/core-api/atomic_ops.rst
index 2e7165f86f55..724583453e1f 100644
--- a/Documentation/core-api/atomic_ops.rst
+++ b/Documentation/core-api/atomic_ops.rst
@@ -29,7 +29,7 @@ updated by one CPU, local_t is probably more appropriate. Please see
local_t.
The first operations to implement for atomic_t's are the initializers and
-plain reads. ::
+plain writes. ::
#define ATOMIC_INIT(i) { (i) }
#define atomic_set(v, i) ((v)->counter = (i))
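The distinction matters because ATOMIC_INIT() can only appear in an
initializer, while atomic_set() is the run-time plain write; a two-line
illustration:

	static atomic_t example_count = ATOMIC_INIT(0);	/* initializer */

	void example_reset(void)
	{
		atomic_set(&example_count, 0);		/* plain write */
	}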
diff --git a/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt
new file mode 100644
index 000000000000..28f43e929f6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt
@@ -0,0 +1,84 @@
+Nuvoton NPCM7xx PWM and Fan Tacho controller device
+
+The Nuvoton BMC NPCM7XX supports 8 Pulse-width modulation (PWM)
+controller outputs and 16 Fan tachometer controller inputs.
+
+Required properties for pwm-fan node
+- #address-cells : should be 1.
+- #size-cells : should be 0.
+- compatible : "nuvoton,npcm750-pwm-fan" for Poleg NPCM7XX.
+- reg : specifies physical base address and size of the registers.
+- reg-names : must contain:
+ * "pwm" for the PWM registers.
+ * "fan" for the Fan registers.
+- clocks : phandle of reference clocks.
+- clock-names : must contain
+ * "pwm" for PWM controller operating clock.
+ * "fan" for Fan controller operating clock.
+- interrupts : contain the Fan interrupts with flags for falling edge.
+- pinctrl-names : a pinctrl state named "default" must be defined.
+- pinctrl-0 : phandle referencing pin configuration of the PWM and Fan
+ controller ports.
+
+fan subnode format:
+===================
+A fan subnode can have up to 8 child nodes, each child node representing a fan.
+Each fan subnode must have one PWM channel and at least one Fan tach channel.
+
+A PWM channel can be configured with cooling-levels to create a cooling device.
+The cooling device can be bound to a thermal zone for thermal control.
+
+Required properties for each child node:
+- reg : specify the PWM output channel.
+ an integer value in the range 0 through 7 that represents
+ the PWM channel number used.
+
+- fan-tach-ch : specify the Fan tach input channel.
+ an integer value in the range 0 through 15 that represents
+ the fan tach channel number used.
+
+ At least one Fan tach input channel is required.
+
+Optional property for each child node:
+- cooling-levels: PWM duty cycle values in a range from 0 to 255
+ which correspond to thermal cooling states.
+
+Examples:
+
+pwm_fan:pwm-fan-controller@103000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nuvoton,npcm750-pwm-fan";
+ reg = <0x103000 0x2000>,
+ <0x180000 0x8000>;
+ reg-names = "pwm", "fan";
+ clocks = <&clk NPCM7XX_CLK_APB3>,
+ <&clk NPCM7XX_CLK_APB4>;
+ clock-names = "pwm","fan";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pins &pwm1_pins &pwm2_pins
+ &fanin0_pins &fanin1_pins &fanin2_pins
+ &fanin3_pins &fanin4_pins>;
+ fan@0 {
+ reg = <0x00>;
+ fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ cooling-levels = <127 255>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ fan-tach-ch = /bits/ 8 <0x02 0x03>;
+ };
+ fan@2 {
+ reg = <0x02>;
+ fan-tach-ch = /bits/ 8 <0x04>;
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
index 5f89fb635a1b..f97fd8ab5e45 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible : should be "ingenic,<socname>-intc". Valid strings are:
ingenic,jz4740-intc
+ ingenic,jz4725b-intc
ingenic,jz4770-intc
ingenic,jz4775-intc
ingenic,jz4780-intc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index 20f121daa910..697ca2f26d1b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -7,6 +7,7 @@ Required properties:
- "renesas,irqc-r8a73a4" (R-Mobile APE6)
- "renesas,irqc-r8a7743" (RZ/G1M)
- "renesas,irqc-r8a7745" (RZ/G1E)
+ - "renesas,irqc-r8a77470" (RZ/G1C)
- "renesas,irqc-r8a7790" (R-Car H2)
- "renesas,irqc-r8a7791" (R-Car M2-W)
- "renesas,irqc-r8a7792" (R-Car V2H)
@@ -16,6 +17,7 @@ Required properties:
- "renesas,intc-ex-r8a7796" (R-Car M3-W)
- "renesas,intc-ex-r8a77965" (R-Car M3-N)
- "renesas,intc-ex-r8a77970" (R-Car V3M)
+ - "renesas,intc-ex-r8a77980" (R-Car V3H)
- "renesas,intc-ex-r8a77995" (R-Car D3)
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
interrupts.txt in this directory
diff --git a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
index cafe2197dad9..c3a29c5feea3 100644
--- a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
+++ b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
@@ -3,7 +3,7 @@
Required properties:
- compatible: "qca,ar7100-usb-phy"
- #phys-cells: should be 0
-- reset-names: "usb-phy"[, "usb-suspend-override"]
+- reset-names: "phy"[, "suspend-override"]
- resets: references to the reset controllers
Example:
@@ -11,7 +11,7 @@ Example:
usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
index b1fe7e9de1b4..18d4d0166c76 100644
--- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -1,19 +1,25 @@
-Mediatek MT6577, MT6572 and MT6589 Timers
----------------------------------------
+Mediatek Timers
+---------------
+
+Mediatek SoCs have two different timers, depending on the platform:
+- GPT (General Purpose Timer)
+- SYST (System Timer)
+
+The proper timer will be selected automatically by the driver.
Required properties:
- compatible should contain:
- * "mediatek,mt2701-timer" for MT2701 compatible timers
- * "mediatek,mt6580-timer" for MT6580 compatible timers
- * "mediatek,mt6589-timer" for MT6589 compatible timers
- * "mediatek,mt7623-timer" for MT7623 compatible timers
- * "mediatek,mt8127-timer" for MT8127 compatible timers
- * "mediatek,mt8135-timer" for MT8135 compatible timers
- * "mediatek,mt8173-timer" for MT8173 compatible timers
- * "mediatek,mt6577-timer" for MT6577 and all above compatible timers
-- reg: Should contain location and length for timers register.
-- clocks: Clocks driving the timer hardware. This list should include two
- clocks. The order is system clock and as second clock the RTC clock.
+ * "mediatek,mt2701-timer" for MT2701 compatible timers (GPT)
+ * "mediatek,mt6580-timer" for MT6580 compatible timers (GPT)
+ * "mediatek,mt6589-timer" for MT6589 compatible timers (GPT)
+ * "mediatek,mt7623-timer" for MT7623 compatible timers (GPT)
+ * "mediatek,mt8127-timer" for MT8127 compatible timers (GPT)
+ * "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
+ * "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
+ * "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
+ * "mediatek,mt6765-timer" for MT6765 compatible timers (SYST)
+- reg: Should contain the location and length of the timer register.
+- clocks: Should contain the system clock.
Examples:
@@ -21,5 +27,5 @@ Examples:
compatible = "mediatek,mt6577-timer";
reg = <0x10008000 0x80>;
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&system_clk>, <&rtc_clk>;
+ clocks = <&system_clk>;
};
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index dbdf62907703..c7858dd1ea8f 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -5,10 +5,10 @@
#
# Architecture requirements
#
-# * arm64
+# * arm/arm64
#
-# Rely on eret context synchronization when returning from IPI handler, and
-# when returning to user-space.
+# Rely on implicit context synchronization as a result of exception return
+# when returning from IPI handler, and when returning to user-space.
#
# * x86
#
@@ -31,7 +31,7 @@
-----------------------
| alpha: | TODO |
| arc: | TODO |
- | arm: | TODO |
+ | arm: | ok |
| arm64: | ok |
| c6x: | TODO |
| h8300: | TODO |
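The feature tracked in this table is the core-serializing membarrier
command. A user-space sketch using a raw syscall wrapper, since libc
typically provides none:

	#include <linux/membarrier.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int membarrier(int cmd, int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		/* Register once, then issue core-serializing barriers. */
		if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0))
			return 1;
		return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0) ? 1 : 0;
	}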
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 37bf0a9de75c..7cca82ed2ed1 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -64,7 +64,7 @@ prototypes:
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
+ umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
locking rules:
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 53b89d0edc15..46d1b1be3a51 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -71,6 +71,39 @@ Other Functions
.. kernel-doc:: fs/block_dev.c
:export:
+.. kernel-doc:: fs/anon_inodes.c
+ :export:
+
+.. kernel-doc:: fs/attr.c
+ :export:
+
+.. kernel-doc:: fs/d_path.c
+ :export:
+
+.. kernel-doc:: fs/dax.c
+ :export:
+
+.. kernel-doc:: fs/direct-io.c
+ :export:
+
+.. kernel-doc:: fs/file_table.c
+ :export:
+
+.. kernel-doc:: fs/libfs.c
+ :export:
+
+.. kernel-doc:: fs/posix_acl.c
+ :export:
+
+.. kernel-doc:: fs/stat.c
+ :export:
+
+.. kernel-doc:: fs/sync.c
+ :export:
+
+.. kernel-doc:: fs/xattr.c
+ :export:
+
The proc filesystem
===================
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 17bb4dc28fae..7b7b845c490a 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -602,3 +602,23 @@ in your dentry operations instead.
dentry separately, and it now has request_mask and query_flags arguments
to specify the fields and sync type requested by statx. Filesystems not
supporting any statx-specific features may ignore the new arguments.
+--
+[mandatory]
+ ->atomic_open() calling conventions have changed. Gone is int *opened,
+ along with FILE_OPENED/FILE_CREATED. In place of those we have
+ FMODE_OPENED/FMODE_CREATED, set in file->f_mode. Additionally, return
+ value for 'called finish_no_open(), open it yourself' case has become
+ 0, not 1. Since finish_no_open() itself is returning 0 now, that part
+ does not need any changes in ->atomic_open() instances.
+--
+[mandatory]
+ alloc_file() has become static now; two wrappers are to be used instead.
+ alloc_file_pseudo(inode, vfsmount, name, flags, ops) is for the cases
+ when dentry needs to be created; that's the majority of old alloc_file()
+ users. Calling conventions: on success a reference to new struct file
+ is returned and the caller's reference to inode is subsumed by that. On
+ failure, ERR_PTR() is returned and no caller's references are affected,
+ so the caller needs to drop the inode reference it held.
+ alloc_file_clone(file, flags, ops) does not affect any caller's references.
+ On success you get a new struct file sharing the mount/dentry with the
+ original, on failure - ERR_PTR().
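A sketch of the alloc_file_pseudo() reference rule described above;
example_mnt and example_fops are placeholders for a filesystem's own
mount and file_operations:

	struct file *file;

	file = alloc_file_pseudo(inode, example_mnt, "[example]",
				 O_RDWR, &example_fops);
	if (IS_ERR(file)) {
		iput(inode);	/* failure: caller still owns the inode ref */
		return PTR_ERR(file);
	}
	/* success: the inode reference now belongs to the new file */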
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index f608180ad59d..85907d5b9c2c 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -386,7 +386,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,
- unsigned open_flag, umode_t create_mode, int *opened);
+ unsigned open_flag, umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
};
@@ -496,13 +496,15 @@ otherwise noted.
atomic_open: called on the last component of an open. Using this optional
method the filesystem can look up, possibly create and open the file in
- one atomic operation. If it cannot perform this (e.g. the file type
- turned out to be wrong) it may signal this by returning 1 instead of
- usual 0 or -ve . This method is only called if the last component is
- negative or needs lookup. Cached positive dentries are still handled by
- f_op->open(). If the file was created, the FILE_CREATED flag should be
- set in "opened". In case of O_EXCL the method must only succeed if the
- file didn't exist and hence FILE_CREATED shall always be set on success.
+ one atomic operation. If it wants to leave actual opening to the
+ caller (e.g. if the file turned out to be a symlink, device, or just
+ something the filesystem won't do an atomic open for), it may signal this by
+ returning finish_no_open(file, dentry). This method is only called if
+ the last component is negative or needs lookup. Cached positive dentries
+ are still handled by f_op->open(). If the file was created, the
+ FMODE_CREATED flag should be set in file->f_mode. In case of O_EXCL,
+ the method must only succeed if the file didn't exist and hence FMODE_CREATED
+ shall always be set on success.
tmpfile: called in the end of O_TMPFILE open(). Optional, equivalent to
atomically creating, opening and unlinking a file in given directory.
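To illustrate the atomic_open conventions described above, a greatly
simplified sketch of an instance under the new rules; the examplefs_*
helpers are hypothetical and dentry instantiation is omitted:

	static int examplefs_atomic_open(struct inode *dir, struct dentry *dentry,
					 struct file *file, unsigned open_flag,
					 umode_t create_mode)
	{
		struct inode *inode;

		inode = examplefs_lookup(dir, dentry);	/* hypothetical helper */
		if (inode && S_ISLNK(inode->i_mode))
			/* Leave the actual open to the caller; returns 0 now. */
			return finish_no_open(file, dentry);

		if (!inode) {
			if (!(open_flag & O_CREAT))
				return -ENOENT;
			inode = examplefs_create(dir, dentry, create_mode);
			if (IS_ERR(inode))
				return PTR_ERR(inode);
			file->f_mode |= FMODE_CREATED;	/* replaces FILE_CREATED */
		} else if (open_flag & O_EXCL) {
			return -EEXIST;	/* O_EXCL: must not have existed */
		}

		return finish_open(file, dentry, NULL);	/* sets FMODE_OPENED */
	}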
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index 9ba6587b7657..b2de8fa49273 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -16,6 +16,11 @@ Supported chips:
Prefixes: 'max34446'
Addresses scanned: -
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
+ * Maxim MAX34451
+ PMBus 16-Channel V/I Monitor and 12-Channel Sequencer/Marginer
+ Prefixes: 'max34451'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34451.pdf
* Maxim MAX34460
PMBus 12-Channel Voltage Monitor & Sequencer
Prefix: 'max34460'
@@ -36,9 +41,10 @@ Description
This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
-It also supports the MAX34460 and MAX34461 PMBus Voltage Monitor & Sequencers.
-The MAX34460 supports 12 voltage channels, and the MAX34461 supports 16 voltage
-channels.
+It also supports the MAX34451, MAX34460, and MAX34461 PMBus Voltage Monitor &
+Sequencers. The MAX34451 supports monitoring voltage or current of 12 channels
+based on GIN pins. The MAX34460 supports 12 voltage channels, and the MAX34461
+supports 16 voltage channels.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -93,7 +99,7 @@ curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
-curr[1-4]_average Historical average current (MAX34446 only).
+curr[1-4]_average Historical average current (MAX34446/34451 only).
curr[1-6]_highest Historical maximum current.
curr[1-6]_reset_history Write any value to reset history.
@@ -123,5 +129,7 @@ temp[1-8]_reset_history Write any value to reset history.
temp7 and temp8 attributes only exist for MAX34440.
MAX34446 only supports temp[1-3].
+MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on input pins)
+and temp[1-5].
MAX34460 supports attribute groups in[1-12] and temp[1-5].
MAX34461 supports attribute groups in[1-16] and temp[1-5].
diff --git a/Documentation/hwmon/mlxreg-fan b/Documentation/hwmon/mlxreg-fan
new file mode 100644
index 000000000000..fc531c6978d4
--- /dev/null
+++ b/Documentation/hwmon/mlxreg-fan
@@ -0,0 +1,60 @@
+Kernel driver mlxreg-fan
+========================
+
+Provides FAN control for the following Mellanox systems:
+QMB700, equipped with 40x200GbE InfiniBand ports;
+MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
+MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
+MSN3800, equipped with 64x1000GbE Ethernet ports;
+These are Top-of-Rack systems, equipped with a Mellanox switch board
+carrying Mellanox Quantum or Spectrum-2 devices.
+The FAN controller is implemented in programmable device logic.
+
+The default register offsets within the programmable device are as
+follows:
+- pwm1 0xe3
+- fan1 (tacho1) 0xe4
+- fan2 (tacho2) 0xe5
+- fan3 (tacho3) 0xe6
+- fan4 (tacho4) 0xe7
+- fan5 (tacho5) 0xe8
+- fan6 (tacho6) 0xe9
+- fan7 (tacho7) 0xea
+- fan8 (tacho8) 0xeb
+- fan9 (tacho9) 0xec
+- fan10 (tacho10) 0xed
+- fan11 (tacho11) 0xee
+- fan12 (tacho12) 0xef
+This setup can be re-programmed with other registers.
+
+Author: Vadim Pasternak <vadimp@mellanox.com>
+
+Description
+-----------
+
+The driver implements a simple interface for driving a fan connected to
+a PWM output and tachometer inputs.
+This driver obtains the PWM and tachometer register locations according to
+the system configuration and creates FAN/PWM hwmon objects and a cooling
+device. PWM and tachometers are sensed through the on-board programmable
+device, which exports its register map. This device can be attached to
+any bus type for which register mapping is supported.
+A single instance is created with one PWM control, up to 12 tachometers and
+one cooling device. There can be as many instances as the programmable
+device supports.
+The driver exposes the fans to user space through the hwmon and
+thermal sysfs interfaces.
+
+/sys files in hwmon subsystem
+-----------------------------
+
+fan[1-12]_fault - RO files for tachometers TACH1-TACH12 fault indication
+fan[1-12]_input - RO files for tachometers TACH1-TACH12 input (in RPM)
+pwm1 - RW file for fan[1-12] target duty cycle (0..255)
+
+/sys files in thermal subsystem
+-------------------------------
+
+cur_state - RW file for current cooling state of the cooling device
+ (0..max_state)
+max_state - RO file for maximum cooling state of the cooling device
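+
+Usage sketch (hwmon0 is an illustrative assumption; in practice the
+right hwmon node should be looked up by its name attribute):
+
+	#include <stdio.h>
+	#include <stdlib.h>
+
+	int main(void)
+	{
+		FILE *f;
+		int rpm;
+
+		/* Read the current speed of the first tachometer. */
+		f = fopen("/sys/class/hwmon/hwmon0/fan1_input", "r");
+		if (!f || fscanf(f, "%d", &rpm) != 1)
+			exit(EXIT_FAILURE);
+		fclose(f);
+		printf("fan1: %d RPM\n", rpm);
+
+		/* Request roughly 50% duty cycle on the 0..255 scale. */
+		f = fopen("/sys/class/hwmon/hwmon0/pwm1", "w");
+		if (!f)
+			exit(EXIT_FAILURE);
+		fprintf(f, "128\n");
+		fclose(f);
+		return EXIT_SUCCESS;
+	}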
diff --git a/Documentation/hwmon/npcm750-pwm-fan b/Documentation/hwmon/npcm750-pwm-fan
new file mode 100644
index 000000000000..6156ef7398e6
--- /dev/null
+++ b/Documentation/hwmon/npcm750-pwm-fan
@@ -0,0 +1,22 @@
+Kernel driver npcm750-pwm-fan
+=============================
+
+Supported chips:
+ NUVOTON NPCM750/730/715/705
+
+Authors:
+ <tomer.maimon@nuvoton.com>
+
+Description:
+------------
+This driver implements support for the NUVOTON NPCM7XX PWM and fan tacho
+controller. The PWM controller supports up to 8 PWM outputs. The fan tacho
+controller supports up to 16 tachometer inputs.
+
+The driver provides the following sensor accesses in sysfs:
+
+fanX_input ro provides the current fan rotation value in RPM as
+ reported by the fan to the device.
+
+pwmX rw gets or sets the PWM fan control value. This is an
+ integer value between 0 (off) and 255 (full speed).
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index fc337c317c67..2b9e1005d88b 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -171,6 +171,13 @@ in[0-*]_label Suggested voltage channel label.
user-space.
RO
+in[0-*]_enable
+ Enable or disable the sensor.
+ When disabled, reading the sensor will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
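+ A minimal user-space sketch (hwmon0 and the in0 channel are
+ illustrative assumptions, not fixed names) of detecting a
+ disabled sensor:
+
+	#include <errno.h>
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char buf[16];
+		int fd = open("/sys/class/hwmon/hwmon0/in0_input", O_RDONLY);
+
+		if (fd < 0)
+			return 1;
+		if (read(fd, buf, sizeof(buf) - 1) < 0 && errno == ENODATA)
+			fprintf(stderr, "sensor disabled, see in0_enable\n");
+		close(fd);
+		return 0;
+	}
+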
cpu[0-*]_vid CPU core reference voltage.
Unit: millivolt
RO
@@ -236,6 +243,13 @@ fan[1-*]_label Suggested fan channel label.
In all other cases, the label is provided by user-space.
RO
+fan[1-*]_enable
+ Enable or disable the sensor.
+ When disabled, reading the sensor will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with fans.
@@ -409,6 +423,13 @@ temp_reset_history
Reset temp_lowest and temp_highest for all sensors
WO
+temp[1-*]_enable
+ Enable or disable the sensor.
+ When disabled, reading the sensor will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Some chips measure temperature using external thermistors and an ADC, and
report the temperature measurement as a voltage. Converting this voltage
back to a temperature (or the other way around for limits) requires
@@ -468,6 +489,13 @@ curr_reset_history
Reset currX_lowest and currX_highest for all sensors
WO
+curr[1-*]_enable
+ Enable or disable the sensor.
+ When disabled, reading the sensor will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with currents.
*********
@@ -566,6 +594,13 @@ power[1-*]_crit Critical maximum power.
Unit: microWatt
RW
+power[1-*]_enable Enable or disable the sensor.
+ When disabled, reading the sensor will
+ return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with power readings.
**********
@@ -576,6 +611,12 @@ energy[1-*]_input Cumulative energy use
Unit: microJoule
RO
+energy[1-*]_enable Enable or disable the sensor.
+ When disabled, reading the sensor will
+ return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
************
* Humidity *
@@ -586,6 +627,13 @@ humidity[1-*]_input Humidity
RO
+humidity[1-*]_enable Enable or disable the sensor.
+ When disabled, reading the sensor will
+ return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
**********
* Alarms *
**********
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index cb3b0de83fc6..10f4499e677c 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -80,6 +80,26 @@ After the instruction is single-stepped, Kprobes executes the
"post_handler," if any, that is associated with the kprobe.
Execution then continues with the instruction following the probepoint.
+Changing Execution Path
+-----------------------
+
+Since kprobes can probe running kernel code, it can change the
+register set, including the instruction pointer. This operation requires
+maximum care, such as preserving the stack frame and recovering the
+execution path. Since it operates on a running kernel and needs deep
+knowledge of computer architecture and concurrent computing, you can
+easily shoot yourself in the foot.
+
+If you change the instruction pointer (and set up other related
+registers) in your pre_handler, you must return !0 so that kprobes
+stops single stepping and simply returns to the given address.
+This also means the post_handler will no longer be called.
+
+Note that this operation may be harder on some architectures which use
+a TOC (Table of Contents) for function calls, since you have to set up
+a new TOC for your function in your module, and restore the old one
+after returning from it.
+
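+As a minimal sketch of such a diversion (my_stub() is a hypothetical
+function in your module, not a kernel symbol), a pre_handler could look
+like::
+
+	static int divert_pre_handler(struct kprobe *p, struct pt_regs *regs)
+	{
+		/* Divert execution to my_stub(); it must be compatible
+		 * with the stack frame at the probed address. */
+		instruction_pointer_set(regs, (unsigned long)my_stub);
+
+		/* Return !0: no single stepping, no post_handler. */
+		return 1;
+	}
+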
Return Probes
-------------
@@ -262,7 +282,7 @@ is optimized, that modification is ignored. Thus, if you want to
tweak the kernel's execution path, you need to suppress optimization,
using one of the following techniques:
-- Specify an empty function for the kprobe's post_handler or break_handler.
+- Specify an empty function for the kprobe's post_handler.
or
@@ -474,7 +494,7 @@ error occurs during registration, all probes in the array, up to
the bad probe, are safely unregistered before the register_*probes
function returns.
-- kps/rps/jps: an array of pointers to ``*probe`` data structures
+- kps/rps: an array of pointers to ``*probe`` data structures
- num: the number of the array entries.
.. note::
@@ -566,12 +586,11 @@ the same handler) may run concurrently on different CPUs.
Kprobes does not use mutexes or allocate memory except during
registration and unregistration.
-Probe handlers are run with preemption disabled. Depending on the
-architecture and optimization state, handlers may also run with
-interrupts disabled (e.g., kretprobe handlers and optimized kprobe
-handlers run without interrupt disabled on x86/x86-64). In any case,
-your handler should not yield the CPU (e.g., by attempting to acquire
-a semaphore).
+Probe handlers are run with preemption or interrupts disabled,
+depending on the architecture and optimization state (e.g.,
+kretprobe handlers and optimized kprobe handlers run with interrupts
+enabled on x86/x86-64). In any case, your handler should not yield
+the CPU (e.g., by attempting to acquire a semaphore, or by waiting
+for I/O).
Since a return probe is implemented by replacing the return
address with the trampoline's address, stack backtraces and calls
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index a02d6bbfc9d0..0d8d7ef131e9 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -2179,32 +2179,41 @@ or:
event_indicated = 1;
wake_up_process(event_daemon);
-A write memory barrier is implied by wake_up() and co. if and only if they
-wake something up. The barrier occurs before the task state is cleared, and so
-sits between the STORE to indicate the event and the STORE to set TASK_RUNNING:
+A general memory barrier is executed by wake_up() if it wakes something up.
+If it doesn't wake anything up then a memory barrier may or may not be
+executed; you must not rely on it. The barrier occurs before the task state
+is accessed, in particular, it sits between the STORE to indicate the event
+and the STORE to set TASK_RUNNING:
- CPU 1 CPU 2
+ CPU 1 (Sleeper) CPU 2 (Waker)
=============================== ===============================
set_current_state(); STORE event_indicated
smp_store_mb(); wake_up();
- STORE current->state <write barrier>
- <general barrier> STORE current->state
- LOAD event_indicated
+ STORE current->state ...
+ <general barrier> <general barrier>
+ LOAD event_indicated if ((LOAD task->state) & TASK_NORMAL)
+ STORE task->state
-To repeat, this write memory barrier is present if and only if something
-is actually awakened. To see this, consider the following sequence of
-events, where X and Y are both initially zero:
+where "task" is the thread being woken up and it equals CPU 1's "current".
+
+To repeat, a general memory barrier is guaranteed to be executed by wake_up()
+if something is actually awakened, but otherwise there is no such guarantee.
+To see this, consider the following sequence of events, where X and Y are both
+initially zero:
CPU 1 CPU 2
=============================== ===============================
- X = 1; STORE event_indicated
+ X = 1; Y = 1;
smp_mb(); wake_up();
- Y = 1; wait_event(wq, Y == 1);
- wake_up(); load from Y sees 1, no memory barrier
- load from X might see 0
+ LOAD Y LOAD X
+
+If a wakeup does occur, at least one of the two loads must see 1. If, on
+the other hand, a wakeup does not occur, both loads might see 0.
-In contrast, if a wakeup does occur, CPU 2's load from X would be guaranteed
-to see 1.
+wake_up_process() always executes a general memory barrier. The barrier again
+occurs before the task state is accessed. In particular, if the wake_up() in
+the previous snippet were replaced by a call to wake_up_process() then one of
+the two loads would be guaranteed to see 1.
The available waker functions include:
@@ -2224,6 +2233,8 @@ The available waker functions include:
wake_up_poll();
wake_up_process();
+In terms of memory ordering, these functions all provide the same guarantees as
+wake_up() (or stronger ones).
[!] Note that the memory barriers implied by the sleeper and the waker do _not_
order multiple stores before the wake-up with respect to loads of those stored
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 921739d00f69..7f01fb1c1084 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1891,22 +1891,22 @@ Mandatory barriers should not be used to control SMP effects
 /* modify the ownership */
 desc->status = DEVICE_OWN;
- /* synchronize memory before notifying the device via MMIO */
- wmb();
-
 /* notify the device of the updated descriptor */
 writel(DESC_NOTIFY, doorbell);
 }
 dma_rmb() guarantees that the device has released ownership before we
- read data from the descriptor, and dma_wmb() guarantees that data has
- been written to the descriptor before the device sees that it has
- ownership again. wmb() is needed to guarantee that writes to cache
- coherent memory have completed before attempting a write to the cache
- incoherent MMIO region.
-
- For details on consistent memory, refer to the Documentation/DMA-API.txt
- document.
+ read data from the descriptor, and dma_wmb() guarantees that data has
+ been written to the descriptor before the device sees that it has
+ ownership again. Note that writel() guarantees that the cache coherent
+ memory writes have completed before the write to the MMIO region, so
+ there is no need to execute wmb() in front of writel(). The cheaper
+ writel_relaxed() does not provide this guarantee and must not be used
+ here.
+
+ For details on relaxed I/O accessors such as writel_relaxed(), refer to
+ the "Kernel I/O barrier effects" section; for details on consistent
+ memory, refer to the Documentation/DMA-API.txt document.
 MMIO write barrier
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index d10944e619d3..cb8db4f9d097 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4391,6 +4391,22 @@ all such vmexits.
Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
+7.14 KVM_CAP_S390_HPAGE_1M
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, -EINVAL if hpage module parameter was not set
+ or cmma is enabled
+
+With this capability, KVM support for memory backing with 1m pages
+through hugetlbfs can be enabled for a VM. After the capability is
+enabled, cmma can't be enabled anymore, and pfmfi and storage key
+interpretation are disabled. If cmma has already been enabled or the
+hpage module parameter is not set to 1, -EINVAL is returned.
+
+While it is generally possible to create a huge page backed VM without
+this capability, the VM will not be able to run.
+
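+As a sketch (vm_fd is the VM file descriptor returned by KVM_CREATE_VM;
+error handling abbreviated), a VMM enables the capability with:
+
+	struct kvm_enable_cap cap = {
+		.cap = KVM_CAP_S390_HPAGE_1M,
+	};
+
+	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
+		/* hpage=1 was not set, or cmma is already enabled */
+		abort();
+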
8. Other capabilities.
----------------------
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index a16aa2113840..f662d3c530e5 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -29,7 +29,11 @@ mount options are:
L2 and L3 CDP are controlled separately.
RDT features are orthogonal. A particular system may support only
-monitoring, only control, or both monitoring and control.
+monitoring, only control, or both monitoring and control. Cache
+pseudo-locking is a unique way of using cache control to "pin" or
+"lock" data in the cache. Details can be found in
+"Cache Pseudo-Locking".
+
The mount succeeds if either of allocation or monitoring is present, but
only those files and directories supported by the system will be created.
@@ -65,6 +69,29 @@ related to allocation:
some platforms support devices that have their
own settings for cache use which can over-ride
these bits.
+"bit_usage": Annotated capacity bitmasks showing how all
+ instances of the resource are used. The legend is:
+ "0" - Corresponding region is unused. When the system's
+ resources have been allocated and a "0" is found
+ in "bit_usage" it is a sign that resources are
+ wasted.
+ "H" - Corresponding region is used by hardware only
+ but available for software use. If a resource
+ has bits set in "shareable_bits" but not all
+ of these bits appear in the resource groups'
+ schematas, then the bits appearing in
+ "shareable_bits" but in no resource group will
+ be marked as "H".
+ "X" - Corresponding region is available for sharing and
+ used by hardware and software. These are the
+ bits that appear in "shareable_bits" as
+ well as a resource group's allocation.
+ "S" - Corresponding region is used by software
+ and available for sharing.
+ "E" - Corresponding region is used exclusively by
+ one resource group. No sharing allowed.
+ "P" - Corresponding region is pseudo-locked. No
+ sharing allowed.
Memory bandwidth (MB) subdirectory contains the following files
with respect to allocation:
@@ -151,6 +178,9 @@ All groups contain the following files:
CPUs to/from this group. As with the tasks file a hierarchy is
maintained where MON groups may only include CPUs owned by the
parent CTRL_MON group.
+ When the resource group is in pseudo-locked mode, this file will
+ only be readable, reflecting the CPUs associated with the
+ pseudo-locked region.
"cpus_list":
@@ -163,6 +193,21 @@ When control is enabled all CTRL_MON groups will also contain:
A list of all the resources available to this group.
Each resource has its own line and format - see below for details.
+"size":
+ Mirrors the display of the "schemata" file to display the size in
+ bytes of each allocation instead of the bits representing the
+ allocation.
+
+"mode":
+ The "mode" of the resource group dictates the sharing of its
+ allocations. A "shareable" resource group allows sharing of its
+ allocations while an "exclusive" resource group does not. A
+ cache pseudo-locked region is created by first writing
+ "pseudo-locksetup" to the "mode" file before writing the cache
+ pseudo-locked region's schemata to the resource group's "schemata"
+ file. On successful pseudo-locked region creation the mode will
+ automatically change to "pseudo-locked".
+
When monitoring is enabled all MON groups will also contain:
"mon_data":
@@ -379,6 +424,170 @@ L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+Cache Pseudo-Locking
+--------------------
+CAT enables a user to specify the amount of cache space that an
+application can fill. Cache pseudo-locking builds on the fact that a
+CPU can still read and write data pre-allocated outside its currently
+allocated area as long as the access is a cache hit. With cache
+pseudo-locking, data can be
+preloaded into a reserved portion of cache that no application can
+fill, and from that point on will only serve cache hits. The cache
+pseudo-locked memory is made accessible to user space where an
+application can map it into its virtual address space and thus have
+a region of memory with reduced average read latency.
+
+The creation of a cache pseudo-locked region is triggered by a request
+from the user, accompanied by a schemata of the region
+to be pseudo-locked. The cache pseudo-locked region is created as follows:
+- Create a CAT allocation CLOSNEW with a CBM matching the schemata
+ from the user of the cache region that will contain the pseudo-locked
+ memory. This region must not overlap with any current CAT allocation/CLOS
+ on the system and no future overlap with this cache region is allowed
+ while the pseudo-locked region exists.
+- Create a contiguous region of memory of the same size as the cache
+ region.
+- Flush the cache, disable hardware prefetchers, disable preemption.
+- Make CLOSNEW the active CLOS and touch the allocated memory to load
+ it into the cache.
+- Set the previous CLOS as active.
+- At this point the closid CLOSNEW can be released - the cache
+ pseudo-locked region is protected as long as its CBM does not appear in
+ any CAT allocation. Even though the cache pseudo-locked region will from
+ this point on not appear in any CBM of any CLOS, an application running with
+ any CLOS will be able to access the memory in the pseudo-locked region, since
+ the region continues to serve cache hits.
+- The contiguous region of memory loaded into the cache is exposed to
+ user-space as a character device.
+
+Cache pseudo-locking increases the probability that data will remain
+in the cache by carefully configuring the CAT feature and controlling
+application behavior. There is no guarantee that data is placed in
+cache. Instructions like INVD, WBINVD, CLFLUSH, etc. can still evict
+"locked" data from cache. Power management C-states may shrink or
+power off cache. Deeper C-states will automatically be restricted on
+pseudo-locked region creation.
+
+It is required that an application using a pseudo-locked region runs
+with affinity to the cores (or a subset of the cores) associated
+with the cache on which the pseudo-locked region resides. A sanity check
+within the code will not allow an application to map pseudo-locked memory
+unless it runs with affinity to cores associated with the cache on which the
+pseudo-locked region resides. The sanity check is only done during the
+initial mmap() handling; there is no enforcement afterwards, and the
+application itself needs to ensure it remains affine to the correct cores.
+
+Pseudo-locking is accomplished in two stages:
+1) During the first stage the system administrator allocates a portion
+ of cache that should be dedicated to pseudo-locking. At this time an
+ equivalent portion of memory is allocated, loaded into allocated
+ cache portion, and exposed as a character device.
+2) During the second stage a user-space application maps (mmap()) the
+ pseudo-locked memory into its address space.
+
+Cache Pseudo-Locking Interface
+------------------------------
+A pseudo-locked region is created using the resctrl interface as follows:
+
+1) Create a new resource group by creating a new directory in /sys/fs/resctrl.
+2) Change the new resource group's mode to "pseudo-locksetup" by writing
+ "pseudo-locksetup" to the "mode" file.
+3) Write the schemata of the pseudo-locked region to the "schemata" file. All
+ bits within the schemata should be "unused" according to the "bit_usage"
+ file.
+
+On successful pseudo-locked region creation the "mode" file will contain
+"pseudo-locked" and a new character device with the same name as the resource
+group will exist in /dev/pseudo_lock. This character device can be mmap()'ed
+by user space in order to obtain access to the pseudo-locked memory region.
+
+An example of cache pseudo-locked region creation and usage can be found below.
+
+Cache Pseudo-Locking Debugging Interface
+----------------------------------------
+The pseudo-locking debugging interface is enabled by default (if
+CONFIG_DEBUG_FS is enabled) and can be found in /sys/kernel/debug/resctrl.
+
+There is no explicit way for the kernel to test if a provided memory
+location is present in the cache. The pseudo-locking debugging interface uses
+the tracing infrastructure to provide two ways to measure cache residency of
+the pseudo-locked region:
+1) Memory access latency using the pseudo_lock_mem_latency tracepoint. Data
+ from these measurements are best visualized using a hist trigger (see
+ example below). In this test the pseudo-locked region is traversed at
+ a stride of 32 bytes while hardware prefetchers and preemption
+ are disabled. This also provides a substitute visualization of cache
+ hits and misses.
+2) Cache hit and miss measurements using model specific precision counters if
+ available. Depending on the levels of cache on the system the pseudo_lock_l2
+ and pseudo_lock_l3 tracepoints are available.
+ WARNING: triggering this measurement uses from two (for just L2
+ measurements) to four (for L2 and L3 measurements) precision counters on
+ the system. If any other measurements are in progress, the counters and
+ their corresponding event registers will be clobbered.
+
+When a pseudo-locked region is created a new debugfs directory is created for
+it in debugfs as /sys/kernel/debug/resctrl/<newdir>. A single
+write-only file, pseudo_lock_measure, is present in this directory. The
+measurement on the pseudo-locked region depends on the number, 1 or 2,
+written to this debugfs file. Since the measurements are recorded with the
+tracing infrastructure the relevant tracepoints need to be enabled before the
+measurement is triggered.
+
+Example of latency debugging interface:
+In this example a pseudo-locked region named "newlock" was created. Here is
+how we can measure the latency in cycles of reading from this region and
+visualize this data with a histogram that is available if CONFIG_HIST_TRIGGERS
+is set:
+# :> /sys/kernel/debug/tracing/trace
+# echo 'hist:keys=latency' > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/trigger
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# cat /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/hist
+
+# event histogram
+#
+# trigger info: hist:keys=latency:vals=hitcount:sort=hitcount:size=2048 [active]
+#
+
+{ latency: 456 } hitcount: 1
+{ latency: 50 } hitcount: 83
+{ latency: 36 } hitcount: 96
+{ latency: 44 } hitcount: 174
+{ latency: 48 } hitcount: 195
+{ latency: 46 } hitcount: 262
+{ latency: 42 } hitcount: 693
+{ latency: 40 } hitcount: 3204
+{ latency: 38 } hitcount: 3484
+
+Totals:
+ Hits: 8192
+ Entries: 9
+ Dropped: 0
+
+Example of cache hits/misses debugging:
+In this example a pseudo-locked region named "newlock" was created on the L2
+cache of a platform. Here is how we can obtain details of the cache hits
+and misses using the platform's precision counters.
+
+# :> /sys/kernel/debug/tracing/trace
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# echo 2 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# cat /sys/kernel/debug/tracing/trace
+
+# tracer: nop
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ pseudo_lock_mea-1672 [002] .... 3132.860500: pseudo_lock_l2: hits=4097 miss=0
+
+
Examples for RDT allocation usage:
Example 1
@@ -502,7 +711,172 @@ siblings and only the real time threads are scheduled on the cores 4-7.
# echo F0 > p0/cpus
-4) Locking between applications
+Example 4
+---------
+
+The resource groups in previous examples were all in the default "shareable"
+mode, allowing sharing of their cache allocations. If one resource group
+configures a cache allocation, then nothing prevents another resource group
+from overlapping with that allocation.
+
+In this example a new exclusive resource group will be created on a L2 CAT
+system with two L2 cache instances that can be configured with an 8-bit
+capacity bitmask. The new exclusive resource group will be configured to use
+25% of each cache instance.
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+First, we observe that the default group is configured to allocate to all of
+the L2 cache:
+
+# cat schemata
+L2:0=ff;1=ff
+
+We could attempt to create the new resource group at this point, but it will
+fail because of the overlap with the schemata of the default group:
+# mkdir p0
+# echo 'L2:0=0x3;1=0x3' > p0/schemata
+# cat p0/mode
+shareable
+# echo exclusive > p0/mode
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+schemata overlaps
+
+To ensure that there is no overlap with another resource group the default
+resource group's schemata has to change, making it possible for the new
+resource group to become exclusive.
+# echo 'L2:0=0xfc;1=0xfc' > schemata
+# echo exclusive > p0/mode
+# grep . p0/*
+p0/cpus:0
+p0/mode:exclusive
+p0/schemata:L2:0=03;1=03
+p0/size:L2:0=262144;1=262144
+
+A newly created resource group will not overlap with an exclusive resource
+group:
+# mkdir p1
+# grep . p1/*
+p1/cpus:0
+p1/mode:shareable
+p1/schemata:L2:0=fc;1=fc
+p1/size:L2:0=786432;1=786432
+
+The bit_usage will reflect how the cache is used:
+# cat info/L2/bit_usage
+0=SSSSSSEE;1=SSSSSSEE
+
+A resource group cannot be forced to overlap with an exclusive resource group:
+# echo 'L2:0=0x1;1=0x1' > p1/schemata
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+overlaps with exclusive group
+
+Example of Cache Pseudo-Locking
+-------------------------------
+Lock a portion of the L2 cache from cache id 1 using CBM 0x3. The
+pseudo-locked region is exposed at /dev/pseudo_lock/newlock and can be
+provided to an application as the argument to mmap().
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+Ensure that there are bits available that can be pseudo-locked. Since only
+unused bits can be pseudo-locked, the bits to be pseudo-locked need to be
+removed from the default resource group's schemata:
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSSS
+# echo 'L2:1=0xfc' > schemata
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSS00
+
+Create a new resource group that will be associated with the pseudo-locked
+region, indicate that it will be used for a pseudo-locked region, and
+configure the requested pseudo-locked region capacity bitmask:
+
+# mkdir newlock
+# echo pseudo-locksetup > newlock/mode
+# echo 'L2:1=0x3' > newlock/schemata
+
+On success the resource group's mode will change to pseudo-locked, the
+bit_usage will reflect the pseudo-locked region, and the character device
+exposing the pseudo-locked region will exist:
+
+# cat newlock/mode
+pseudo-locked
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSPP
+# ls -l /dev/pseudo_lock/newlock
+crw------- 1 root root 243, 0 Apr 3 05:01 /dev/pseudo_lock/newlock
+
+/*
+ * Example code to access one page of pseudo-locked cache region
+ * from user space.
+ */
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/*
+ * It is required that the application runs with affinity to only
+ * cores associated with the pseudo-locked region. Here the cpu
+ * is hardcoded for convenience of example.
+ */
+static int cpuid = 2;
+
+int main(int argc, char *argv[])
+{
+ cpu_set_t cpuset;
+ long page_size;
+ void *mapping;
+ int dev_fd;
+ int ret;
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpuid, &cpuset);
+ ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ if (ret < 0) {
+ perror("sched_setaffinity");
+ exit(EXIT_FAILURE);
+ }
+
+ dev_fd = open("/dev/pseudo_lock/newlock", O_RDWR);
+ if (dev_fd < 0) {
+ perror("open");
+ exit(EXIT_FAILURE);
+ }
+
+ mapping = mmap(0, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ dev_fd, 0);
+ if (mapping == MAP_FAILED) {
+ perror("mmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ /* Application interacts with pseudo-locked memory @mapping */
+
+ ret = munmap(mapping, page_size);
+ if (ret < 0) {
+ perror("munmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ close(dev_fd);
+ exit(EXIT_SUCCESS);
+}
+
+Locking between applications
+----------------------------
Certain operations on the resctrl filesystem, composed of read/writes
to/from multiple files, must be atomic.
@@ -510,7 +884,7 @@ to/from multiple files, must be atomic.
As an example, the allocation of an exclusive reservation of L3 cache
involves:
- 1. Read the cbmmasks from each directory
+ 1. Read the cbmmasks from each directory or the per-resource "bit_usage"
2. Find a contiguous set of bits in the global CBM bitmask that is clear
in any of the directory cbmmasks
3. Create a new directory
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 8d109ef67ab6..ad6d2a80cf05 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -92,9 +92,7 @@ APICs
Timing
notsc
- Don't use the CPU time stamp counter to read the wall time.
- This can be used to work around timing problems on multiprocessor systems
- with not properly synchronized CPUs.
+ Deprecated, use tsc=unstable instead.
nohpet
Don't use the HPET timer.
@@ -156,6 +154,10 @@ NUMA
If given as an integer, fills all system RAM with N fake nodes
interleaved over physical nodes.
+ numa=fake=<N>U
+ If given as an integer followed by 'U', it will divide each
+ physical node into N emulated nodes.
+
ACPI
acpi=off Don't enable ACPI
diff --git a/MAINTAINERS b/MAINTAINERS
index 7cebd5bba8a8..0a2342770dee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5930,7 +5930,7 @@ F: Documentation/dev-tools/gcov.rst
GDB KERNEL DEBUGGING HELPER SCRIPTS
M: Jan Kiszka <jan.kiszka@siemens.com>
-M: Kieran Bingham <kieran@bingham.xyz>
+M: Kieran Bingham <kbingham@kernel.org>
S: Supported
F: scripts/gdb/
@@ -8317,10 +8317,16 @@ M: Jade Alglave <j.alglave@ucl.ac.uk>
M: Luc Maranget <luc.maranget@inria.fr>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
R: Akira Yokosawa <akiyks@gmail.com>
+R: Daniel Lustig <dlustig@nvidia.com>
L: linux-kernel@vger.kernel.org
+L: linux-arch@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: tools/memory-model/
+F: Documentation/atomic_bitops.txt
+F: Documentation/atomic_t.txt
+F: Documentation/core-api/atomic_ops.rst
+F: Documentation/core-api/refcount-vs-atomic.rst
F: Documentation/memory-barriers.txt
LINUX SECURITY MODULE (LSM) FRAMEWORK
@@ -12039,9 +12045,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/
X: Documentation/RCU/torture.txt
F: include/linux/rcu*
-X: include/linux/srcu.h
+X: include/linux/srcu*.h
F: kernel/rcu/
-X: kernel/torture.c
+X: kernel/rcu/srcu*.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
@@ -12402,7 +12408,6 @@ F: drivers/pci/hotplug/s390_pci_hpc.c
S390 VFIO-CCW DRIVER
M: Cornelia Huck <cohuck@redhat.com>
-M: Dong Jia Shi <bjsdjshi@linux.ibm.com>
M: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
@@ -13078,8 +13083,8 @@ L: linux-kernel@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F: include/linux/srcu.h
-F: kernel/rcu/srcu.c
+F: include/linux/srcu*.h
+F: kernel/rcu/srcu*.c
SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -14438,6 +14443,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/torture.c
F: kernel/rcu/rcutorture.c
+F: kernel/rcu/rcuperf.c
F: kernel/locking/locktorture.c
TOSHIBA ACPI EXTRAS DRIVER
diff --git a/Makefile b/Makefile
index 7a3c4548162b..863f58503bee 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Merciless Moray
# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 767bfdd42992..150a1c5d6a2c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -18,11 +18,11 @@
* To ensure dependency ordering is preserved for the _relaxed and
* _release atomics, an smp_read_barrier_depends() is unconditionally
* inserted into the _relaxed variants, which are used to build the
- * barriered versions. To avoid redundant back-to-back fences, we can
- * define the _acquire and _fence versions explicitly.
+ * barriered versions. Avoid redundant back-to-back fences in the
+ * _acquire and _fence versions.
*/
-#define __atomic_op_acquire(op, args...) op##_relaxed(args)
-#define __atomic_op_fence __atomic_op_release
+#define __atomic_acquire_fence()
+#define __atomic_post_full_fence()
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -235,38 +235,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-
+#define atomic_fetch_add_unless atomic_fetch_add_unless
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
- long c, tmp;
+ long c, new, old;
smp_mb();
__asm__ __volatile__(
- "1: ldq_l %[tmp],%[mem]\n"
- " cmpeq %[tmp],%[u],%[c]\n"
- " addq %[tmp],%[a],%[tmp]\n"
+ "1: ldq_l %[old],%[mem]\n"
+ " cmpeq %[old],%[u],%[c]\n"
+ " addq %[old],%[a],%[new]\n"
" bne %[c],2f\n"
- " stq_c %[tmp],%[mem]\n"
- " beq %[tmp],3f\n"
+ " stq_c %[new],%[mem]\n"
+ " beq %[new],3f\n"
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
- : [tmp] "=&r"(tmp), [c] "=&r"(c)
+ : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
: "memory");
smp_mb();
- return !c;
+ return old;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
@@ -295,31 +296,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic64_inc(v) atomic64_add(1,(v))
-
-#define atomic_dec(v) atomic_sub(1,(v))
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
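
The fetch_add_unless() return-value convention keeps the classic
inc-not-zero idiom a one-liner; a brief usage sketch (obj and refs are
illustrative names only):

	/* Take a reference only if the refcount is not already zero. */
	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
		return NULL;	/* the object was already dead */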
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 11859287c52a..4e0072730241 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -187,7 +187,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define atomic_andnot atomic_andnot
+#define atomic_fetch_andnot atomic_fetch_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -296,8 +297,6 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
@@ -308,48 +307,6 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v
- */
-#define __atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- \
- /* \
- * Explicit full memory barrier needed before/after as \
- * LLOCK/SCOND thmeselves don't provide any such semantics \
- */ \
- smp_mb(); \
- \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
- c = old; \
- \
- smp_mb(); \
- \
- c; \
-})
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-
-
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
@@ -472,7 +429,8 @@ static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define atomic64_andnot atomic64_andnot
+#define atomic64_fetch_andnot atomic64_fetch_andnot
ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
@@ -559,53 +517,43 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return val;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * if (v != u) { v += a; ret = 1} else {ret = 0}
- * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ * Atomically adds @a to @v, if it was not @u.
+ * Returns the old value of @v.
*/
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- long long val;
- int op_done;
+ long long old, temp;
smp_mb();
__asm__ __volatile__(
"1: llockd %0, [%2] \n"
- " mov %1, 1 \n"
" brne %L0, %L4, 2f # continue to add since v != u \n"
" breq.d %H0, %H4, 3f # return since v == u \n"
- " mov %1, 0 \n"
"2: \n"
- " add.f %L0, %L0, %L3 \n"
- " adc %H0, %H0, %H3 \n"
- " scondd %0, [%2] \n"
+ " add.f %L1, %L0, %L3 \n"
+ " adc %H1, %H0, %H3 \n"
+ " scondd %1, [%2] \n"
" bnz 1b \n"
"3: \n"
- : "=&r"(val), "=&r" (op_done)
+ : "=&r"(old), "=&r" (temp)
: "r"(&v->counter), "r"(a), "r"(u)
: "cc"); /* memory clobber comes from smp_mb() */
smp_mb();
- return op_done;
+ return old;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 2e52d18e6bc7..2c1b479d5aea 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -45,8 +45,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 42b05046fad9..df35d4c0b0b8 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -225,24 +225,18 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
- * non-zero - which is expected from setjmp_pre_handler for
- * jprobe, we return without single stepping and leave that to
- * the break-handler which is invoked by a kprobe from
- * jprobe_return
+ * non-zero - which means the user handler set up registers to
+ * resume at another instruction - we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
}
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- setup_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
- return 1;
- }
}
/* no_kprobe: */
@@ -386,38 +380,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long sp_addr = regs->sp;
-
- kcb->jprobe_saved_regs = *regs;
- memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
- regs->ret = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- __asm__ __volatile__("unimp_s");
- return;
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long sp_addr;
-
- *regs = kcb->jprobe_saved_regs;
- sp_addr = regs->sp;
- memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
- preempt_enable_no_resched();
-
- return 1;
-}
-
static void __used kretprobe_trampoline_holder(void)
{
__asm__ __volatile__(".global kretprobe_trampoline\n"
@@ -483,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->ret = orig_ret_address;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 843edfd000be..0f328d639d51 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,6 +9,7 @@ config ARM
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SET_MEMORY
@@ -337,8 +338,8 @@ config ARCH_MULTIPLATFORM
select TIMER_OF
select COMMON_CLK
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select MIGHT_HAVE_PCI
- select MULTI_IRQ_HANDLER
select PCI_DOMAINS if PCI
select SPARSE_IRQ
select USE_OF
@@ -465,9 +466,9 @@ config ARCH_DOVE
bool "Marvell Dove"
select CPU_PJ4
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select MIGHT_HAVE_PCI
- select MULTI_IRQ_HANDLER
select MVEBU_MBUS
select PINCTRL
select PINCTRL_DOVE
@@ -512,8 +513,8 @@ config ARCH_LPC32XX
select COMMON_CLK
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
- select MULTI_IRQ_HANDLER
select SPARSE_IRQ
select USE_OF
help
@@ -532,11 +533,11 @@ config ARCH_PXA
select TIMER_OF
select CPU_XSCALE if !CPU_XSC3
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
select PLAT_PXA
select SPARSE_IRQ
help
@@ -572,11 +573,11 @@ config ARCH_SA1100
select CPU_FREQ
select CPU_SA1100
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select ISA
- select MULTI_IRQ_HANDLER
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
@@ -590,10 +591,10 @@ config ARCH_S3C24XX
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
select GPIOLIB
+ select GENERIC_IRQ_MULTI_HANDLER
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
- select MULTI_IRQ_HANDLER
select NEED_MACH_IO_H
select SAMSUNG_ATAGS
select USE_OF
@@ -627,10 +628,10 @@ config ARCH_OMAP1
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
select NEED_MACH_IO_H if PCCARD
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
@@ -921,11 +922,6 @@ config IWMMXT
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
-config MULTI_IRQ_HANDLER
- bool
- help
- Allow each machine to specify it's own IRQ handler at run time.
-
if !MMU
source "arch/arm/Kconfig-nommu"
endif
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index fc26c3d7b9b6..62ebeae9f837 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -46,12 +46,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__ARMEB__
AS += -EB
-LD += -EB
+LDFLAGS += -EB
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__ARMEL__
AS += -EL
-LD += -EL
+LDFLAGS += -EL
endif
#
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 0cd4dccbae78..b17ee03d280b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -460,6 +460,10 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
adds \tmp, \addr, #\size - 1
sbcccs \tmp, \tmp, \limit
bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
#endif
.endm
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 66d0e215a773..f74756641410 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -156,6 +156,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -215,15 +216,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
-
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
- c = old;
- return c;
-}
+#define atomic_fetch_andnot atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */
@@ -254,17 +247,6 @@ ATOMIC_OPS(xor, ^=, eor)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
-#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
long long counter;
@@ -494,12 +476,13 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return result;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- long long val;
+ long long oldval, newval;
unsigned long tmp;
- int ret = 1;
smp_mb();
prefetchw(&v->counter);
@@ -508,33 +491,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
-" moveq %1, #0\n"
" beq 2f\n"
-" adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
-" strexd %2, %0, %H0, [%4]\n"
+" adds %Q1, %Q0, %Q6\n"
+" adc %R1, %R0, %R6\n"
+" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
- : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
- if (ret)
+ if (oldval != u)
smp_mb();
- return ret;
+ return oldval;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 4cab9bb823fb..c92e42a5c8f7 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -215,7 +215,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
-#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
@@ -223,93 +222,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#else
-static inline int constant_fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
-/*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency. __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
- */
-static inline unsigned int __clz(unsigned int x)
-{
- unsigned int ret;
-
- asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-
- return ret;
-}
-
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
- if (__builtin_constant_p(x))
- return constant_fls(x);
-
- return 32 - __clz(x);
-}
-
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
- return fls(x) - 1;
-}
-
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
- return fls(x & -x);
-}
-
/*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 this is even
+ * better, since the rbit instruction can also be used.
*/
-static inline unsigned long __ffs(unsigned long x)
-{
- return ffs(x) - 1;
-}
-
-#define ffz(x) __ffs( ~(x) )
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
#endif
+#include <asm-generic/bitops/ffz.h>
+
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 17f1f1a814ff..38badaae8d9d 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -58,6 +58,9 @@ void efi_virtmap_unload(void);
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
#define efi_is_64bit() (false)
+#define efi_table_attr(table, attr, instance) \
+ ((table##_t *)instance)->attr
+
#define efi_call_proto(protocol, f, instance, ...) \
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index e46e4e7bdba3..ac54c06764e6 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -111,14 +111,17 @@ static inline void decode_ctrl_reg(u32 reg,
asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
} while (0)
+struct perf_event_attr;
struct notifier_block;
struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index b6f319606e30..c883fcbe93b6 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -31,11 +31,6 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
#ifdef CONFIG_SMP
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 59655459da59..82290f212d8e 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -44,8 +44,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 5c1ad11aa392..bb8851208e17 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -59,7 +59,7 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
void (*restart)(enum reboot_mode, const char *);
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 0f79e4dec7f9..4ac3a019a46f 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -13,7 +13,6 @@
extern void timer_tick(void);
typedef void (*clock_access_fn)(struct timespec64 *);
-extern int register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent);
+extern int register_persistent_clock(clock_access_fn read_persistent);
#endif
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
index 1e5b9bb92270..991c9127c650 100644
--- a/arch/arm/include/asm/probes.h
+++ b/arch/arm/include/asm/probes.h
@@ -51,7 +51,6 @@ struct arch_probes_insn {
* We assume one instruction can consume at most 64 bytes stack, which is
* 'push {r0-r15}'. Instructions that consume more or unknown stack space,
* like 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', should be prohibited
* from being probed.
- * Both kprobe and jprobe use this macro.
*/
#define MAX_STACK_SIZE 64
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e71cc35de163..9b37b6ab27fe 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -123,8 +123,8 @@ struct user_vfp_exc;
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
#endif
/*
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index d5562f9ce600..f854148c8d7c 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -292,5 +292,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
{
}
+static inline void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+}
+
+static inline void tlb_flush_remove_tables_local(void *arg)
+{
+}
+
#endif /* CONFIG_MMU */
#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 3d614e90c19f..5451e1f05a19 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -85,6 +85,13 @@ static inline void set_fs(mm_segment_t fs)
flag; })
/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, otherwise unsigned long long.
+ */
+#define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
* which read from user space (*get_*) need to take care not to leak
@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
register typeof(*(p)) __user *__p asm("r0") = (p); \
- register typeof(x) __r2 asm("r2"); \
+ register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : get_fs())
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there. Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
+
/*
* The "__xxx" versions of the user access functions do not verify the
* address space - it must have been done previously with a separate
@@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
__gu_err; \
})
-#define __get_user_error(x, ptr, err) \
-({ \
- __get_user_err((x), (ptr), err); \
- (void) 0; \
-})
-
#define __get_user_err(x, ptr, err) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
@@ -324,6 +335,7 @@ do { \
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
+#endif
#define __put_user_switch(x, ptr, __err, __fn) \
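
The new __inttype() macro sizes the temporary (__r2) used by get_user():
__builtin_choose_expr() selects unsigned long when the argument fits in one
register-width word and unsigned long long otherwise, so 64-bit fetches are
not silently truncated. A self-contained check of that selection
(user-space, C11; the harness is illustrative):

    #include <assert.h>

    #define __inttype(x) \
            __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

    int main(void)
    {
            char c = 0;
            long long ll = 0;

            /* A char always fits in unsigned long. */
            static_assert(sizeof(__inttype(c)) == sizeof(unsigned long), "");
            /* A long long gets a type at least as wide as itself, even on
             * 32-bit targets where unsigned long is too small. */
            static_assert(sizeof(__inttype(ll)) >= sizeof(long long), "");
            return 0;
    }
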
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 179a9f6bd1e3..e85a3af9ddeb 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -22,7 +22,7 @@
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
* Interrupt handling.
*/
.macro irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
badr lr, 9997f
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
.globl cr_alignment
cr_alignment:
.space 4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- .globl handle_arch_irq
-handle_arch_irq:
- .space 4
-#endif
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 7a9b86978ee1..ec29de250076 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -53,7 +53,11 @@ ENTRY(stext)
THUMB(1: )
#endif
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ @ ensure svc mode and all interrupts masked
+ safe_svcmode_maskall r9
@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
@@ -89,7 +93,11 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install_secondary
+#endif
+ safe_svcmode_maskall r9
+
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 629e25152c0d..1d5fbf1d1c67 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -518,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
- if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
default:
@@ -566,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp)
* by the hardware and must be aligned to the appropriate number of
* bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Privilege */
- info->ctrl.privilege = ARM_BREAKPOINT_USER;
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+ hw->ctrl.privilege = ARM_BREAKPOINT_USER;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
/* Mismatch */
- info->ctrl.mismatch = 0;
+ hw->ctrl.mismatch = 0;
return 0;
}
@@ -591,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret = 0;
u32 offset, alignment_mask = 0x3;
@@ -602,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -ENODEV;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
goto out;
/* Check address alignment. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
@@ -617,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
case 1:
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
case 3:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
default:
ret = -EINVAL;
goto out;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
if (is_default_overflow_handler(bp)) {
/*
@@ -640,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -EINVAL;
/* We don't allow mismatch breakpoints in kernel space. */
- if (arch_check_bp_in_kernelspace(bp))
+ if (arch_check_bp_in_kernelspace(hw))
return -EPERM;
/*
@@ -655,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* reports them.
*/
if (!debug_exception_updates_fsr() &&
- (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
- info->ctrl.type == ARM_BREAKPOINT_STORE))
+ (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ hw->ctrl.type == ARM_BREAKPOINT_STORE))
return -EINVAL;
}
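
Together with the header hunk earlier, this moves ARM to the reworked generic
contract: arch_check_bp_in_kernelspace() inspects a plain arch_hw_breakpoint,
and hw_breakpoint_arch_parse() fills a caller-supplied structure from the
perf attributes instead of mutating the event in place. A paraphrased sketch
of the caller pattern this enables (the real generic caller lives in
kernel/events/hw_breakpoint.c; this is an assumed shape, not a quote):

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>

    static int validate_settings(struct perf_event *bp)
    {
            struct arch_hw_breakpoint hw = { };
            int err;

            /* Parse attr into a scratch copy; bp itself is untouched. */
            err = hw_breakpoint_arch_parse(bp, &bp->attr, &hw);
            if (err)
                    return err;

            /* Commit the parsed state only once it is known to be valid. */
            bp->hw.info = hw;
            return 0;
    }
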
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..9908dacf9229 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
uniphier_cache_init();
}
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-#endif
-
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 35ca494c028c..4c249cb261f3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1145,7 +1145,7 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
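
The ARM and arm64 hunks in this patch can delete their private
handle_arch_irq plumbing because GENERIC_IRQ_MULTI_HANDLER now supplies it
from the generic IRQ core. A paraphrased sketch of the shared implementation
being selected (details may differ from the actual generic code):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/ptrace.h>

    /* Single, write-once hook the arch entry code dispatches through. */
    void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;

    int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
    {
            /* First registration wins; later callers get an error. */
            if (handle_arch_irq)
                    return -EBUSY;

            handle_arch_irq = handle_irq;
            return 0;
    }
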
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index dec130e7078c..b8f766cf3a90 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -150,22 +150,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
static int restore_vfp_context(char __user **auxp)
{
- struct vfp_sigframe __user *frame =
- (struct vfp_sigframe __user *)*auxp;
- unsigned long magic;
- unsigned long size;
- int err = 0;
-
- __get_user_error(magic, &frame->magic, err);
- __get_user_error(size, &frame->size, err);
+ struct vfp_sigframe frame;
+ int err;
+ err = __copy_from_user(&frame, *auxp, sizeof(frame));
if (err)
- return -EFAULT;
- if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return err;
+
+ if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
return -EINVAL;
- *auxp += size;
- return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+ *auxp += sizeof(frame);
+ return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}
#endif
@@ -176,6 +172,7 @@ static int restore_vfp_context(char __user **auxp)
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
+ struct sigcontext context;
char __user *aux;
sigset_t set;
int err;
@@ -184,23 +181,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
if (err == 0)
set_current_blocked(&set);
- __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
- __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
- __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
- __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
- __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
- __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
- __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
- __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
- __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
- __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
- __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
- __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
- __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
- __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
- __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
- __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
- __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+ err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
+ if (err == 0) {
+ regs->ARM_r0 = context.arm_r0;
+ regs->ARM_r1 = context.arm_r1;
+ regs->ARM_r2 = context.arm_r2;
+ regs->ARM_r3 = context.arm_r3;
+ regs->ARM_r4 = context.arm_r4;
+ regs->ARM_r5 = context.arm_r5;
+ regs->ARM_r6 = context.arm_r6;
+ regs->ARM_r7 = context.arm_r7;
+ regs->ARM_r8 = context.arm_r8;
+ regs->ARM_r9 = context.arm_r9;
+ regs->ARM_r10 = context.arm_r10;
+ regs->ARM_fp = context.arm_fp;
+ regs->ARM_ip = context.arm_ip;
+ regs->ARM_sp = context.arm_sp;
+ regs->ARM_lr = context.arm_lr;
+ regs->ARM_pc = context.arm_pc;
+ regs->ARM_cpsr = context.arm_cpsr;
+ }
err |= !valid_user_regs(regs);
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 1df21a61e379..f0dd4b6ebb63 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
- __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
- __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
- __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+ struct oabi_sembuf osb;
+ err |= __copy_from_user(&osb, tsops, sizeof(osb));
+ sops[i].sem_num = osb.sem_num;
+ sops[i].sem_op = osb.sem_op;
+ sops[i].sem_flg = osb.sem_flg;
tsops++;
}
if (timeout) {
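
This hunk shows the conversion used throughout the patch's user-access
changes: one bounds-checked __copy_from_user() of the whole structure into a
kernel-stack copy replaces a run of per-field __get_user_error() calls, which
both retires the removed macro and pays the Spectre-hardened limit check once
per structure. The generic shape (type and field names illustrative):

    #include <linux/uaccess.h>

    struct demo_rec { short num; short op; short flg; };

    static int read_rec(struct demo_rec *dst, const struct demo_rec __user *src)
    {
            struct demo_rec tmp;

            /* One checked bulk copy instead of three field reads. */
            if (copy_from_user(&tmp, src, sizeof(tmp)))
                    return -EFAULT;

            *dst = tmp;
            return 0;
    }
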
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index cf2701cb0de8..078b259ead4e 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -83,29 +83,18 @@ static void dummy_clock_access(struct timespec64 *ts)
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;
void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
}
-void read_boot_clock64(struct timespec64 *ts)
-{
- __read_boot_clock(ts);
-}
-
-int __init register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent)
+int __init register_persistent_clock(clock_access_fn read_persistent)
{
/* Only allow the clock access functions to be registered once */
- if (__read_persistent_clock == dummy_clock_access &&
- __read_boot_clock == dummy_clock_access) {
- if (read_boot)
- __read_boot_clock = read_boot;
+ if (__read_persistent_clock == dummy_clock_access) {
if (read_persistent)
__read_persistent_clock = read_persistent;
-
return 0;
}
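
With read_boot_clock64() and its registration path gone, callers hand over
only the persistent-clock accessor; the counter_32k.c hunk below is the
in-tree conversion, and a minimal caller now looks roughly like this (the
my_* names are illustrative):

    #include <linux/init.h>
    #include <linux/time64.h>
    #include <asm/mach/time.h>

    static void my_read_persistent_clock64(struct timespec64 *ts)
    {
            /* Illustrative stub: fill *ts from a battery-backed counter. */
            ts->tv_sec = 0;
            ts->tv_nsec = 0;
    }

    static int __init my_timer_init(void)
    {
            /* Single-argument form introduced by this patch. */
            return register_persistent_clock(my_read_persistent_clock64);
    }
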
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a4b06049001..a826df3d3814 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -90,6 +90,15 @@
.text
ENTRY(arm_copy_from_user)
+#ifdef CONFIG_CPU_SPECTRE
+ get_thread_info r3
+ ldr r3, [r3, #TI_ADDR_LIMIT]
+ adds ip, r1, r2 @ ip=addr+size
+ sub r3, r3, #1 @ addr_limit - 1
+ cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
+ movcs r1, #0 @ addr = NULL
+ csdb
+#endif
#include "copy_template.S"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 96a7b6cf459b..b169e580bf82 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -702,7 +702,6 @@ config ARM_THUMBEE
config ARM_VIRT_EXT
bool
- depends on MMU
default y if CPU_V7
help
Enable the kernel to make use of the ARM Virtualization
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 5dd6c58d653b..7d67c70bbded 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -53,7 +53,8 @@ static inline bool security_extensions_enabled(void)
{
/* Check CPUID Identification Scheme before ID_PFR1 read */
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
- return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+ return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
+ cpuid_feature_extract(CPUID_EXT_PFR1, 20);
return 0;
}
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
index 8015ad434a40..24101925fe64 100644
--- a/arch/arm/mm/tcm.h
+++ b/arch/arm/mm/tcm.h
@@ -11,7 +11,7 @@
void __init tcm_init(void);
#else
/* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
+static inline void tcm_init(void)
{
}
#endif
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 2438b96004c1..fcc5bfec8bd1 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -110,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
- register_persistent_clock(NULL, omap_read_persistent_clock64);
+ register_persistent_clock(omap_read_persistent_clock64);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index e90cc8a08186..f8bd523d64d1 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -47,9 +47,6 @@
(unsigned long)(addr) + \
(size))
-/* Used as a marker in ARM_pc to note when we're in a jprobe. */
-#define JPROBE_MAGIC_ADDR 0xffffffff
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -289,8 +286,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
break;
case KPROBE_REENTER:
/* A nested probe was hit in FIQ; it is a BUG */
- pr_warn("Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
/* fall through */
default:
/* impossible cases */
@@ -303,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
- * continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry,
- * so get out doing nothing more here.
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it will
+ * modify the execution path, so there is no need to
+ * single-step. Just reset the current kprobe and exit.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -315,20 +312,9 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
- reset_current_kprobe();
- }
- }
- } else if (cur) {
- /* We probably hit a jprobe. Call its break handler. */
- if (cur->break_handler && cur->break_handler(cur, regs)) {
- kcb->kprobe_status = KPROBE_HIT_SS;
- singlestep(cur, regs, kcb);
- if (cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
}
+ reset_current_kprobe();
}
- reset_current_kprobe();
} else {
/*
* The probe was removed and a race is in progress.
@@ -521,117 +507,6 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long sp_addr = regs->ARM_sp;
- long cpsr;
-
- kcb->jprobe_saved_regs = *regs;
- memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
- regs->ARM_pc = (long)jp->entry;
-
- cpsr = regs->ARM_cpsr | PSR_I_BIT;
-#ifdef CONFIG_THUMB2_KERNEL
- /* Set correct Thumb state in cpsr */
- if (regs->ARM_pc & 1)
- cpsr |= PSR_T_BIT;
- else
- cpsr &= ~PSR_T_BIT;
-#endif
- regs->ARM_cpsr = cpsr;
-
- preempt_disable();
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- __asm__ __volatile__ (
- /*
- * Setup an empty pt_regs. Fill SP and PC fields as
- * they're needed by longjmp_break_handler.
- *
- * We allocate some slack between the original SP and start of
- * our fabricated regs. To be precise we want to have worst case
- * covered which is STMFD with all 16 regs so we allocate 2 *
- * sizeof(struct_pt_regs)).
- *
- * This is to prevent any simulated instruction from writing
- * over the regs when they are accessing the stack.
- */
-#ifdef CONFIG_THUMB2_KERNEL
- "sub r0, %0, %1 \n\t"
- "mov sp, r0 \n\t"
-#else
- "sub sp, %0, %1 \n\t"
-#endif
- "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
- "str %0, [sp, %2] \n\t"
- "str r0, [sp, %3] \n\t"
- "mov r0, sp \n\t"
- "bl kprobe_handler \n\t"
-
- /*
- * Return to the context saved by setjmp_pre_handler
- * and restored by longjmp_break_handler.
- */
-#ifdef CONFIG_THUMB2_KERNEL
- "ldr lr, [sp, %2] \n\t" /* lr = saved sp */
- "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */
- "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */
- "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */
- /* rfe context */
- "ldmia sp, {r0 - r12} \n\t"
- "mov sp, lr \n\t"
- "ldr lr, [sp], #4 \n\t"
- "rfeia sp! \n\t"
-#else
- "ldr r0, [sp, %4] \n\t"
- "msr cpsr_cxsf, r0 \n\t"
- "ldmia sp, {r0 - pc} \n\t"
-#endif
- :
- : "r" (kcb->jprobe_saved_regs.ARM_sp),
- "I" (sizeof(struct pt_regs) * 2),
- "J" (offsetof(struct pt_regs, ARM_sp)),
- "J" (offsetof(struct pt_regs, ARM_pc)),
- "J" (offsetof(struct pt_regs, ARM_cpsr)),
- "J" (offsetof(struct pt_regs, ARM_lr))
- : "memory", "cc");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
- long orig_sp = regs->ARM_sp;
- struct jprobe *jp = container_of(p, struct jprobe, kp);
-
- if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
- if (orig_sp != stack_addr) {
- struct pt_regs *saved_regs =
- (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
- printk("current sp %lx does not match saved sp %lx\n",
- orig_sp, stack_addr);
- printk("Saved registers for jprobe %p\n", jp);
- show_regs(saved_regs);
- printk("Current registers\n");
- show_regs(regs);
- BUG();
- }
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
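
The deletion above removes ARM's jprobe machinery (setjmp_pre_handler(),
jprobe_return(), longjmp_break_handler()) along with the break_handler path
in kprobe_handler(); jprobes were retired tree-wide in this timeframe. Code
that used a jprobe to observe a function's arguments migrates to a plain
kprobe pre-handler that reads them from pt_regs. A rough sketch of that
pattern (the probe target is hypothetical; per the AAPCS, ARM passes the
first four arguments in r0-r3):

    #include <linux/kprobes.h>

    /* Illustrative replacement for a jprobe on a function taking two ints. */
    static int demo_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("arg0=%ld arg1=%ld\n", regs->ARM_r0, regs->ARM_r1);
            return 0;       /* 0: continue with normal kprobe processing */
    }

    static struct kprobe demo_kp = {
            .symbol_name    = "demo_target",        /* hypothetical symbol */
            .pre_handler    = demo_pre,
    };

    static int __init demo_init(void)
    {
            return register_kprobe(&demo_kp);
    }
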
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index 14db14152909..cc237fa9b90f 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -1461,7 +1461,6 @@ fail:
print_registers(&result_regs);
if (mem) {
- pr_err("current_stack=%p\n", current_stack);
pr_err("expected_memory:\n");
print_memory(expected_memory, mem_size);
pr_err("result_memory:\n");
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index a81404c09d5d..94516c40ebd3 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -8,8 +8,5 @@
# asflags-y := -DDEBUG
KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
-LDFLAGS +=--no-warn-mismatch
-obj-y += vfp.o
-
-vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 35d0f823e823..dc7e6b50ef67 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -596,13 +596,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
}
/* Sanitise and restore the current VFP state from the provided structures. */
-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
- struct user_vfp_exc __user *ufp_exc)
+int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
unsigned long fpexc;
- int err = 0;
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
@@ -611,17 +609,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers; see asm/hwcap.h for details.
*/
- err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
- sizeof(hwstate->fpregs));
+ memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
- __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+ hwstate->fpscr = ufp->fpscr;
/*
* Sanitise and restore the exception registers.
*/
- __get_user_error(fpexc, &ufp_exc->fpexc, err);
+ fpexc = ufp_exc->fpexc;
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
@@ -630,10 +627,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
hwstate->fpexc = fpexc;
- __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
- __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+ hwstate->fpinst = ufp_exc->fpinst;
+ hwstate->fpinst2 = ufp_exc->fpinst2;
- return err ? -EFAULT : 0;
+ return 0;
}
/*
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 42c090cf0292..3d1011957823 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -74,6 +74,7 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -264,9 +265,6 @@ config ARCH_SUPPORTS_UPROBES
config ARCH_PROC_KCORE_TEXT
def_bool y
-config MULTI_IRQ_HANDLER
- def_bool y
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 88f5aef7934c..e3a375c4cb83 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -19,33 +19,24 @@
* u32 *macp, u8 const rk[], u32 rounds);
*/
ENTRY(ce_aes_ccm_auth_data)
- frame_push 7
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
-
- ldr w25, [x22] /* leftover from prev round? */
+ ldr w8, [x3] /* leftover from prev round? */
ld1 {v0.16b}, [x0] /* load mac */
- cbz w25, 1f
- sub w25, w25, #16
+ cbz w8, 1f
+ sub w8, w8, #16
eor v1.16b, v1.16b, v1.16b
-0: ldrb w7, [x20], #1 /* get 1 byte of input */
- subs w21, w21, #1
- add w25, w25, #1
+0: ldrb w7, [x1], #1 /* get 1 byte of input */
+ subs w2, w2, #1
+ add w8, w8, #1
ins v1.b[0], w7
ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
beq 8f /* out of input? */
- cbnz w25, 0b
+ cbnz w8, 0b
eor v0.16b, v0.16b, v1.16b
-1: ld1 {v3.4s}, [x23] /* load first round key */
- prfm pldl1strm, [x20]
- cmp w24, #12 /* which key size? */
- add x6, x23, #16
- sub w7, w24, #2 /* modified # of rounds */
+1: ld1 {v3.4s}, [x4] /* load first round key */
+ prfm pldl1strm, [x1]
+ cmp w5, #12 /* which key size? */
+ add x6, x4, #16
+ sub w7, w5, #2 /* modified # of rounds */
bmi 2f
bne 5f
mov v5.16b, v3.16b
@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
ld1 {v5.4s}, [x6], #16 /* load next round key */
bpl 3b
aese v0.16b, v4.16b
- subs w21, w21, #16 /* last data? */
+ subs w2, w2, #16 /* last data? */
eor v0.16b, v0.16b, v5.16b /* final round */
bmi 6f
- ld1 {v1.16b}, [x20], #16 /* load next input block */
+ ld1 {v1.16b}, [x1], #16 /* load next input block */
eor v0.16b, v0.16b, v1.16b /* xor with mac */
- beq 6f
-
- if_will_cond_yield_neon
- st1 {v0.16b}, [x19] /* store mac */
- do_cond_yield_neon
- ld1 {v0.16b}, [x19] /* reload mac */
- endif_yield_neon
-
- b 1b
-6: st1 {v0.16b}, [x19] /* store mac */
+ bne 1b
+6: st1 {v0.16b}, [x0] /* store mac */
beq 10f
- adds w21, w21, #16
+ adds w2, w2, #16
beq 10f
- mov w25, w21
-7: ldrb w7, [x20], #1
+ mov w8, w2
+7: ldrb w7, [x1], #1
umov w6, v0.b[0]
eor w6, w6, w7
- strb w6, [x19], #1
- subs w21, w21, #1
+ strb w6, [x0], #1
+ subs w2, w2, #1
beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b
-8: mov w7, w25
- add w25, w25, #16
+8: mov w7, w8
+ add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1
bne 9b
eor v0.16b, v0.16b, v1.16b
- st1 {v0.16b}, [x19]
-10: str w25, [x22]
-
- frame_pop
+ st1 {v0.16b}, [x0]
+10: str w8, [x3]
ret
ENDPROC(ce_aes_ccm_auth_data)
@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
ENDPROC(ce_aes_ccm_final)
.macro aes_ccm_do_crypt,enc
- frame_push 8
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
- mov x25, x6
-
- ldr x26, [x25, #8] /* load lower ctr */
- ld1 {v0.16b}, [x24] /* load mac */
-CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
+ ldr x8, [x6, #8] /* load lower ctr */
+ ld1 {v0.16b}, [x5] /* load mac */
+CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
0: /* outer loop */
- ld1 {v1.8b}, [x25] /* load upper ctr */
- prfm pldl1strm, [x20]
- add x26, x26, #1
- rev x9, x26
- cmp w23, #12 /* which key size? */
- sub w7, w23, #2 /* get modified # of rounds */
+ ld1 {v1.8b}, [x6] /* load upper ctr */
+ prfm pldl1strm, [x1]
+ add x8, x8, #1
+ rev x9, x8
+ cmp w4, #12 /* which key size? */
+ sub w7, w4, #2 /* get modified # of rounds */
ins v1.d[1], x9 /* no carry in lower ctr */
- ld1 {v3.4s}, [x22] /* load first round key */
- add x10, x22, #16
+ ld1 {v3.4s}, [x3] /* load first round key */
+ add x10, x3, #16
bmi 1f
bne 4f
mov v5.16b, v3.16b
@@ -194,9 +165,9 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
bpl 2b
aese v0.16b, v4.16b
aese v1.16b, v4.16b
- subs w21, w21, #16
- bmi 7f /* partial block? */
- ld1 {v2.16b}, [x20], #16 /* load next input block */
+ subs w2, w2, #16
+ bmi 6f /* partial block? */
+ ld1 {v2.16b}, [x1], #16 /* load next input block */
.if \enc == 1
eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
@@ -205,29 +176,18 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
eor v1.16b, v2.16b, v5.16b /* final round enc */
.endif
eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
- st1 {v1.16b}, [x19], #16 /* write output block */
- beq 5f
-
- if_will_cond_yield_neon
- st1 {v0.16b}, [x24] /* store mac */
- do_cond_yield_neon
- ld1 {v0.16b}, [x24] /* reload mac */
- endif_yield_neon
-
- b 0b
-5:
-CPU_LE( rev x26, x26 )
- st1 {v0.16b}, [x24] /* store mac */
- str x26, [x25, #8] /* store lsb end of ctr (BE) */
-
-6: frame_pop
- ret
-
-7: eor v0.16b, v0.16b, v5.16b /* final round mac */
+ st1 {v1.16b}, [x0], #16 /* write output block */
+ bne 0b
+CPU_LE( rev x8, x8 )
+ st1 {v0.16b}, [x5] /* store mac */
+ str x8, [x6, #8] /* store lsb end of ctr (BE) */
+5: ret
+
+6: eor v0.16b, v0.16b, v5.16b /* final round mac */
eor v1.16b, v1.16b, v5.16b /* final round enc */
- st1 {v0.16b}, [x24] /* store mac */
- add w21, w21, #16 /* process partial tail block */
-8: ldrb w9, [x20], #1 /* get 1 byte of input */
+ st1 {v0.16b}, [x5] /* store mac */
+ add w2, w2, #16 /* process partial tail block */
+7: ldrb w9, [x1], #1 /* get 1 byte of input */
umov w6, v1.b[0] /* get top crypted ctr byte */
umov w7, v0.b[0] /* get top mac byte */
.if \enc == 1
@@ -237,13 +197,13 @@ CPU_LE( rev x26, x26 )
eor w9, w9, w6
eor w7, w7, w9
.endif
- strb w9, [x19], #1 /* store out byte */
- strb w7, [x24], #1 /* store mac byte */
- subs w21, w21, #1
- beq 6b
+ strb w9, [x0], #1 /* store out byte */
+ strb w7, [x5], #1 /* store mac byte */
+ subs w2, w2, #1
+ beq 5b
ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
- b 8b
+ b 7b
.endm
/*
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index dcffb9e77589..c723647b37db 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8)
.endm
.macro pmull_gcm_do_crypt, enc
- frame_push 10
+ ld1 {SHASH.2d}, [x4]
+ ld1 {XL.2d}, [x1]
+ ldr x8, [x5, #8] // load lower counter
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
- mov x25, x6
- mov x26, x7
- .if \enc == 1
- ldr x27, [sp, #96] // first stacked arg
- .endif
-
- ldr x28, [x24, #8] // load lower counter
-CPU_LE( rev x28, x28 )
-
-0: mov x0, x25
- load_round_keys w26, x0
- ld1 {SHASH.2d}, [x23]
- ld1 {XL.2d}, [x20]
+ load_round_keys w7, x6
movi MASK.16b, #0xe1
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+CPU_LE( rev x8, x8 )
shl MASK.2d, MASK.2d, #57
eor SHASH2.16b, SHASH2.16b, SHASH.16b
.if \enc == 1
- ld1 {KS.16b}, [x27]
+ ldr x10, [sp]
+ ld1 {KS.16b}, [x10]
.endif
-1: ld1 {CTR.8b}, [x24] // load upper counter
- ld1 {INP.16b}, [x22], #16
- rev x9, x28
- add x28, x28, #1
- sub w19, w19, #1
+0: ld1 {CTR.8b}, [x5] // load upper counter
+ ld1 {INP.16b}, [x3], #16
+ rev x9, x8
+ add x8, x8, #1
+ sub w0, w0, #1
ins CTR.d[1], x9 // set lower counter
.if \enc == 1
eor INP.16b, INP.16b, KS.16b // encrypt input
- st1 {INP.16b}, [x21], #16
+ st1 {INP.16b}, [x2], #16
.endif
rev64 T1.16b, INP.16b
- cmp w26, #12
- b.ge 4f // AES-192/256?
+ cmp w7, #12
+ b.ge 2f // AES-192/256?
-2: enc_round CTR, v21
+1: enc_round CTR, v21
ext T2.16b, XL.16b, XL.16b, #8
ext IN1.16b, T1.16b, T1.16b, #8
@@ -425,39 +411,27 @@ CPU_LE( rev x28, x28 )
.if \enc == 0
eor INP.16b, INP.16b, KS.16b
- st1 {INP.16b}, [x21], #16
+ st1 {INP.16b}, [x2], #16
.endif
- cbz w19, 3f
+ cbnz w0, 0b
- if_will_cond_yield_neon
- st1 {XL.2d}, [x20]
- .if \enc == 1
- st1 {KS.16b}, [x27]
- .endif
- do_cond_yield_neon
- b 0b
- endif_yield_neon
+CPU_LE( rev x8, x8 )
+ st1 {XL.2d}, [x1]
+ str x8, [x5, #8] // store lower counter
- b 1b
-
-3: st1 {XL.2d}, [x20]
.if \enc == 1
- st1 {KS.16b}, [x27]
+ st1 {KS.16b}, [x10]
.endif
-CPU_LE( rev x28, x28 )
- str x28, [x24, #8] // store lower counter
-
- frame_pop
ret
-4: b.eq 5f // AES-192?
+2: b.eq 3f // AES-192?
enc_round CTR, v17
enc_round CTR, v18
-5: enc_round CTR, v19
+3: enc_round CTR, v19
enc_round CTR, v20
- b 2b
+ b 1b
.endm
/*
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c0235e0ff849..9bca54dda75c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -40,17 +40,6 @@
#include <asm/cmpxchg.h>
-#define ___atomic_add_unless(v, a, u, sfx) \
-({ \
- typeof((v)->counter) c, old; \
- \
- c = atomic##sfx##_read(v); \
- while (c != (u) && \
- (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c; \
- })
-
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
@@ -61,21 +50,11 @@
#define atomic_add_return_release atomic_add_return_release
#define atomic_add_return atomic_add_return
-#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_sub_return_acquire atomic_sub_return_acquire
#define atomic_sub_return_release atomic_sub_return_release
#define atomic_sub_return atomic_sub_return
-#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#define atomic_fetch_add_release atomic_fetch_add_release
@@ -119,13 +98,6 @@
cmpxchg_release(&((v)->counter), (old), (new))
#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
#define atomic_andnot atomic_andnot
/*
@@ -140,21 +112,11 @@
#define atomic64_add_return_release atomic64_add_return_release
#define atomic64_add_return atomic64_add_return
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#define atomic64_sub_return_release atomic64_sub_return_release
#define atomic64_sub_return atomic64_sub_return
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#define atomic64_fetch_add_release atomic64_fetch_add_release
@@ -195,16 +157,9 @@
#define atomic64_cmpxchg_release atomic_cmpxchg_release
#define atomic64_cmpxchg atomic_cmpxchg
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
-#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u)
#define atomic64_andnot atomic64_andnot
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif
#endif
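
The inc/dec macro families and ___atomic_add_unless() removed here are not
lost: equivalents now come from the generic fallbacks in linux/atomic.h,
built on the relaxed/acquire/release primitives this file still provides.
The deleted helper's semantics survive under the atomic_fetch_add_unless()
name, schematically:

    #include <linux/atomic.h>

    /* Schematic cmpxchg loop behind atomic_fetch_add_unless(): add @a
     * unless the value is @u, returning the old value either way. */
    static inline int sketch_fetch_add_unless(atomic_t *v, int a, int u)
    {
            int c = atomic_read(v);

            while (c != u) {
                    int old = atomic_cmpxchg(v, c, c + a);

                    if (old == c)
                            break;  /* our update won */
                    c = old;        /* lost the race; retry with new value */
            }
            return c;
    }
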
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 9c19594ce7cb..10d536b1af74 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -17,22 +17,11 @@
#define __ASM_BITOPS_H
#include <linux/compiler.h>
-#include <asm/barrier.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
-/*
- * Little endian assembly atomic bitops.
- */
-extern void set_bit(int nr, volatile unsigned long *p);
-extern void clear_bit(int nr, volatile unsigned long *p);
-extern void change_bit(int nr, volatile unsigned long *p);
-extern int test_and_set_bit(int nr, volatile unsigned long *p);
-extern int test_and_clear_bit(int nr, volatile unsigned long *p);
-extern int test_and_change_bit(int nr, volatile unsigned long *p);
-
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
@@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p);
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
-
-/*
- * Ext2 is defined to use little-endian byte ordering.
- */
-#define ext2_set_bit_atomic(lock, nr, p) test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p) test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __ASM_BITOPS_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 192d791f1103..7ed320895d1f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
#define efi_is_64bit() (true)
+#define efi_table_attr(table, attr, instance) \
+ ((table##_t *)instance)->attr
+
#define efi_call_proto(protocol, f, instance, ...) \
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 41770766d964..6a53e59ced95 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg,
struct task_struct;
struct notifier_block;
+struct perf_event_attr;
struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a0fee6985e6a..b2b0c6405eb0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,8 +8,6 @@
struct pt_regs;
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
static inline int nr_legacy_irqs(void)
{
return 0;
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 6deb8d726041..d5a44cf859e9 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -48,7 +48,6 @@ struct kprobe_ctlblk {
unsigned long saved_irqflag;
struct prev_kprobe prev_kprobe;
struct kprobe_step_ctx ss_ctx;
- struct pt_regs jprobe_saved_regs;
};
void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 413dbe530da8..8c9644376326 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -421,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_3:
- info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_5:
- info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
break;
case HW_BREAKPOINT_LEN_6:
- info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
break;
case HW_BREAKPOINT_LEN_7:
- info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
@@ -478,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp)
* AArch32 also requires breakpoints of length 2 for Thumb.
* Watchpoints can be of length 1, 2, 4 or 8 bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
if (is_compat_bp(bp)) {
- if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
- } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
+ } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
/*
* FIXME: Some tools (I'm looking at you perf) assume
* that breakpoints should be sizeof(long). This
* is nonsense. For now, we fix up the parameter
* but we should probably return -EINVAL instead.
*/
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
}
}
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/*
* Privilege
* Note that we disallow combined EL0/EL1 breakpoints because
* that would complicate the stepping code.
*/
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
else
- info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
return 0;
}
@@ -516,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret;
u64 alignment_mask, offset;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
@@ -537,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* that here.
*/
if (is_compat_bp(bp)) {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
else
alignment_mask = 0x3;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
break;
case 1:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
default:
return -EINVAL;
}
} else {
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
alignment_mask = 0x3;
else
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
/*
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
*/
- if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
+ if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
return -EINVAL;
return 0;
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 60e5fc661f74..780a12f59a8f 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-void (*handle_arch_irq)(struct pt_regs *) = NULL;
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-
#ifdef CONFIG_VMAP_STACK
static void init_irq_stacks(void)
{
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d849d9804011..e78c3ef04d95 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
- pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+ pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
break;
@@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry,
- * so get out doing nothing more here.
+ * pre-handler and it returned non-zero, it will
+ * modify the execution path, so there is no need to
+ * single-step. Just reset the current kprobe and exit.
*
* pre_handler can hit a breakpoint and can step through
* before returning; keep PSTATE D-flag enabled until
@@ -405,16 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs, kcb, 0);
- return;
- }
- }
- } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
- BRK64_OPCODE_KPROBES) && cur_kprobe) {
- /* We probably hit a jprobe. Call its break handler. */
- if (cur_kprobe->break_handler &&
- cur_kprobe->break_handler(cur_kprobe, regs)) {
- setup_singlestep(cur_kprobe, regs, kcb, 0);
- return;
+ } else
+ reset_current_kprobe();
}
}
/*
@@ -465,74 +457,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
return DBG_HOOK_HANDLED;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- /*
- * Since we can't be sure where in the stack frame "stacked"
- * pass-by-value arguments are stored we just don't try to
- * duplicate any of the stack. Do not use jprobes on functions that
- * use more than 64 bytes (after padding each to an 8 byte boundary)
- * of arguments, or pass individual arguments larger than 16 bytes.
- */
-
- instruction_pointer_set(regs, (unsigned long) jp->entry);
- preempt_disable();
- pause_graph_tracing();
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- /*
- * Jprobe handler return by entering break exception,
- * encoded same as kprobe, but with following conditions
- * -a special PC to identify it from the other kprobes.
- * -restore stack addr to original saved pt_regs
- */
- asm volatile(" mov sp, %0 \n"
- "jprobe_return_break: brk %1 \n"
- :
- : "r" (kcb->jprobe_saved_regs.sp),
- "I" (BRK64_ESR_KPROBES)
- : "memory");
-
- unreachable();
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_addr = kcb->jprobe_saved_regs.sp;
- long orig_sp = kernel_stack_pointer(regs);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- extern const char jprobe_return_break[];
-
- if (instruction_pointer(regs) != (u64) jprobe_return_break)
- return 0;
-
- if (orig_sp != stack_addr) {
- struct pt_regs *saved_regs =
- (struct pt_regs *)kcb->jprobe_saved_regs.sp;
- pr_err("current sp %lx does not match saved sp %lx\n",
- orig_sp, stack_addr);
- pr_err("Saved registers for jprobe %p\n", jp);
- __show_regs(saved_regs);
- pr_err("Current registers\n");
- __show_regs(regs);
- BUG();
- }
- unpause_graph_tracing();
- *regs = kcb->jprobe_saved_regs;
- preempt_enable_no_resched();
- return 1;
-}
-
bool arch_within_kprobe_blacklist(unsigned long addr)
{
if ((addr >= (unsigned long)__kprobes_text_start &&
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 137710f4dac3..68755fd70dcf 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
+lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
deleted file mode 100644
index 43ac736baa5b..000000000000
--- a/arch/arm64/lib/bitops.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Based on arch/arm/lib/bitops.h
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/lse.h>
-
-/*
- * x0: bits 5:0 bit offset
- * bits 31:6 word offset
- * x1: address
- */
- .macro bitop, name, llsc, lse
-ENTRY( \name )
- and w3, w0, #63 // Get bit offset
- eor w0, w0, w3 // Clear low bits
- mov x2, #1
- add x1, x1, x0, lsr #3 // Get word offset
-alt_lse " prfm pstl1strm, [x1]", "nop"
- lsl x3, x2, x3 // Create mask
-
-alt_lse "1: ldxr x2, [x1]", "\lse x3, [x1]"
-alt_lse " \llsc x2, x2, x3", "nop"
-alt_lse " stxr w0, x2, [x1]", "nop"
-alt_lse " cbnz w0, 1b", "nop"
-
- ret
-ENDPROC(\name )
- .endm
-
- .macro testop, name, llsc, lse
-ENTRY( \name )
- and w3, w0, #63 // Get bit offset
- eor w0, w0, w3 // Clear low bits
- mov x2, #1
- add x1, x1, x0, lsr #3 // Get word offset
-alt_lse " prfm pstl1strm, [x1]", "nop"
- lsl x4, x2, x3 // Create mask
-
-alt_lse "1: ldxr x2, [x1]", "\lse x4, x2, [x1]"
- lsr x0, x2, x3
-alt_lse " \llsc x2, x2, x4", "nop"
-alt_lse " stlxr w5, x2, [x1]", "nop"
-alt_lse " cbnz w5, 1b", "nop"
-alt_lse " dmb ish", "nop"
-
- and x0, x0, #1
- ret
-ENDPROC(\name )
- .endm
-
-/*
- * Atomic bit operations.
- */
- bitop change_bit, eor, steor
- bitop clear_bit, bic, stclr
- bitop set_bit, orr, stset
-
- testop test_and_change_bit, eor, ldeoral
- testop test_and_clear_bit, bic, ldclral
- testop test_and_set_bit, orr, ldsetal
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 493ff75670ff..8ae5d7ae4af3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -977,12 +977,12 @@ int pmd_clear_huge(pmd_t *pmdp)
return 1;
}
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return pud_none(*pud);
}
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return pmd_none(*pmd);
}
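
[Editor's note] The new addr argument threads the virtual address of the mapping down from the ioremap teardown path, so architectures that need it (x86, for its TLB flush) can use it; the arm64 stubs simply ignore it. A paraphrased sketch of the generic caller in lib/ioremap.c, with surrounding loop details elided:

    /* The address being remapped is now handed to the helper that frees
     * the lower-level page table (paraphrased, not verbatim): */
    if (ioremap_pmd_enabled() &&
        ((next - addr) == PMD_SIZE) &&
        IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
        pmd_free_pte_page(pmd, addr)) {
            if (pmd_set_huge(pmd, phys_addr + addr, prot))
                    continue;
    }
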
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 941e7554e886..c6b6a06231b2 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -2,8 +2,10 @@
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__
+#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
+#include <asm/irqflags.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -15,8 +17,6 @@
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#include <linux/kernel.h>
-
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
@@ -69,18 +69,6 @@ ATOMIC_OPS(sub, -=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_inc_return(v) atomic_add_return(1, v)
-#define atomic_dec_return(v) atomic_sub_return(1, v)
-
-#define atomic_inc(v) (void)atomic_inc_return(v)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec(v) (void)atomic_dec_return(v)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -94,7 +82,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
h8300flags flags;
@@ -106,5 +94,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
arch_local_irq_restore(flags);
return ret;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif /* __ARCH_H8300_ATOMIC __ */
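
[Editor's note] The rename from __atomic_add_unless() to atomic_fetch_add_unless() makes the return convention explicit (the old value is returned), and the trailing #define tells the generic atomic headers that this architecture supplies its own version, suppressing the fallback. For reference, a paraphrase of the generic cmpxchg-loop fallback used by architectures that do not define it:

    static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
    {
            int c = atomic_read(v);

            do {
                    if (unlikely(c == u))
                            break;          /* hit the excluded value: no add */
            } while (!atomic_try_cmpxchg(v, &c, c + a));

            return c;                       /* old value, whether or not we added */
    }
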
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index fb3dfb2a667e..311b9894ccc8 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -164,7 +164,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -173,7 +173,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -196,18 +196,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
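
[Editor's note] The block of per-architecture inc/dec macros deleted above is not lost: once atomic_fetch_add_unless() is provided, the generic headers derive the rest. A paraphrased sketch of the derivations that replace the removed hexagon macros:

    static inline bool atomic_add_unless(atomic_t *v, int a, int u)
    {
            return atomic_fetch_add_unless(v, a, u) != u;   /* true if the add happened */
    }

    #define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)
    #define atomic_inc(v)           atomic_add(1, (v))
    #define atomic_dec(v)           atomic_sub(1, (v))
    #define atomic_inc_and_test(v)  (atomic_inc_return(v) == 0)
    #define atomic_dec_and_test(v)  (atomic_dec_return(v) == 0)
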
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 2524fb60fbc2..206530d0751b 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -215,91 +215,10 @@ ATOMIC64_FETCH_OP(xor, ^)
(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
-
-static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ long
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
#define atomic_add(i,v) (void)atomic_add_return((i), (v))
#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
#endif /* _ASM_IA64_ATOMIC_H */
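
[Editor's note] Likewise for ia64: the open-coded cmpxchg loops deleted above now come from the generic layer. One of the less obvious ones, atomic64_dec_if_positive(), keeps the same semantics as the removed loop (decrement only while the result stays non-negative, returning the would-be result either way). A paraphrased sketch of the generic shape:

    static inline long atomic64_dec_if_positive(atomic64_t *v)
    {
            long dec, c = atomic64_read(v);

            do {
                    dec = c - 1;
                    if (unlikely(dec < 0))
                            break;          /* would go negative: leave v alone */
            } while (!atomic64_try_cmpxchg(v, &c, dec));

            return dec;
    }
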
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index 0302b3664789..580356a2eea6 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -82,8 +82,6 @@ struct prev_kprobe {
#define ARCH_PREV_KPROBE_SZ 2
struct kprobe_ctlblk {
unsigned long kprobe_status;
- struct pt_regs jprobe_saved_regs;
- unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
unsigned long *bsp;
unsigned long cfm;
atomic_t prev_kprobe_index;
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h
index 5d742bcb0018..4ca110f0a94b 100644
--- a/arch/ia64/include/uapi/asm/break.h
+++ b/arch/ia64/include/uapi/asm/break.h
@@ -14,7 +14,6 @@
*/
#define __IA64_BREAK_KDB 0x80100
#define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */
-#define __IA64_BREAK_JPROBE 0x82000
/*
* OS-specific break numbers:
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 498f3da3f225..d0c0ccdd656a 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
-obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
deleted file mode 100644
index f69389c7be1d..000000000000
--- a/arch/ia64/kernel/jprobes.S
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Jprobe specific operations
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) Intel Corporation, 2005
- *
- * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
- * <anil.s.keshavamurthy@intel.com> initial implementation
- *
- * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a
- * probe to be inserted into the beginning of a function call. The fundamental
- * difference between a jprobe and a kprobe is the jprobe handler is executed
- * in the same context as the target function, while the kprobe handlers
- * are executed in interrupt context.
- *
- * For jprobes we initially gain control by placing a break point in the
- * first instruction of the targeted function. When we catch that specific
- * break, we:
- * * set the return address to our jprobe_inst_return() function
- * * jump to the jprobe handler function
- *
- * Since we fixed up the return address, the jprobe handler will return to our
- * jprobe_inst_return() function, giving us control again. At this point we
- * are back in the parents frame marker, so we do yet another call to our
- * jprobe_break() function to fix up the frame marker as it would normally
- * exist in the target function.
- *
- * Our jprobe_return function then transfers control back to kprobes.c by
- * executing a break instruction using one of our reserved numbers. When we
- * catch that break in kprobes.c, we continue like we do for a normal kprobe
- * by single stepping the emulated instruction, and then returning execution
- * to the correct location.
- */
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-
- /*
- * void jprobe_break(void)
- */
- .section .kprobes.text, "ax"
-ENTRY(jprobe_break)
- break.m __IA64_BREAK_JPROBE
-END(jprobe_break)
-
- /*
- * void jprobe_inst_return(void)
- */
-GLOBAL_ENTRY(jprobe_inst_return)
- br.call.sptk.many b0=jprobe_break
-END(jprobe_inst_return)
-
-GLOBAL_ENTRY(invalidate_stacked_regs)
- movl r16=invalidate_restore_cfm
- ;;
- mov b6=r16
- ;;
- br.ret.sptk.many b6
- ;;
-invalidate_restore_cfm:
- mov r16=ar.rsc
- ;;
- mov ar.rsc=r0
- ;;
- loadrs
- ;;
- mov ar.rsc=r16
- ;;
- br.cond.sptk.many rp
-END(invalidate_stacked_regs)
-
-GLOBAL_ENTRY(flush_register_stack)
- // flush dirty regs to backing store (must be first in insn group)
- flushrs
- ;;
- br.ret.sptk.many rp
-END(flush_register_stack)
-
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f5f3a5e6fcd1..aa41bd5cf9b7 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -35,8 +35,6 @@
#include <asm/sections.h>
#include <asm/exception.h>
-extern void jprobe_inst_return(void);
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -480,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
*/
break;
}
-
kretprobe_assert(ri, orig_ret_address, trampoline_address);
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
@@ -819,14 +814,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
prepare_ss(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
- } else if (args->err == __IA64_BREAK_JPROBE) {
- /*
- * jprobe instrumented function just completed
- */
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
- }
} else if (!is_ia64_break_inst(regs)) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
@@ -861,15 +848,12 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
set_current_kprobe(p, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
- /*
- * Our pre-handler is specifically requesting that we just
- * do a return. This is used for both the jprobe pre-handler
- * and the kretprobe trampoline
- */
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
#if !defined(CONFIG_PREEMPT)
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
@@ -992,7 +976,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
case DIE_BREAK:
/* err is break number from ia64_bad_break() */
if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
- || args->err == __IA64_BREAK_JPROBE
|| args->err == 0)
if (pre_kprobes_handler(args))
ret = NOTIFY_STOP;
@@ -1040,74 +1023,6 @@ unsigned long arch_deref_entry_point(void *entry)
return ((struct fnptr *)entry)->ip;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr = arch_deref_entry_point(jp->entry);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- struct param_bsp_cfm pa;
- int bytes;
-
- /*
- * Callee owns the argument space and could overwrite it, eg
- * tail call optimization. So to be absolutely safe
- * we save the argument space before transferring the control
- * to instrumented jprobe function which runs in
- * the process context
- */
- pa.ip = regs->cr_iip;
- unw_init_running(ia64_get_bsp_cfm, &pa);
- bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
- - (char *)pa.bsp;
- memcpy( kcb->jprobes_saved_stacked_regs,
- pa.bsp,
- bytes );
- kcb->bsp = pa.bsp;
- kcb->cfm = pa.cfm;
-
- /* save architectural state */
- kcb->jprobe_saved_regs = *regs;
-
- /* after rfi, execute the jprobe instrumented function */
- regs->cr_iip = addr & ~0xFULL;
- ia64_psr(regs)->ri = addr & 0xf;
- regs->r1 = ((struct fnptr *)(jp->entry))->gp;
-
- /*
- * fix the return address to our jprobe_inst_return() function
- * in the jprobes.S file
- */
- regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
-
- return 1;
-}
-
-/* ia64 does not need this */
-void __kprobes jprobe_return(void)
-{
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- int bytes;
-
- /* restoring architectural state */
- *regs = kcb->jprobe_saved_regs;
-
- /* restoring the original argument space */
- flush_register_stack();
- bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
- - (char *)kcb->bsp;
- memcpy( kcb->bsp,
- kcb->jprobes_saved_stacked_regs,
- bytes );
- invalidate_stacked_regs();
-
- preempt_enable_no_resched();
- return 1;
-}
-
static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};
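
[Editor's note] Two things happen in this file: the jprobe handlers are deleted, and the bookkeeping that a pre_handler used to do on behalf of jprobes (reset_current_kprobe(), preempt_enable_no_resched()) moves into the kprobes core when a pre_handler returns 1. Function-exit instrumentation that formerly paired with jprobes is done with kretprobes; a minimal sketch against a hypothetical symbol:

    #include <linux/kprobes.h>
    #include <linux/module.h>

    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            pr_info("returned %lu\n", regs_return_value(regs));
            return 0;
    }

    static struct kretprobe rp = {
            .handler        = ret_handler,
            .kp.symbol_name = "do_fork",    /* hypothetical probe target */
            .maxactive      = 20,           /* allow some concurrent invocations */
    };

    static int __init rp_init(void)
    {
            return register_kretprobe(&rp);
    }

    static void __exit rp_exit(void)
    {
            unregister_kretprobe(&rp);
    }

    module_init(rp_init);
    module_exit(rp_exit);
    MODULE_LICENSE("GPL");
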
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 785612b576f7..b29f93774d95 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -2,6 +2,7 @@
config M68K
bool
default y
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_IDE
@@ -24,6 +25,10 @@ config M68K
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
+ select DMA_NONCOHERENT_OPS if HAS_DMA
+ select HAVE_MEMBLOCK
+ select ARCH_DISCARD_MEMBLOCK
+ select NO_BOOTMEM
config CPU_BIG_ENDIAN
def_bool y
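
[Editor's note] Selecting DMA_NONCOHERENT_OPS and ARCH_HAS_SYNC_DMA_FOR_DEVICE moves m68k onto the common noncoherent DMA code: instead of providing a full dma_map_ops structure, the architecture only supplies the cache maintenance hook. A paraphrased sketch of what the arch provides, assuming the existing m68k cache helpers:

    void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                                  size_t size, enum dma_data_direction dir)
    {
            switch (dir) {
            case DMA_BIDIRECTIONAL:
            case DMA_TO_DEVICE:
                    cache_push(paddr, size);        /* write back before device reads */
                    break;
            case DMA_FROM_DEVICE:
                    cache_clear(paddr, size);       /* invalidate before device writes */
                    break;
            default:
                    break;
            }
    }
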
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index b2a6bc63f8cd..aef8d42e078d 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -31,7 +31,6 @@ extern void dn_sched_init(irq_handler_t handler);
extern void dn_init_IRQ(void);
extern u32 dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
-extern int dn_dummy_set_clock_mmss(unsigned long);
extern void dn_dummy_reset(void);
#ifdef CONFIG_HEARTBEAT
static void dn_heartbeat(int on);
@@ -156,7 +155,6 @@ void __init config_apollo(void)
arch_gettimeoffset = dn_gettimeoffset;
mach_max_dma_address = 0xffffffff;
mach_hwclk = dn_dummy_hwclk; /* */
- mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */
mach_reset = dn_dummy_reset; /* */
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = dn_heartbeat;
@@ -240,12 +238,6 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) {
}
-int dn_dummy_set_clock_mmss(unsigned long nowtime)
-{
- pr_info("set_clock_mmss\n");
- return 0;
-}
-
void dn_dummy_reset(void) {
dn_serial_print("The end !\n");
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 565c6f06ab0b..bd96702a1ad0 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -81,9 +81,6 @@ extern void atari_sched_init(irq_handler_t);
extern u32 atari_gettimeoffset(void);
extern int atari_mste_hwclk (int, struct rtc_time *);
extern int atari_tt_hwclk (int, struct rtc_time *);
-extern int atari_mste_set_clock_mmss (unsigned long);
-extern int atari_tt_set_clock_mmss (unsigned long);
-
/* ++roman: This is a more elaborate test for an SCC chip, since the plain
* Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -362,13 +359,11 @@ void __init config_atari(void)
ATARIHW_SET(TT_CLK);
pr_cont(" TT_CLK");
mach_hwclk = atari_tt_hwclk;
- mach_set_clock_mmss = atari_tt_set_clock_mmss;
}
if (hwreg_present(&mste_rtc.sec_ones)) {
ATARIHW_SET(MSTE_CLK);
pr_cont(" MSTE_CLK");
mach_hwclk = atari_mste_hwclk;
- mach_set_clock_mmss = atari_mste_set_clock_mmss;
}
if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
hwreg_write(&dma_wd.fdc_speed, 0)) {
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index c549b48174ec..9cca64286464 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -285,69 +285,6 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
return( 0 );
}
-
-int atari_mste_set_clock_mmss (unsigned long nowtime)
-{
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- struct MSTE_RTC val;
- unsigned char rtc_minutes;
-
- mste_read(&val);
- rtc_minutes= val.min_ones + val.min_tens * 10;
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- val.sec_ones = real_seconds % 10;
- val.sec_tens = real_seconds / 10;
- val.min_ones = real_minutes % 10;
- val.min_tens = real_minutes / 10;
- mste_write(&val);
- }
- else
- return -1;
- return 0;
-}
-
-int atari_tt_set_clock_mmss (unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- unsigned char save_control, save_freq_select, rtc_minutes;
-
- save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
- RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
-
- save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
- RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
-
- rtc_minutes = RTC_READ (RTC_MINUTES);
- if (!(save_control & RTC_DM_BINARY))
- rtc_minutes = bcd2bin(rtc_minutes);
-
- /* Since we're only adjusting minutes and seconds, don't interfere
- with hour overflow. This avoids messing with unknown time zones
- but requires your RTC not to be off by more than 30 minutes. */
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- if (!(save_control & RTC_DM_BINARY))
- {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- }
- RTC_WRITE (RTC_SECONDS, real_seconds);
- RTC_WRITE (RTC_MINUTES, real_minutes);
- }
- else
- retval = -1;
-
- RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
- RTC_WRITE (RTC_CONTROL, save_control);
- return retval;
-}
-
/*
* Local variables:
* c-indent-level: 4
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 2cfff4765040..143ee9fa3893 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -41,7 +41,6 @@ static void bvme6000_get_model(char *model);
extern void bvme6000_sched_init(irq_handler_t handler);
extern u32 bvme6000_gettimeoffset(void);
extern int bvme6000_hwclk (int, struct rtc_time *);
-extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
@@ -113,7 +112,6 @@ void __init config_bvme6000(void)
mach_init_IRQ = bvme6000_init_IRQ;
arch_gettimeoffset = bvme6000_gettimeoffset;
mach_hwclk = bvme6000_hwclk;
- mach_set_clock_mmss = bvme6000_set_clock_mmss;
mach_reset = bvme6000_reset;
mach_get_model = bvme6000_get_model;
@@ -305,46 +303,3 @@ int bvme6000_hwclk(int op, struct rtc_time *t)
return 0;
}
-
-/*
- * Set the minutes and seconds from seconds value 'nowtime'. Fail if
- * clock is out by > 30 minutes. Logic lifted from atari code.
- * Algorithm is to wait for the 10ms register to change, and then to
- * wait a short while, and then set it.
- */
-
-int bvme6000_set_clock_mmss (unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- unsigned char rtc_minutes, rtc_tenms;
- volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
- unsigned char msr = rtc->msr & 0xc0;
- unsigned long flags;
- volatile int i;
-
- rtc->msr = 0; /* Ensure clock accessible */
- rtc_minutes = bcd2bin (rtc->bcd_min);
-
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- local_irq_save(flags);
- rtc_tenms = rtc->bcd_tenms;
- while (rtc_tenms == rtc->bcd_tenms)
- ;
- for (i = 0; i < 1000; i++)
- ;
- rtc->bcd_min = bin2bcd(real_minutes);
- rtc->bcd_sec = bin2bcd(real_seconds);
- local_irq_restore(flags);
- }
- else
- retval = -1;
-
- rtc->msr = msr;
-
- return retval;
-}
-
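
[Editor's note] These mach_set_clock_mmss implementations (like the two removed from atari/time.c above) all duplicated the same "adjust minutes and seconds only if within 30 minutes" logic. With the hook gone, periodic NTP writeback reaches the hardware through the generic RTC device instead, whose set_time op lands in mach_hwclk(). A paraphrased sketch of that glue, assuming the m68k rtc-generic wiring:

    static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
    {
            if (mach_hwclk(1, tm) < 0)      /* 1 = write the hardware clock */
                    return -EOPNOTSUPP;
            return 0;
    }
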
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index a874e54404d1..1d5483f6e457 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -52,6 +52,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -98,18 +99,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -122,6 +119,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -200,7 +198,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -231,7 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -260,7 +256,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -356,6 +352,7 @@ CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -363,6 +360,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -402,8 +400,8 @@ CONFIG_A2065=y
CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -412,8 +410,10 @@ CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
@@ -426,9 +426,9 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -478,6 +478,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -499,7 +500,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -600,6 +601,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -622,6 +624,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -657,6 +664,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 8ce39e23aa42..52a0af127951 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -381,14 +378,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -400,9 +398,9 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -440,6 +438,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -458,7 +457,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -559,6 +558,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -581,6 +581,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -616,6 +621,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 346c4e75edf8..b3103e51268a 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -391,14 +388,15 @@ CONFIG_VETH=m
CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
@@ -411,9 +409,9 @@ CONFIG_NE2000=y
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -480,7 +478,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -581,6 +579,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -603,6 +602,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -638,6 +642,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fca9c7aa71a3..fb7d651a4cab 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index f9eab174915c..6b37f5537c39 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -382,14 +379,15 @@ CONFIG_VETH=m
CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -401,9 +399,9 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -443,6 +441,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -460,7 +459,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -561,6 +560,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -583,6 +583,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -618,6 +623,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b52e597899eb..930cc2965a11 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -49,6 +49,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -95,18 +96,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -119,6 +116,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +195,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -228,7 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -257,7 +253,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -398,8 +395,8 @@ CONFIG_VETH=m
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -407,6 +404,7 @@ CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -420,9 +418,9 @@ CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -465,6 +463,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -482,7 +481,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -583,6 +582,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -605,6 +605,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -640,6 +645,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 2a84eeec5b02..e7dd25300127 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -59,6 +59,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -105,18 +106,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -129,6 +126,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -207,7 +205,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -238,7 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -267,7 +263,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -311,6 +306,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -373,6 +369,7 @@ CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
CONFIG_ATARI_SCSI=y
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
@@ -387,6 +384,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -438,8 +436,8 @@ CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -449,9 +447,11 @@ CONFIG_BVME6000_NET=y
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_MAC8390=y
CONFIG_NE2000=y
@@ -466,9 +466,9 @@ CONFIG_ZORRO8390=y
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -533,6 +533,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -562,7 +563,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -663,6 +664,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -685,6 +687,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -720,6 +727,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 476e69994340..b383327fd77a 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -47,6 +47,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -93,18 +94,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -117,6 +114,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +193,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -226,7 +223,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -255,7 +251,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -296,6 +291,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -343,6 +339,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 1477cda9146e..9783d3deb9e9 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index b3a543dc48a0..a35d10ee10cb 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -350,6 +346,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -388,8 +385,8 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -398,6 +395,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
@@ -410,9 +408,9 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -455,6 +453,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -473,7 +472,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -574,6 +573,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -596,6 +596,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -631,6 +636,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index d543ed5dfa96..573bf922d448 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -385,6 +382,7 @@ CONFIG_SUN3LANCE=y
CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -574,6 +574,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -609,6 +614,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a67e54246023..efb27a7fcc55 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -378,14 +375,15 @@ CONFIG_VETH=m
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -575,6 +575,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -610,6 +615,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 4d8d68c4e3dd..a4b8d3331a9e 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += barrier.h
generic-y += compat.h
generic-y += device.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e993e2860ee1..47228b0d4163 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -126,11 +126,13 @@ static inline void atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
+#define atomic_inc atomic_inc
static inline void atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
+#define atomic_dec atomic_dec
static inline int atomic_dec_and_test(atomic_t *v)
{
@@ -138,6 +140,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
+#define atomic_dec_and_test atomic_dec_and_test
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
@@ -155,6 +158,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
+#define atomic_inc_and_test atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS
@@ -190,9 +194,6 @@ static inline int atomic_xchg(atomic_t *v, int new)
#endif /* !CONFIG_RMW_INSNS */
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
char c;
@@ -201,6 +202,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
+#define atomic_sub_and_test atomic_sub_and_test
static inline int atomic_add_negative(int i, atomic_t *v)
{
@@ -210,20 +212,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
+#define atomic_add_negative atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */
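The #define atomic_inc atomic_inc lines added above are how an architecture advertises its own implementation to the generic atomic fallback machinery: <linux/atomic.h> only supplies a C fallback for an operation whose macro is not defined. That is also why the open-coded __atomic_add_unless() loop could be dropped — the equivalent generic fallback, built on atomic_cmpxchg(), now kicks in automatically. A minimal sketch of the convention (names and layout simplified, not the literal kernel headers):

    /* arch header: implement the op, then advertise it */
    static inline void atomic_inc(atomic_t *v)
    {
            __asm__ __volatile__("addql #1,%0" : "+m" (*v));
    }
    #define atomic_inc atomic_inc

    /* generic header: provide a fallback only if the arch stayed silent */
    #ifndef atomic_inc
    static inline void atomic_inc(atomic_t *v)
    {
            atomic_add(1, v);
    }
    #define atomic_inc atomic_inc
    #endif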
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 93b47b1f6fb4..d979f38af751 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -454,7 +454,7 @@ static inline unsigned long ffz(unsigned long word)
*/
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
-static inline int __ffs(int x)
+static inline unsigned long __ffs(unsigned long x)
{
__asm__ __volatile__ ("bitrev %0; ff1 %0"
: "=d" (x)
@@ -493,7 +493,11 @@ static inline int ffs(int x)
: "dm" (x & -x));
return 32 - cnt;
}
-#define __ffs(x) (ffs(x) - 1)
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ return ffs(x) - 1;
+}
/*
* fls: find last bit set.
@@ -515,12 +519,16 @@ static inline int __fls(int x)
#endif
+/* Simple test-and-set bit locks */
+#define test_and_set_bit_lock test_and_set_bit
+#define clear_bit_unlock clear_bit
+#define __clear_bit_unlock clear_bit_unlock
+
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */
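Dropping <asm-generic/bitops/lock.h> in favour of plain aliases is reasonable here: the m68k bset/bclr instructions are single atomic read-modify-write operations and the kernel does not support SMP on this architecture, so test_and_set_bit() already gives the ordering that the _lock/_unlock variants promise. A hypothetical caller of the aliased API, for illustration only:

    static unsigned long once_flag;

    static int try_once(void)
    {
            /* returns nonzero if bit 0 was already held */
            if (test_and_set_bit_lock(0, &once_flag))
                    return -EBUSY;
            /* ... critical section ... */
            clear_bit_unlock(0, &once_flag);
            return 0;
    }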
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
deleted file mode 100644
index e3722ed04fbb..000000000000
--- a/arch/m68k/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_DMA_MAPPING_H
-#define _M68K_DMA_MAPPING_H
-
-extern const struct dma_map_ops m68k_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &m68k_dma_ops;
-}
-
-#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/io.h b/arch/m68k/include/asm/io.h
index ca2849afb087..aabe6420ead2 100644
--- a/arch/m68k/include/asm/io.h
+++ b/arch/m68k/include/asm/io.h
@@ -1,6 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _M68K_IO_H
+#define _M68K_IO_H
+
#if defined(__uClinux__) || defined(CONFIG_COLDFIRE)
#include <asm/io_no.h>
#else
#include <asm/io_mm.h>
#endif
+
+#include <asm-generic/io.h>
+
+#endif /* _M68K_IO_H */
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index fe485f4f5fac..782b78f8a048 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -16,13 +16,11 @@
* isa_readX(),isa_writeX() are for ISA memory
*/
-#ifndef _IO_H
-#define _IO_H
+#ifndef _M68K_IO_MM_H
+#define _M68K_IO_MM_H
#ifdef __KERNEL__
-#define ARCH_HAS_IOREMAP_WT
-
#include <linux/compiler.h>
#include <asm/raw_io.h>
#include <asm/virtconvert.h>
@@ -369,40 +367,6 @@ static inline void isa_delay(void)
#define writew(val, addr) out_le16((addr), (val))
#endif /* CONFIG_ATARI_ROM_ISA */
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
-/*
- * We need to define dummy functions for GENERIC_IOMAP support.
- */
-#define inb(port) 0xff
-#define inb_p(port) 0xff
-#define outb(val,port) ((void)0)
-#define outb_p(val,port) ((void)0)
-#define inw(port) 0xffff
-#define inw_p(port) 0xffff
-#define outw(val,port) ((void)0)
-#define outw_p(val,port) ((void)0)
-#define inl(port) 0xffffffffUL
-#define inl_p(port) 0xffffffffUL
-#define outl(val,port) ((void)0)
-#define outl_p(val,port) ((void)0)
-
-#define insb(port,buf,nr) ((void)0)
-#define outsb(port,buf,nr) ((void)0)
-#define insw(port,buf,nr) ((void)0)
-#define outsw(port,buf,nr) ((void)0)
-#define insl(port,buf,nr) ((void)0)
-#define outsl(port,buf,nr) ((void)0)
-
-/*
- * These should be valid on any ioremap()ed region
- */
-#define readb(addr) in_8(addr)
-#define writeb(val,addr) out_8((addr),(val))
-#define readw(addr) in_le16(addr)
-#define writew(val,addr) out_le16((addr),(val))
-
-#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
-
#define readl(addr) in_le32(addr)
#define writel(val,addr) out_le32((addr),(val))
@@ -444,4 +408,4 @@ static inline void isa_delay(void)
#define writew_relaxed(b, addr) writew(b, addr)
#define writel_relaxed(b, addr) writel(b, addr)
-#endif /* _IO_H */
+#endif /* _M68K_IO_MM_H */
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index 83a0a6d449f4..0498192e1d98 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -131,19 +131,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define PCI_SPACE_LIMIT PCI_IO_MASK
#endif /* CONFIG_PCI */
-/*
- * These are defined in kmap.h as static inline functions. To maintain
- * previous behavior we put these define guards here so io_mm.h doesn't
- * see them.
- */
-#ifdef CONFIG_MMU
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
-#endif
-
#include <asm/kmap.h>
#include <asm/virtconvert.h>
-#include <asm-generic/io.h>
#endif /* _M68KNOMMU_IO_H */
diff --git a/arch/m68k/include/asm/kmap.h b/arch/m68k/include/asm/kmap.h
index 84b8333db8ad..aac7f045f7f0 100644
--- a/arch/m68k/include/asm/kmap.h
+++ b/arch/m68k/include/asm/kmap.h
@@ -4,6 +4,8 @@
#ifdef CONFIG_MMU
+#define ARCH_HAS_IOREMAP_WT
+
/* Values for nocacheflag and cmode */
#define IOMAP_FULL_CACHING 0
#define IOMAP_NOCACHE_SER 1
@@ -16,6 +18,7 @@
*/
extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
int cacheflag);
+#define iounmap iounmap
extern void iounmap(void __iomem *addr);
extern void __iounmap(void *addr, unsigned long size);
@@ -33,31 +36,35 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr,
}
#define ioremap_uc ioremap_nocache
+#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-#define ioremap_fillcache ioremap_fullcache
+#define ioremap_fullcache ioremap_fullcache
static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
+#define memset_io memset_io
static inline void memset_io(volatile void __iomem *addr, unsigned char val,
int count)
{
__builtin_memset((void __force *) addr, val, count);
}
+#define memcpy_fromio memcpy_fromio
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
int count)
{
__builtin_memcpy(dst, (void __force *) src, count);
}
+#define memcpy_toio memcpy_toio
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
int count)
{
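The memset_io/memcpy_fromio/memcpy_toio defines added in this header use the same convention as asm-generic/io.h, which asm/io.h now includes: the generic header only emits its default when the symbol is not already defined, so each #define here suppresses one generic fallback. Roughly what the generic side does (simplified excerpt):

    /* include/asm-generic/io.h, simplified */
    #ifndef memset_io
    #define memset_io memset_io
    static inline void memset_io(volatile void __iomem *addr, int value,
                                 size_t size)
    {
            memset(__io_virt(addr), value, size);
    }
    #endif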
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 1605da48ebf2..49bd3266b4b1 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -22,7 +22,6 @@ extern int (*mach_hwclk)(int, struct rtc_time*);
extern unsigned int (*mach_get_ss)(void);
extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-extern int (*mach_set_clock_mmss)(unsigned long);
extern void (*mach_reset)( void );
extern void (*mach_halt)( void );
extern void (*mach_power_off)( void );
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 9b840c03ebb7..08cee11180e6 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -57,7 +57,6 @@ struct mac_model
#define MAC_SCSI_IIFX 5
#define MAC_SCSI_DUO 6
#define MAC_SCSI_LC 7
-#define MAC_SCSI_LATE 8
#define MAC_IDE_NONE 0
#define MAC_IDE_QUADRA 1
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index e644c4daf540..6bbe52025de3 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -18,7 +18,7 @@ extern unsigned long memory_end;
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define __pa(vaddr) ((unsigned long)(vaddr))
-#define __va(paddr) ((void *)(paddr))
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 463572c4943f..e99993c57d6b 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -6,7 +6,7 @@
#undef DEBUG
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -19,7 +19,7 @@
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t flag, unsigned long attrs)
{
struct page *page, **map;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
return addr;
}
-static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+void arch_dma_free(struct device *dev, size_t size, void *addr,
dma_addr_t handle, unsigned long attrs)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
@@ -73,8 +73,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
#include <asm/cacheflush.h>
-static void *m68k_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -89,7 +89,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
return ret;
}
-static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
@@ -97,8 +97,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
-static void m68k_dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -115,58 +115,6 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
}
}
-static void m68k_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nents, enum dma_data_direction dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- dma_sync_single_for_device(dev, sg->dma_address, sg->length,
- dir);
- }
-}
-
-static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t handle = page_to_phys(page) + offset;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_sync_single_for_device(dev, handle, size, dir);
-
- return handle;
-}
-
-static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- dma_sync_single_for_device(dev, sg->dma_address, sg->length,
- dir);
- }
- return nents;
-}
-
-const struct dma_map_ops m68k_dma_ops = {
- .alloc = m68k_dma_alloc,
- .free = m68k_dma_free,
- .map_page = m68k_dma_map_page,
- .map_sg = m68k_dma_map_sg,
- .sync_single_for_device = m68k_dma_sync_single_for_device,
- .sync_sg_for_device = m68k_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(m68k_dma_ops);
-
void arch_setup_pdev_archdata(struct platform_device *pdev)
{
if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
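With the switch to <linux/dma-noncoherent.h>, the per-arch struct dma_map_ops table goes away: the dma-direct core supplies map_page/map_sg itself and calls back into the arch hooks kept above (arch_dma_alloc, arch_dma_free, arch_sync_dma_for_device). The division of labour looks roughly like this (heavily simplified from the generic code in kernel/dma/):

    /* generic side, simplified: the arch only sees the sync hook */
    dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                    unsigned long offset, size_t size,
                    enum dma_data_direction dir, unsigned long attrs)
    {
            phys_addr_t phys = page_to_phys(page) + offset;

            if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    arch_sync_dma_for_device(dev, phys, size, dir);
            return phys_to_dma(dev, phys);
    }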
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index f35e3ebd6331..5d3596c180f9 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
@@ -88,7 +89,6 @@ void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
-int (*mach_set_clock_mmss) (unsigned long);
unsigned int (*mach_get_ss)(void);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
@@ -165,6 +165,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
be32_to_cpu(m->addr);
m68k_memory[m68k_num_memory].size =
be32_to_cpu(m->size);
+ memblock_add(m68k_memory[m68k_num_memory].addr,
+ m68k_memory[m68k_num_memory].size);
m68k_num_memory++;
} else
pr_warn("%s: too many memory chunks\n",
@@ -224,10 +226,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
-#ifndef CONFIG_SUN3
- int i;
-#endif
-
/* The bootinfo is located right after the kernel */
if (!CPU_IS_COLDFIRE)
m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -356,14 +354,9 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifndef CONFIG_SUN3
- for (i = 1; i < m68k_num_memory; i++)
- free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
- m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
- reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
- m68k_ramdisk.addr, m68k_ramdisk.size,
- BOOTMEM_DEFAULT);
+ memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index a98af1018201..cfd5475bfc31 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
@@ -51,7 +52,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
-int (*mach_set_clock_mmss)(unsigned long);
int (*mach_hwclk) (int, struct rtc_time*);
/* machine dependent reboot functions */
@@ -86,8 +86,6 @@ void (*mach_power_off)(void);
void __init setup_arch(char **cmdline_p)
{
- int bootmap_size;
-
memory_start = PAGE_ALIGN(_ramstart);
memory_end = _ramend;
@@ -142,6 +140,8 @@ void __init setup_arch(char **cmdline_p)
pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
__bss_stop, memory_start, memory_start, memory_end);
+ memblock_add(memory_start, memory_end - memory_start);
+
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
@@ -158,23 +158,10 @@ void __init setup_arch(char **cmdline_p)
min_low_pfn = PFN_DOWN(memory_start);
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
- bootmap_size = init_bootmem_node(
- NODE_DATA(0),
- min_low_pfn, /* map goes here */
- PFN_DOWN(PAGE_OFFSET),
- max_pfn);
- /*
- * Free the usable memory, we have to make sure we do not free
- * the bootmem bitmap so we then reserve it after freeing it :-)
- */
- free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-
#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
if ((initrd_start > 0) && (initrd_start < initrd_end) &&
(initrd_end < memory_end))
- reserve_bootmem(initrd_start, initrd_end - initrd_start,
- BOOTMEM_DEFAULT);
+ memblock_reserve(initrd_start, initrd_end - initrd_start);
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e522307db47c..b02d7254b73a 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -57,7 +57,6 @@ static unsigned long mac_orig_videoaddr;
/* Mac specific timer functions */
extern u32 mac_gettimeoffset(void);
extern int mac_hwclk(int, struct rtc_time *);
-extern int mac_set_clock_mmss(unsigned long);
extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
@@ -158,7 +157,6 @@ void __init config_mac(void)
mach_get_model = mac_get_model;
arch_gettimeoffset = mac_gettimeoffset;
mach_hwclk = mac_hwclk;
- mach_set_clock_mmss = mac_set_clock_mmss;
mach_reset = mac_reset;
mach_halt = mac_poweroff;
mach_power_off = mac_poweroff;
@@ -709,7 +707,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook 520",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_QUADRA,
- .scsi_type = MAC_SCSI_LATE,
+ .scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -943,18 +941,6 @@ static const struct resource mac_scsi_old_rsrc[] __initconst = {
},
};
-static const struct resource mac_scsi_late_rsrc[] __initconst = {
- {
- .flags = IORESOURCE_IRQ,
- .start = IRQ_MAC_SCSI,
- .end = IRQ_MAC_SCSI,
- }, {
- .flags = IORESOURCE_MEM,
- .start = 0x50010000,
- .end = 0x50011FFF,
- },
-};
-
static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
{
.flags = IORESOURCE_IRQ,
@@ -1064,11 +1050,6 @@ int __init mac_platform_init(void)
platform_device_register_simple("mac_scsi", 0,
mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
break;
- case MAC_SCSI_LATE:
- /* XXX PDMA support for PowerBook 500 series needs testing */
- platform_device_register_simple("mac_scsi", 0,
- mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
- break;
case MAC_SCSI_LC:
/* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
* Also from the Developer Notes for Classic II, LC III,
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index c68054361615..19e9d8eef1f2 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -26,33 +26,38 @@
#include <asm/machdep.h>
-/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
#define RTC_OFFSET 2082844800
static void (*rom_reset)(void);
#ifdef CONFIG_ADB_CUDA
-static long cuda_read_time(void)
+static time64_t cuda_read_time(void)
{
struct adb_request req;
- long time;
+ time64_t time;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
while (!req.complete)
cuda_poll();
- time = (req.reply[3] << 24) | (req.reply[4] << 16) |
- (req.reply[5] << 8) | req.reply[6];
+ time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
+ (req.reply[5] << 8) | req.reply[6]);
+
return time - RTC_OFFSET;
}
-static void cuda_write_time(long data)
+static void cuda_write_time(time64_t time)
{
struct adb_request req;
+ u32 data = lower_32_bits(time + RTC_OFFSET);
- data += RTC_OFFSET;
if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -86,26 +91,27 @@ static void cuda_write_pram(int offset, __u8 data)
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU68K
-static long pmu_read_time(void)
+static time64_t pmu_read_time(void)
{
struct adb_request req;
- long time;
+ time64_t time;
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
while (!req.complete)
pmu_poll();
- time = (req.reply[1] << 24) | (req.reply[2] << 16) |
- (req.reply[3] << 8) | req.reply[4];
+ time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
+ (req.reply[3] << 8) | req.reply[4]);
+
return time - RTC_OFFSET;
}
-static void pmu_write_time(long data)
+static void pmu_write_time(time64_t time)
{
struct adb_request req;
+ u32 data = lower_32_bits(time + RTC_OFFSET);
- data += RTC_OFFSET;
if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -245,11 +251,11 @@ static void via_write_pram(int offset, __u8 data)
* is basically any machine with Mac II-style ADB.
*/
-static long via_read_time(void)
+static time64_t via_read_time(void)
{
union {
__u8 cdata[4];
- long idata;
+ __u32 idata;
} result, last_result;
int count = 1;
@@ -270,7 +276,7 @@ static long via_read_time(void)
via_pram_command(0x8D, &result.cdata[0]);
if (result.idata == last_result.idata)
- return result.idata - RTC_OFFSET;
+ return (time64_t)result.idata - RTC_OFFSET;
if (++count > 10)
break;
@@ -278,8 +284,8 @@ static long via_read_time(void)
last_result.idata = result.idata;
}
- pr_err("via_read_time: failed to read a stable value; got 0x%08lx then 0x%08lx\n",
- last_result.idata, result.idata);
+ pr_err("%s: failed to read a stable value; got 0x%08x then 0x%08x\n",
+ __func__, last_result.idata, result.idata);
return 0;
}
@@ -291,11 +297,11 @@ static long via_read_time(void)
* is basically any machine with Mac II-style ADB.
*/
-static void via_write_time(long time)
+static void via_write_time(time64_t time)
{
union {
__u8 cdata[4];
- long idata;
+ __u32 idata;
} data;
__u8 temp;
@@ -304,7 +310,7 @@ static void via_write_time(long time)
temp = 0x55;
via_pram_command(0x35, &temp);
- data.idata = time + RTC_OFFSET;
+ data.idata = lower_32_bits(time + RTC_OFFSET);
via_pram_command(0x01, &data.cdata[3]);
via_pram_command(0x05, &data.cdata[2]);
via_pram_command(0x09, &data.cdata[1]);
@@ -585,12 +591,15 @@ void mac_reset(void)
* This function translates seconds since 1970 into a proper date.
*
* Algorithm cribbed from glibc2.1, __offtime().
+ *
+ * This is roughly same as rtc_time64_to_tm(), which we should probably
+ * use here, but it's only available when CONFIG_RTC_LIB is enabled.
*/
#define SECS_PER_MINUTE (60)
#define SECS_PER_HOUR (SECS_PER_MINUTE * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-static void unmktime(unsigned long time, long offset,
+static void unmktime(time64_t time, long offset,
int *yearp, int *monp, int *dayp,
int *hourp, int *minp, int *secp)
{
@@ -602,11 +611,10 @@ static void unmktime(unsigned long time, long offset,
/* Leap years. */
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
- long int days, rem, y, wday, yday;
+ int days, rem, y, wday, yday;
const unsigned short int *ip;
- days = time / SECS_PER_DAY;
- rem = time % SECS_PER_DAY;
+ days = div_u64_rem(time, SECS_PER_DAY, &rem);
rem += offset;
while (rem < 0) {
rem += SECS_PER_DAY;
@@ -657,7 +665,7 @@ static void unmktime(unsigned long time, long offset,
int mac_hwclk(int op, struct rtc_time *t)
{
- unsigned long now;
+ time64_t now;
if (!op) { /* read */
switch (macintosh_config->adb_type) {
@@ -693,8 +701,8 @@ int mac_hwclk(int op, struct rtc_time *t)
__func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
t->tm_hour, t->tm_min, t->tm_sec);
- now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
- t->tm_hour, t->tm_min, t->tm_sec);
+ now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+ t->tm_hour, t->tm_min, t->tm_sec);
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
@@ -719,19 +727,3 @@ int mac_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-/*
- * Set minutes/seconds in the hardware clock
- */
-
-int mac_set_clock_mmss (unsigned long nowtime)
-{
- struct rtc_time now;
-
- mac_hwclk(0, &now);
- now.tm_sec = nowtime % 60;
- now.tm_min = (nowtime / 60) % 60;
- mac_hwclk(1, &now);
-
- return 0;
-}
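The time64_t conversion above dodges two 32-bit traps. On the read side, the raw RTC bytes are assembled through a u32 cast so a value with the top bit set is not sign-extended when widened to time64_t; on the write side, lower_32_bits() wraps the 64-bit time back into the register width the hardware expects. In isolation (hypothetical values, RTC_OFFSET as defined above):

    #include <linux/kernel.h>   /* lower_32_bits() */
    #include <linux/time64.h>   /* time64_t */

    u32 raw = 0xF0000000;                      /* RTC reading past 2038 */
    time64_t now = (time64_t)raw - RTC_OFFSET; /* unsigned widening, no
                                                  sign extension */
    u32 reg = lower_32_bits(now + RTC_OFFSET); /* wrap for the hardware */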
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8827b7f91402..38e2b272c220 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -71,7 +71,6 @@ void __init m68k_setup_node(int node)
pg_data_table[i] = pg_data_map + node;
}
#endif
- pg_data_map[node].bdata = bootmem_node_data + node;
node_set_online(node);
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 2925d795d71a..70dde040779b 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -153,31 +154,31 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
void __init cf_bootmem_alloc(void)
{
- unsigned long start_pfn;
unsigned long memstart;
/* _rambase and _ramend will be naturally page aligned */
m68k_memory[0].addr = _rambase;
m68k_memory[0].size = _ramend - _rambase;
+ memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+
/* compute total pages in system */
num_pages = PFN_DOWN(_ramend - _rambase);
/* page numbers */
memstart = PAGE_ALIGN(_ramstart);
min_low_pfn = PFN_DOWN(_rambase);
- start_pfn = PFN_DOWN(memstart);
max_pfn = max_low_pfn = PFN_DOWN(_ramend);
high_memory = (void *)_ramend;
+ /* Reserve kernel text/data/bss */
+ memblock_reserve(memstart, memstart - _rambase);
+
m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
module_fixup(NULL, __start_fixup, __stop_fixup);
- /* setup bootmem data */
+ /* setup node data */
m68k_setup_node(0);
- memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
- min_low_pfn, max_low_pfn);
- free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
}
/*
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index e490ecc7842c..4e17ecb5928a 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
@@ -208,7 +209,7 @@ void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long min_addr, max_addr;
- unsigned long addr, size, end;
+ unsigned long addr;
int i;
#ifdef DEBUG
@@ -253,34 +254,20 @@ void __init paging_init(void)
min_low_pfn = availmem >> PAGE_SHIFT;
max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
- for (i = 0; i < m68k_num_memory; i++) {
- addr = m68k_memory[i].addr;
- end = addr + m68k_memory[i].size;
- m68k_setup_node(i);
- availmem = PAGE_ALIGN(availmem);
- availmem += init_bootmem_node(NODE_DATA(i),
- availmem >> PAGE_SHIFT,
- addr >> PAGE_SHIFT,
- end >> PAGE_SHIFT);
- }
+ /* Reserve kernel text/data/bss and the memory allocated in head.S */
+ memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
/*
* Map the physical memory available into the kernel virtual
- * address space. First initialize the bootmem allocator with
- * the memory we already mapped, so map_node() has something
- * to allocate.
+ * address space. Make sure memblock will not try to allocate
+ * pages beyond the memory we already mapped in head.S
*/
- addr = m68k_memory[0].addr;
- size = m68k_memory[0].size;
- free_bootmem_node(NODE_DATA(0), availmem,
- min(m68k_init_mapped_size, size) - (availmem - addr));
- map_node(0);
- if (size > m68k_init_mapped_size)
- free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
- size - m68k_init_mapped_size);
-
- for (i = 1; i < m68k_num_memory; i++)
+ memblock_set_bottom_up(true);
+
+ for (i = 0; i < m68k_num_memory; i++) {
+ m68k_setup_node(i);
map_node(i);
+ }
flush_tlb_all();
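The bootmem-to-memblock conversions in setup_mm.c, setup_no.c, mcfmmu.c, motorola.c and sun3/config.c all follow the same recipe: hand every RAM chunk to memblock_add(), carve out whatever must survive (kernel image, bootinfo, initrd) with memblock_reserve(), and drop the init_bootmem_node()/free_bootmem_node() choreography entirely. Condensed to its essentials (illustrative variable names, not actual code from any one of these files):

    #include <linux/memblock.h>

    /* early-boot memory registration, following the pattern above */
    memblock_add(ram_base, ram_size);          /* all usable RAM */
    memblock_reserve(kernel_start, kernel_end - kernel_start);
    if (initrd_size)
            memblock_reserve(initrd_start, initrd_size);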
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index f8a710fd84cd..adea549d240e 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -40,7 +40,6 @@ static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler);
extern u32 mvme147_gettimeoffset(void);
extern int mvme147_hwclk (int, struct rtc_time *);
-extern int mvme147_set_clock_mmss (unsigned long);
extern void mvme147_reset (void);
@@ -92,7 +91,6 @@ void __init config_mvme147(void)
mach_init_IRQ = mvme147_init_IRQ;
arch_gettimeoffset = mvme147_gettimeoffset;
mach_hwclk = mvme147_hwclk;
- mach_set_clock_mmss = mvme147_set_clock_mmss;
mach_reset = mvme147_reset;
mach_get_model = mvme147_get_model;
@@ -164,8 +162,3 @@ int mvme147_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-int mvme147_set_clock_mmss (unsigned long nowtime)
-{
- return 0;
-}
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 4ffd9ef98de4..6ee36a5b528d 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -46,7 +46,6 @@ static void mvme16x_get_model(char *model);
extern void mvme16x_sched_init(irq_handler_t handler);
extern u32 mvme16x_gettimeoffset(void);
extern int mvme16x_hwclk (int, struct rtc_time *);
-extern int mvme16x_set_clock_mmss (unsigned long);
extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
@@ -280,7 +279,6 @@ void __init config_mvme16x(void)
mach_init_IRQ = mvme16x_init_IRQ;
arch_gettimeoffset = mvme16x_gettimeoffset;
mach_hwclk = mvme16x_hwclk;
- mach_set_clock_mmss = mvme16x_set_clock_mmss;
mach_reset = mvme16x_reset;
mach_get_model = mvme16x_get_model;
mach_get_hardware_list = mvme16x_get_hardware_list;
@@ -411,9 +409,3 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-int mvme16x_set_clock_mmss (unsigned long nowtime)
-{
- return 0;
-}
-
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 71c0867ecf20..96810d91da2b 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -43,7 +43,6 @@ extern void q40_sched_init(irq_handler_t handler);
static u32 q40_gettimeoffset(void);
static int q40_hwclk(int, struct rtc_time *);
static unsigned int q40_get_ss(void);
-static int q40_set_clock_mmss(unsigned long);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
static int q40_set_rtc_pll(struct rtc_pll_info *pll);
@@ -175,7 +174,6 @@ void __init config_q40(void)
mach_get_ss = q40_get_ss;
mach_get_rtc_pll = q40_get_rtc_pll;
mach_set_rtc_pll = q40_set_rtc_pll;
- mach_set_clock_mmss = q40_set_clock_mmss;
mach_reset = q40_reset;
mach_get_model = q40_get_model;
@@ -267,34 +265,6 @@ static unsigned int q40_get_ss(void)
return bcd2bin(Q40_RTC_SECS);
}
-/*
- * Set the minutes and seconds from seconds value 'nowtime'. Fail if
- * clock is out by > 30 minutes. Logic lifted from atari code.
- */
-
-static int q40_set_clock_mmss(unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
- int rtc_minutes;
-
- rtc_minutes = bcd2bin(Q40_RTC_MINS);
-
- if ((rtc_minutes < real_minutes ?
- real_minutes - rtc_minutes :
- rtc_minutes - real_minutes) < 30) {
- Q40_RTC_CTRL |= Q40_RTC_WRITE;
- Q40_RTC_MINS = bin2bcd(real_minutes);
- Q40_RTC_SECS = bin2bcd(real_seconds);
- Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
- } else
- retval = -1;
-
- return retval;
-}
-
-
/* get and set PLL calibration of RTC clock */
#define Q40_RTC_PLL_MASK ((1<<5)-1)
#define Q40_RTC_PLL_SIGN (1<<5)
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 1d28d380e8cc..79a2bb857906 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -123,10 +123,6 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
availmem = memory_start;
m68k_setup_node(0);
- availmem += init_bootmem(start_page, num_pages);
- availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
-
- free_bootmem(__pa(availmem), memory_end - (availmem));
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 08c10c518f83..642a56e2a1ea 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -16,6 +16,7 @@ config MIPS
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select CPU_PM if CPU_IDLE
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
@@ -97,6 +98,7 @@ config MIPS_GENERIC
select HW_HAS_PCI
select IRQ_MIPS_CPU
select LIBFDT
+ select MIPS_AUTO_PFN_OFFSET
select MIPS_CPU_SCACHE
select MIPS_GIC
select MIPS_L1_CACHE_SHIFT_7
@@ -193,6 +195,7 @@ config ATH79
select CSRC_R4K
select DMA_NONCOHERENT
select GPIOLIB
+ select PINCTRL
select HAVE_CLK
select COMMON_CLK
select CLKDEV_LOOKUP
@@ -211,6 +214,8 @@ config ATH79
config BMIPS_GENERIC
bool "Broadcom Generic BMIPS kernel"
+ select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+ select ARCH_HAS_PHYS_TO_DMA
select BOOT_RAW
select NO_EXCEPT_FILL
select USE_OF
@@ -438,7 +443,6 @@ config MACH_LOONGSON32
config MACH_LOONGSON64
bool "Loongson-2/3 family of machines"
- select ARCH_HAS_PHYS_TO_DMA
select SYS_SUPPORTS_ZBOOT
help
This enables the support of Loongson-2/3 family of machines.
@@ -662,11 +666,11 @@ config SGI_IP22
config SGI_IP27
bool "SGI IP27 (Origin200/2000)"
+ select ARCH_HAS_PHYS_TO_DMA
select FW_ARC
select FW_ARC64
select BOOT_ELF64
select DEFAULT_SGI_PARTITION
- select DMA_COHERENT
select SYS_HAS_EARLY_PRINTK
select HW_HAS_PCI
select NR_CPUS_DEFAULT_64
@@ -721,6 +725,7 @@ config SGI_IP28
config SGI_IP32
bool "SGI IP32 (O2)"
+ select ARCH_HAS_PHYS_TO_DMA
select FW_ARC
select FW_ARC32
select BOOT_ELF32
@@ -743,7 +748,6 @@ config SGI_IP32
config SIBYTE_CRHINE
bool "Sibyte BCM91120C-CRhine"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1120
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -753,7 +757,6 @@ config SIBYTE_CRHINE
config SIBYTE_CARMEL
bool "Sibyte BCM91120x-Carmel"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1120
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -763,7 +766,6 @@ config SIBYTE_CARMEL
config SIBYTE_CRHONE
bool "Sibyte BCM91125C-CRhone"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1125
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -774,7 +776,6 @@ config SIBYTE_CRHONE
config SIBYTE_RHONE
bool "Sibyte BCM91125E-Rhone"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1125H
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -784,7 +785,6 @@ config SIBYTE_RHONE
config SIBYTE_SWARM
bool "Sibyte BCM91250A-SWARM"
select BOOT_ELF32
- select DMA_COHERENT
select HAVE_PATA_PLATFORM
select SIBYTE_SB1250
select SWAP_IO_SPACE
@@ -797,7 +797,6 @@ config SIBYTE_SWARM
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
select BOOT_ELF32
- select DMA_COHERENT
select HAVE_PATA_PLATFORM
select SIBYTE_SB1250
select SWAP_IO_SPACE
@@ -809,7 +808,6 @@ config SIBYTE_LITTLESUR
config SIBYTE_SENTOSA
bool "Sibyte BCM91250E-Sentosa"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -819,7 +817,6 @@ config SIBYTE_SENTOSA
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
select BOOT_ELF32
- select DMA_COHERENT
select NR_CPUS_DEFAULT_4
select SIBYTE_BCM1x80
select SWAP_IO_SPACE
@@ -895,8 +892,8 @@ config CAVIUM_OCTEON_SOC
bool "Cavium Networks Octeon SoC based boards"
select CEVT_R4K
select ARCH_HAS_PHYS_TO_DMA
+ select HAS_RAPIDIO
select PHYS_ADDR_T_64BIT
- select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select EDAC_SUPPORT
@@ -945,7 +942,6 @@ config NLM_XLR_BOARD
select PHYS_ADDR_T_64BIT
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select DMA_COHERENT
select NR_CPUS_DEFAULT_32
select CEVT_R4K
select CSRC_R4K
@@ -973,7 +969,6 @@ config NLM_XLP_BOARD
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select DMA_COHERENT
select NR_CPUS_DEFAULT_32
select CEVT_R4K
select CSRC_R4K
@@ -992,7 +987,6 @@ config MIPS_PARAVIRT
bool "Para-Virtualized guest system"
select CEVT_R4K
select CSRC_R4K
- select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@@ -1118,12 +1112,14 @@ config DMA_PERDEV_COHERENT
bool
select DMA_MAYBE_COHERENT
-config DMA_COHERENT
- bool
-
config DMA_NONCOHERENT
bool
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select NEED_DMA_MAP_STATE
+ select DMA_NONCOHERENT_MMAP
+ select DMA_NONCOHERENT_CACHE_SYNC
+ select DMA_NONCOHERENT_OPS
config SYS_HAS_EARLY_PRINTK
bool
@@ -1365,6 +1361,7 @@ choice
config CPU_LOONGSON3
bool "Loongson 3 CPU"
depends on SYS_HAS_CPU_LOONGSON3
+ select ARCH_HAS_PHYS_TO_DMA
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
@@ -1427,7 +1424,8 @@ config CPU_LOONGSON1B
select LEDS_GPIO_REGISTER
help
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
- release 2 instruction set.
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
config CPU_LOONGSON1C
bool "Loongson 1C"
@@ -1436,7 +1434,8 @@ config CPU_LOONGSON1C
select LEDS_GPIO_REGISTER
help
The Loongson 1C is a 32-bit SoC, which implements the MIPS32
- release 2 instruction set.
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
config CPU_MIPS32_R1
bool "MIPS32 Release 1"
@@ -1831,11 +1830,12 @@ config CPU_LOONGSON2
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select ARCH_HAS_PHYS_TO_DMA
config CPU_LOONGSON1
bool
select CPU_MIPS32
- select CPU_MIPSR2
+ select CPU_MIPSR1
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1979,12 +1979,6 @@ config SYS_HAS_CPU_XLR
config SYS_HAS_CPU_XLP
bool
-config MIPS_MALTA_PM
- depends on MIPS_MALTA
- depends on PCI
- bool
- default y
-
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -2994,6 +2988,9 @@ config PGTABLE_LEVELS
default 3 if 64BIT && !PAGE_SIZE_64KB
default 2
+config MIPS_AUTO_PFN_OFFSET
+ bool
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -3115,10 +3112,13 @@ config ZONE_DMA32
source "drivers/pcmcia/Kconfig"
+config HAS_RAPIDIO
+ bool
+ default n
+
config RAPIDIO
tristate "RapidIO support"
- depends on PCI
- default n
+ depends on HAS_RAPIDIO || PCI
help
If you say Y here, the kernel will include drivers and
infrastructure code to support RapidIO interconnect devices.
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e2122cca4ae2..5425df002a6b 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -122,12 +122,22 @@ cflags-y += -ffreestanding
# are used, so we kludge that here. A bug has been filed at
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
#
+# clang doesn't suffer from these issues and our checks against -dumpmachine
+# don't work so well when cross compiling, since without providing --target
+# clang's output will be based upon the build machine. So for clang we simply
+# unconditionally specify -EB or -EL as appropriate.
+#
+ifeq ($(cc-name),clang)
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -EL
+else
undef-all += -UMIPSEB -U_MIPSEB -U__MIPSEB -U__MIPSEB__
undef-all += -UMIPSEL -U_MIPSEL -U__MIPSEL -U__MIPSEL__
predef-be += -DMIPSEB -D_MIPSEB -D__MIPSEB -D__MIPSEB__
predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
+endif
cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
-fno-omit-frame-pointer
@@ -155,15 +165,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
-cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
- -Wa,-mips64 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
- -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index fa75d75b5ba9..ddff9a02513d 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -34,6 +34,7 @@
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
@@ -60,7 +61,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index aab55aaf3d62..d625e6f99ae7 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -31,6 +31,7 @@
#include <mtd/mtd-abi.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/au1xxx_eth.h>
@@ -58,7 +59,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index 0fc53e08a894..5f05b8714385 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -29,6 +29,7 @@
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
@@ -55,7 +56,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c
index 203854ddd1bb..8d4b65c3268a 100644
--- a/arch/mips/alchemy/devboards/platform.c
+++ b/arch/mips/alchemy/devboards/platform.c
@@ -14,6 +14,7 @@
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
@@ -36,7 +37,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
if (alchemy_get_cputype() == ALCHEMY_CPU_AU1300)
alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
index 0137656107a9..6b64fd96dba8 100644
--- a/arch/mips/ar7/clock.c
+++ b/arch/mips/ar7/clock.c
@@ -476,3 +476,32 @@ void __init ar7_init_clocks(void)
/* adjust vbus clock rate */
vbus_clk.rate = bus_clk.rate / 2;
}
+
+/* dummy functions, should not be called */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ WARN_ON(clk);
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
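These stubs only keep the generic clk API linkable on ar7; nothing on the platform is expected to call them. A minimal sketch of a hypothetical caller (names invented for illustration), showing what the stubs actually do:

	/* hypothetical driver code, not part of this patch */
	static int example_set_uart_clock(struct clk *clk)
	{
		long rate = clk_round_rate(clk, 48000000); /* WARN_ON(clk), returns 0 */
		return clk_set_rate(clk, rate);            /* WARN_ON(clk), returns 0 */
	}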
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index dd53987a690f..2ec8d9ac91ec 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -25,6 +25,7 @@
#include <linux/string.h>
#include <linux/io.h>
#include <asm/bootinfo.h>
+#include <asm/setup.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
@@ -259,10 +260,9 @@ static inline void serial_out(int offset, int value)
writel(value, (void *)PORT(offset));
}
-int prom_putchar(char c)
+void prom_putchar(char c)
{
while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
;
serial_out(UART_TX, c);
- return 1;
}
diff --git a/arch/mips/ath25/Kconfig b/arch/mips/ath25/Kconfig
index 7070b4bcd01d..2c1dfd06c366 100644
--- a/arch/mips/ath25/Kconfig
+++ b/arch/mips/ath25/Kconfig
@@ -12,6 +12,7 @@ config SOC_AR2315
config PCI_AR2315
bool "Atheros AR2315 PCI controller support"
depends on SOC_AR2315
+ select ARCH_HAS_PHYS_TO_DMA
select HW_HAS_PCI
select PCI
default y
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
index 6d11ae581ea7..989e71015ee6 100644
--- a/arch/mips/ath25/board.c
+++ b/arch/mips/ath25/board.c
@@ -146,10 +146,10 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
pr_info("Fixing up empty mac addresses\n");
config->reset_config_gpio = 0xffff;
config->sys_led_gpio = 0xffff;
- random_ether_addr(config->wlan0_mac);
+ eth_random_addr(config->wlan0_mac);
config->wlan0_mac[0] &= ~0x06;
- random_ether_addr(config->enet0_mac);
- random_ether_addr(config->enet1_mac);
+ eth_random_addr(config->enet0_mac);
+ eth_random_addr(config->enet1_mac);
}
}
diff --git a/arch/mips/ath25/early_printk.c b/arch/mips/ath25/early_printk.c
index 36035b628161..d534761e9cda 100644
--- a/arch/mips/ath25/early_printk.c
+++ b/arch/mips/ath25/early_printk.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/serial_reg.h>
+#include <asm/setup.h>
#include "devices.h"
#include "ar2315_regs.h"
@@ -25,7 +26,7 @@ static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
return __raw_readl(base + 4 * reg);
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
static void __iomem *base;
@@ -38,7 +39,7 @@ void prom_putchar(unsigned char ch)
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
- prom_uart_wr(base, UART_TX, ch);
+ prom_uart_wr(base, UART_TX, (unsigned char)ch);
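+ /* wait again so the character leaves the holding register before we return */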
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
}
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 6b1000b6a6a6..cf9158e3c2d9 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -355,6 +355,91 @@ static void __init ar934x_clocks_init(void)
iounmap(dpll_base);
}
+static void __init qca953x_clocks_init(void)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(QCA953X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA953X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += frac * (ref_rate >> 6) / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ cpu_rate = cpu_pll / (postdiv + 1);
+ else
+ cpu_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ddr_rate = ddr_pll / (postdiv + 1);
+ else
+ ddr_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_add_sys_clkdev("ref", ref_rate);
+ ath79_add_sys_clkdev("cpu", cpu_rate);
+ ath79_add_sys_clkdev("ddr", ddr_rate);
+ ath79_add_sys_clkdev("ahb", ahb_rate);
+
+ clk_add_alias("wdt", NULL, "ref", NULL);
+ clk_add_alias("uart", NULL, "ref", NULL);
+}
+
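As a worked example of the PLL math above, assume illustrative (not measured) register values: a 25 MHz reference, NINT=26, NFRAC=0, REFDIV=1 and OUTDIV=0. The CPU PLL then computes as:

	cpu_pll  = 26 * 25000000 / 1;		/* 650000000 from the integer part */
	cpu_pll += 0 * (25000000 >> 6) / 1;	/* zero fractional contribution */
	cpu_pll /= (1 << 0);			/* OUTDIV=0 leaves 650 MHz */

With CPU_POST_DIV=0 and the CPU clock fed from the CPU PLL, cpu_rate becomes 650000000 / (0 + 1), i.e. a 650 MHz core clock.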
static void __init qca955x_clocks_init(void)
{
unsigned long ref_rate;
@@ -440,6 +525,110 @@ static void __init qca955x_clocks_init(void)
clk_add_alias("uart", NULL, "ref", NULL);
}
+static void __init qca956x_clocks_init(void)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, hfrac, lfrac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ /*
+ * The QCA956x timer init workaround has to be applied right before the
+ * clocks are set up, otherwise the timer interrupt never fires and
+ * jiffies does not advance.
+ */
+ u32 misc;
+
+ misc = ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_ENABLE);
+ misc |= MISC_INT_MIPS_SI_TIMERINT_MASK;
+ ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_ENABLE, misc);
+
+ bootstrap = ath79_reset_rr(QCA956X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA956X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
+
+ pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK;
+
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
+ pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL)
+ cpu_rate = ddr_pll / (postdiv + 1);
+ else
+ cpu_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL)
+ ddr_rate = cpu_pll / (postdiv + 1);
+ else
+ ddr_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_add_sys_clkdev("ref", ref_rate);
+ ath79_add_sys_clkdev("cpu", cpu_rate);
+ ath79_add_sys_clkdev("ddr", ddr_rate);
+ ath79_add_sys_clkdev("ahb", ahb_rate);
+
+ clk_add_alias("wdt", NULL, "ref", NULL);
+ clk_add_alias("uart", NULL, "ref", NULL);
+}
+
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
@@ -450,8 +639,12 @@ void __init ath79_clocks_init(void)
ar933x_clocks_init();
else if (soc_is_ar934x())
ar934x_clocks_init();
+ else if (soc_is_qca953x())
+ qca953x_clocks_init();
else if (soc_is_qca955x())
qca955x_clocks_init();
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ qca956x_clocks_init();
else
BUG();
}
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index c782b10ddf50..cd6055f9e7a0 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -103,8 +103,12 @@ void ath79_device_reset_set(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
@@ -131,8 +135,12 @@ void ath79_device_reset_clear(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index d1adc59af5bf..4b1063117ef7 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -13,12 +13,13 @@
#include <linux/errno.h>
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
+#include <asm/setup.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ar933x_uart.h>
-static void (*_prom_putchar) (unsigned char);
+static void (*_prom_putchar)(char);
static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
{
@@ -33,31 +34,72 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-static void prom_putchar_ar71xx(unsigned char ch)
+static void prom_putchar_ar71xx(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
- __raw_writel(ch, base + UART_TX * 4);
+ __raw_writel((unsigned char)ch, base + UART_TX * 4);
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
}
-static void prom_putchar_ar933x(unsigned char ch)
+static void prom_putchar_ar933x(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR933X_UART_BASE));
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
AR933X_UART_DATA_TX_CSR);
- __raw_writel(AR933X_UART_DATA_TX_CSR | ch, base + AR933X_UART_DATA_REG);
+ __raw_writel(AR933X_UART_DATA_TX_CSR | (unsigned char)ch,
+ base + AR933X_UART_DATA_REG);
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
AR933X_UART_DATA_TX_CSR);
}
-static void prom_putchar_dummy(unsigned char ch)
+static void prom_putchar_dummy(char ch)
{
/* nothing to do */
}
+static void prom_enable_uart(u32 id)
+{
+ void __iomem *gpio_base;
+ u32 uart_en;
+ u32 t;
+
+ switch (id) {
+ case REV_ID_MAJOR_AR71XX:
+ uart_en = AR71XX_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR7240:
+ case REV_ID_MAJOR_AR7241:
+ case REV_ID_MAJOR_AR7242:
+ uart_en = AR724X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR913X:
+ uart_en = AR913X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9330:
+ case REV_ID_MAJOR_AR9331:
+ uart_en = AR933X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9341:
+ case REV_ID_MAJOR_AR9342:
+ case REV_ID_MAJOR_AR9344:
+ /* TODO */
+ default:
+ return;
+ }
+
+ gpio_base = (void __iomem *)KSEG1ADDR(AR71XX_GPIO_BASE);
+ t = __raw_readl(gpio_base + AR71XX_GPIO_REG_FUNC);
+ t |= uart_en;
+ __raw_writel(t, gpio_base + AR71XX_GPIO_REG_FUNC);
+}
+
static void prom_putchar_init(void)
{
void __iomem *base;
@@ -76,8 +118,12 @@ static void prom_putchar_init(void)
case REV_ID_MAJOR_AR9341:
case REV_ID_MAJOR_AR9342:
case REV_ID_MAJOR_AR9344:
+ case REV_ID_MAJOR_QCA9533:
+ case REV_ID_MAJOR_QCA9533_V2:
case REV_ID_MAJOR_QCA9556:
case REV_ID_MAJOR_QCA9558:
+ case REV_ID_MAJOR_TP9343:
+ case REV_ID_MAJOR_QCA956X:
_prom_putchar = prom_putchar_ar71xx;
break;
@@ -88,11 +134,13 @@ static void prom_putchar_init(void)
default:
_prom_putchar = prom_putchar_dummy;
- break;
+ return;
}
+
+ prom_enable_uart(id);
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
if (!_prom_putchar)
prom_putchar_init();
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index f206dafbb0a3..4c7a93f4039a 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -40,6 +40,7 @@ static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
static void ath79_restart(char *command)
{
+ local_irq_disable();
ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
for (;;)
if (cpu_wait)
@@ -59,6 +60,7 @@ static void __init ath79_detect_sys_type(void)
u32 major;
u32 minor;
u32 rev = 0;
+ u32 ver = 1;
id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID);
major = id & REV_ID_MAJOR_MASK;
@@ -151,6 +153,17 @@ static void __init ath79_detect_sys_type(void)
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA9533_V2:
+ ver = 2;
+ ath79_soc_rev = 2;
+ /* fall through */
+
+ case REV_ID_MAJOR_QCA9533:
+ ath79_soc = ATH79_SOC_QCA9533;
+ chip = "9533";
+ rev = id & QCA953X_REV_ID_REVISION_MASK;
+ break;
+
case REV_ID_MAJOR_QCA9556:
ath79_soc = ATH79_SOC_QCA9556;
chip = "9556";
@@ -163,14 +176,30 @@ static void __init ath79_detect_sys_type(void)
rev = id & QCA955X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA956X:
+ ath79_soc = ATH79_SOC_QCA956X;
+ chip = "956X";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_TP9343:
+ ath79_soc = ATH79_SOC_TP9343;
+ chip = "9343";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
- ath79_soc_rev = rev;
+ if (ver == 1)
+ ath79_soc_rev = rev;
- if (soc_is_qca955x())
- sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u",
+ if (soc_is_qca953x() || soc_is_qca955x() || soc_is_qca956x())
+ sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s ver %u rev %u",
+ chip, ver, rev);
+ else if (soc_is_tp9343())
+ sprintf(ath79_sys_type, "Qualcomm Atheros TP%s rev %u",
chip, rev);
else
sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
diff --git a/arch/mips/bcm63xx/early_printk.c b/arch/mips/bcm63xx/early_printk.c
index 6092226a6d76..9e9ec27c282f 100644
--- a/arch/mips/bcm63xx/early_printk.c
+++ b/arch/mips/bcm63xx/early_printk.c
@@ -8,6 +8,7 @@
#include <bcm63xx_io.h>
#include <linux/serial_bcm63xx.h>
+#include <asm/setup.h>
static void wait_xfered(void)
{
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 6dec30842b2f..3d13c77c125f 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -17,7 +17,7 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <dma-coherence.h>
+#include <asm/bmips.h>
/*
* BCM338x has configurable address translation windows which allow the
@@ -40,7 +40,7 @@ static struct bmips_dma_range *bmips_dma_ranges;
#define FLUSH_RAC 0x100
-static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
{
struct bmips_dma_range *r;
@@ -52,17 +52,7 @@ static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
return pa;
}
-dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return bmips_phys_to_dma(dev, virt_to_phys(addr));
-}
-
-dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
- return bmips_phys_to_dma(dev, page_to_phys(page));
-}
-
-unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
struct bmips_dma_range *r;
@@ -74,6 +64,22 @@ unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr;
}
+void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+ void __iomem *cbr = BMIPS_GET_CBR();
+ u32 cfg;
+
+ if (boot_cpu_type() != CPU_BMIPS3300 &&
+ boot_cpu_type() != CPU_BMIPS4350 &&
+ boot_cpu_type() != CPU_BMIPS4380)
+ return;
+
+ /* Flush stale data out of the readahead cache */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | FLUSH_RAC, cbr + BMIPS_RAC_CONFIG);
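+ /* read back to ensure the flush write has posted to the RAC */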
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+}
+
static int __init bmips_init_dma_ranges(void)
{
struct device_node *np =
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 3b6f687f177c..231fc5ce375e 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -202,13 +202,6 @@ void __init device_tree_init(void)
of_node_put(np);
}
-int __init plat_of_setup(void)
-{
- return __dt_register_buses("simple-bus", NULL);
-}
-
-arch_initcall(plat_of_setup);
-
static int __init plat_dev_init(void)
{
of_clk_init(NULL);
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index c22da16d67b8..35704c28a28b 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -105,28 +105,29 @@ $(obj)/uImage: $(obj)/uImage.$(suffix-y)
# Flattened Image Tree (.itb) images
#
-targets += vmlinux.itb
-targets += vmlinux.gz.itb
-targets += vmlinux.bz2.itb
-targets += vmlinux.lzma.itb
-targets += vmlinux.lzo.itb
-
ifeq ($(ADDR_BITS),32)
- itb_addr_cells = 1
+itb_addr_cells = 1
endif
ifeq ($(ADDR_BITS),64)
- itb_addr_cells = 2
+itb_addr_cells = 2
endif
+targets += vmlinux.its.S
+
quiet_cmd_its_cat = CAT $@
- cmd_its_cat = cat $^ >$@
+ cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@
-$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS))
+$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE
$(call if_changed,its_cat)
+targets += vmlinux.its
+targets += vmlinux.gz.its
+targets += vmlinux.bz2.its
+targets += vmlinux.lzma.its
+targets += vmlinux.lzo.its
+
quiet_cmd_cpp_its_S = ITS $@
- cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
- -D__ASSEMBLY__ \
+ cmd_cpp_its_S = $(CPP) -P -C -o $@ $< \
-DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
-DVMLINUX_BINARY="\"$(3)\"" \
-DVMLINUX_COMPRESSION="\"$(2)\"" \
@@ -136,19 +137,25 @@ quiet_cmd_cpp_its_S = ITS $@
-DADDR_CELLS=$(itb_addr_cells)
$(obj)/vmlinux.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
+ $(call if_changed,cpp_its_S,none,vmlinux.bin)
$(obj)/vmlinux.gz.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
+ $(call if_changed,cpp_its_S,gzip,vmlinux.bin.gz)
$(obj)/vmlinux.bz2.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
+ $(call if_changed,cpp_its_S,bzip2,vmlinux.bin.bz2)
$(obj)/vmlinux.lzma.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
+ $(call if_changed,cpp_its_S,lzma,vmlinux.bin.lzma)
$(obj)/vmlinux.lzo.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
+ $(call if_changed,cpp_its_S,lzo,vmlinux.bin.lzo)
+
+targets += vmlinux.itb
+targets += vmlinux.gz.itb
+targets += vmlinux.bz2.itb
+targets += vmlinux.lzma.itb
+targets += vmlinux.lzo.itb
quiet_cmd_itb-image = ITB $@
cmd_itb-image = \
@@ -162,14 +169,5 @@ quiet_cmd_itb-image = ITB $@
$(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE
$(call if_changed,itb-image,$<)
-$(obj)/vmlinux.gz.itb: $(obj)/vmlinux.gz.its $(obj)/vmlinux.bin.gz FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.bz2.itb: $(obj)/vmlinux.bz2.its $(obj)/vmlinux.bin.bz2 FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzma.itb: $(obj)/vmlinux.lzma.its $(obj)/vmlinux.bin.lzma FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzo.itb: $(obj)/vmlinux.lzo.its $(obj)/vmlinux.bin.lzo FORCE
+$(obj)/vmlinux.%.itb: $(obj)/vmlinux.%.its $(obj)/vmlinux.bin.% FORCE
$(call if_changed,itb-image,$<)
diff --git a/arch/mips/boot/compressed/uart-prom.c b/arch/mips/boot/compressed/uart-prom.c
index d6f0fee0a151..a8a0a32e05d1 100644
--- a/arch/mips/boot/compressed/uart-prom.c
+++ b/arch/mips/boot/compressed/uart-prom.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-
-extern void prom_putchar(unsigned char ch);
+#include <asm/setup.h>
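The local extern goes away so that every platform shares one prototype from asm/setup.h; judging by the signature changes throughout this series, the shared declaration is presumably:

	/* sketch of the assumed declaration in arch/mips/include/asm/setup.h */
	extern void prom_putchar(char c);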
void putc(char c)
{
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index aa4e8f75ff5d..ce93d57f1b4d 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -155,6 +155,25 @@
};
};
+ spi_gpio {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-chipselects = <2>;
+
+ gpio-miso = <&gpe 14 0>;
+ gpio-sck = <&gpe 15 0>;
+ gpio-mosi = <&gpe 17 0>;
+ cs-gpios = <&gpe 16 0
+ &gpe 18 0>;
+
+ spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
uart0: serial@10030000 {
compatible = "ingenic,jz4780-uart";
reg = <0x10030000 0x100>;
diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile
index 3c6aed9f5439..9a9bb7ea0503 100644
--- a/arch/mips/boot/dts/mscc/Makefile
+++ b/arch/mips/boot/dts/mscc/Makefile
@@ -1,3 +1,3 @@
-dtb-$(CONFIG_LEGACY_BOARD_OCELOT) += ocelot_pcb123.dtb
+dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index 4f33dbc67348..f7eb612b46ba 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -91,6 +91,17 @@
status = "disabled";
};
+ spi: spi@101000 {
+ compatible = "mscc,ocelot-spi", "snps,dw-apb-ssi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x101000 0x100>, <0x3c 0x18>;
+ interrupts = <9>;
+ clocks = <&ahb_clk>;
+
+ status = "disabled";
+ };
+
switch@1010000 {
compatible = "mscc,vsc7514-switch";
reg = <0x1010000 0x10000>,
@@ -168,6 +179,9 @@
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&gpio 0 0 22>;
+ interrupt-controller;
+ interrupts = <13>;
+ #interrupt-cells = <2>;
uart_pins: uart-pins {
pins = "GPIO_6", "GPIO_7";
@@ -178,13 +192,18 @@
pins = "GPIO_12", "GPIO_13";
function = "uart2";
};
+
+ miim1: miim1 {
+ pins = "GPIO_14", "GPIO_15";
+ function = "miim1";
+ };
};
mdio0: mdio@107009c {
#address-cells = <1>;
#size-cells = <0>;
compatible = "mscc,ocelot-miim";
- reg = <0x107009c 0x36>, <0x10700f0 0x8>;
+ reg = <0x107009c 0x24>, <0x10700f0 0x8>;
interrupts = <14>;
status = "disabled";
@@ -201,5 +220,16 @@
reg = <3>;
};
};
+
+ mdio1: mdio@10700c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "mscc,ocelot-miim";
+ reg = <0x10700c0 0x24>;
+ interrupts = <15>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&miim1>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
index 4ccd65379059..2266027759f9 100644
--- a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
@@ -26,6 +26,16 @@
status = "okay";
};
+&spi {
+ status = "okay";
+
+ flash@0 {
+ compatible = "macronix,mx25l25635f", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
&mdio0 {
status = "okay";
};
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 1fe561c5f90e..61dcfa5b6ca7 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -161,7 +161,7 @@
usb_phy: usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index 3931033e47c8..7fccf6357225 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -22,11 +22,10 @@
};
gpio-keys {
- compatible = "gpio-keys-polled";
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <20>;
button@0 {
label = "reset";
linux,code = <KEY_RESTART>;
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index efd5f0722206..2bae201aa365 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -146,7 +146,7 @@
usb_phy: usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
index d4e4502daaa8..e7af2cf5f4c1 100644
--- a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -29,11 +29,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "reset";
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
index 4f95ccf17c4c..d38aa73f1a2e 100644
--- a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -47,11 +47,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "jumpstart";
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
index f70f79c4d0d5..11778abacf66 100644
--- a/arch/mips/boot/dts/qca/ar9331_omega.dts
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -29,11 +29,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "reset";
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
index 748131aea22e..c8290d36cfbe 100644
--- a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -47,11 +47,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "wps";
diff --git a/arch/mips/boot/ecoff.h b/arch/mips/boot/ecoff.h
index b3e73c22c345..5be79ebfc3f8 100644
--- a/arch/mips/boot/ecoff.h
+++ b/arch/mips/boot/ecoff.h
@@ -2,14 +2,17 @@
/*
* Some ECOFF definitions.
*/
+
+#include <stdint.h>
+
typedef struct filehdr {
- unsigned short f_magic; /* magic number */
- unsigned short f_nscns; /* number of sections */
- long f_timdat; /* time & date stamp */
- long f_symptr; /* file pointer to symbolic header */
- long f_nsyms; /* sizeof(symbolic hdr) */
- unsigned short f_opthdr; /* sizeof(optional hdr) */
- unsigned short f_flags; /* flags */
+ uint16_t f_magic; /* magic number */
+ uint16_t f_nscns; /* number of sections */
+ int32_t f_timdat; /* time & date stamp */
+ int32_t f_symptr; /* file pointer to symbolic header */
+ int32_t f_nsyms; /* sizeof(symbolic hdr) */
+ uint16_t f_opthdr; /* sizeof(optional hdr) */
+ uint16_t f_flags; /* flags */
} FILHDR;
#define FILHSZ sizeof(FILHDR)
@@ -18,32 +21,32 @@ typedef struct filehdr {
typedef struct scnhdr {
char s_name[8]; /* section name */
- long s_paddr; /* physical address, aliased s_nlib */
- long s_vaddr; /* virtual address */
- long s_size; /* section size */
- long s_scnptr; /* file ptr to raw data for section */
- long s_relptr; /* file ptr to relocation */
- long s_lnnoptr; /* file ptr to gp histogram */
- unsigned short s_nreloc; /* number of relocation entries */
- unsigned short s_nlnno; /* number of gp histogram entries */
- long s_flags; /* flags */
+ int32_t s_paddr; /* physical address, aliased s_nlib */
+ int32_t s_vaddr; /* virtual address */
+ int32_t s_size; /* section size */
+ int32_t s_scnptr; /* file ptr to raw data for section */
+ int32_t s_relptr; /* file ptr to relocation */
+ int32_t s_lnnoptr; /* file ptr to gp histogram */
+ uint16_t s_nreloc; /* number of relocation entries */
+ uint16_t s_nlnno; /* number of gp histogram entries */
+ int32_t s_flags; /* flags */
} SCNHDR;
#define SCNHSZ sizeof(SCNHDR)
-#define SCNROUND ((long)16)
+#define SCNROUND ((int32_t)16)
typedef struct aouthdr {
- short magic; /* see above */
- short vstamp; /* version stamp */
- long tsize; /* text size in bytes, padded to DW bdry*/
- long dsize; /* initialized data " " */
- long bsize; /* uninitialized data " " */
- long entry; /* entry pt. */
- long text_start; /* base of text used for this file */
- long data_start; /* base of data used for this file */
- long bss_start; /* base of bss used for this file */
- long gprmask; /* general purpose register mask */
- long cprmask[4]; /* co-processor register masks */
- long gp_value; /* the gp value used for this object */
+ int16_t magic; /* see above */
+ int16_t vstamp; /* version stamp */
+ int32_t tsize; /* text size in bytes, padded to DW bdry*/
+ int32_t dsize; /* initialized data " " */
+ int32_t bsize; /* uninitialized data " " */
+ int32_t entry; /* entry pt. */
+ int32_t text_start; /* base of text used for this file */
+ int32_t data_start; /* base of data used for this file */
+ int32_t bss_start; /* base of bss used for this file */
+ int32_t gprmask; /* general purpose register mask */
+ int32_t cprmask[4]; /* co-processor register masks */
+ int32_t gp_value; /* the gp value used for this object */
} AOUTHDR;
#define AOUTHSZ sizeof(AOUTHDR)
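elf2ecoff runs on the build host, so on LP64 hosts (where sizeof(long) == 8) the old 'long' fields widened and re-padded these structs until they no longer matched the 32-bit on-disk ECOFF layout; the fixed-width types restore the match. A compile-time guard one could add (C11 sketch, not part of the patch):

	_Static_assert(sizeof(FILHDR) == 20, "ECOFF file header layout drifted");
	_Static_assert(sizeof(AOUTHDR) == 56, "ECOFF a.out header layout drifted");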
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 266c8137e859..6972b97235da 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -43,6 +43,8 @@
#include <limits.h>
#include <netinet/in.h>
#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
#include "ecoff.h"
@@ -55,8 +57,8 @@
/* -------------------------------------------------------------------- */
struct sect {
- unsigned long vaddr;
- unsigned long len;
+ uint32_t vaddr;
+ uint32_t len;
};
int *symTypeTable;
@@ -153,16 +155,16 @@ static char *saveRead(int file, off_t offset, off_t len, char *name)
}
#define swab16(x) \
- ((unsigned short)( \
- (((unsigned short)(x) & (unsigned short)0x00ffU) << 8) | \
- (((unsigned short)(x) & (unsigned short)0xff00U) >> 8) ))
+ ((uint16_t)( \
+ (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \
+ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ))
#define swab32(x) \
((unsigned int)( \
- (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
- (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
- (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
- (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
+ (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
+ (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
+ (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
+ (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) ))
static void convert_elf_hdr(Elf32_Ehdr * e)
{
@@ -274,7 +276,7 @@ int main(int argc, char *argv[])
struct aouthdr eah;
struct scnhdr esecs[6];
int infile, outfile;
- unsigned long cur_vma = ULONG_MAX;
+ uint32_t cur_vma = UINT32_MAX;
int addflag = 0;
int nosecs;
@@ -518,7 +520,7 @@ int main(int argc, char *argv[])
for (i = 0; i < nosecs; i++) {
printf
- ("Section %d: %s phys %lx size %lx file offset %lx\n",
+ ("Section %d: %s phys %"PRIx32" size %"PRIx32"\t file offset %"PRIx32"\n",
i, esecs[i].s_name, esecs[i].s_paddr,
esecs[i].s_size, esecs[i].s_scnptr);
}
@@ -564,17 +566,16 @@ int main(int argc, char *argv[])
the section can be loaded before copying. */
if (ph[i].p_type == PT_LOAD && ph[i].p_filesz) {
if (cur_vma != ph[i].p_vaddr) {
- unsigned long gap =
- ph[i].p_vaddr - cur_vma;
+ uint32_t gap = ph[i].p_vaddr - cur_vma;
char obuf[1024];
if (gap > 65536) {
fprintf(stderr,
- "Intersegment gap (%ld bytes) too large.\n",
+ "Intersegment gap (%"PRId32" bytes) too large.\n",
gap);
exit(1);
}
fprintf(stderr,
- "Warning: %ld byte intersegment gap.\n",
+ "Warning: %d byte intersegment gap.\n",
gap);
memset(obuf, 0, sizeof obuf);
while (gap) {
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 7b335ab21697..236833be6fbe 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -11,9 +11,7 @@
* Copyright (C) 2010 Cavium Networks, Inc.
*/
#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
#include <linux/bootmem.h>
-#include <linux/export.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -24,10 +22,16 @@
#include <asm/octeon/octeon.h>
#ifdef CONFIG_PCI
+#include <linux/pci.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
+struct octeon_dma_map_ops {
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
@@ -61,6 +65,11 @@ static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
+static const struct octeon_dma_map_ops octeon_gen1_ops = {
+ .phys_to_dma = octeon_gen1_phys_to_dma,
+ .dma_to_phys = octeon_gen1_dma_to_phys,
+};
+
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return octeon_hole_phys_to_dma(paddr);
@@ -71,6 +80,11 @@ static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
return octeon_hole_dma_to_phys(daddr);
}
+static const struct octeon_dma_map_ops octeon_gen2_ops = {
+ .phys_to_dma = octeon_gen2_phys_to_dma,
+ .dma_to_phys = octeon_gen2_dma_to_phys,
+};
+
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
@@ -93,6 +107,11 @@ static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
+static const struct octeon_dma_map_ops octeon_big_ops = {
+ .phys_to_dma = octeon_big_phys_to_dma,
+ .dma_to_phys = octeon_big_dma_to_phys,
+};
+
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
phys_addr_t paddr)
{
@@ -121,105 +140,51 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
return daddr;
}
-#endif /* CONFIG_PCI */
-
-static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
- direction, attrs);
- mb();
-
- return daddr;
-}
-
-static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, unsigned long attrs)
-{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
- mb();
- return r;
-}
-
-static void octeon_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
- mb();
-}
-
-static void octeon_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
- mb();
-}
-
-static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
- mb();
+static const struct octeon_dma_map_ops octeon_small_ops = {
+ .phys_to_dma = octeon_small_phys_to_dma,
+ .dma_to_phys = octeon_small_dma_to_phys,
+};
- return ret;
-}
+static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
-static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+void __init octeon_pci_dma_init(void)
{
- return daddr;
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ octeon_pci_dma_ops = &octeon_gen1_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_PCIE2:
+ octeon_pci_dma_ops = &octeon_gen2_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_BIG:
+ octeon_pci_dma_ops = &octeon_big_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+ octeon_pci_dma_ops = &octeon_small_ops;
+ break;
+ default:
+ BUG();
+ }
}
-
-struct octeon_dma_map_ops {
- const struct dma_map_ops dma_map_ops;
- dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
- phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
-};
+#endif /* CONFIG_PCI */
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
- struct octeon_dma_map_ops,
- dma_map_ops);
-
- return ops->phys_to_dma(dev, paddr);
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
+#endif
+ return paddr;
}
-EXPORT_SYMBOL(__phys_to_dma);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
- struct octeon_dma_map_ops,
- dma_map_ops);
-
- return ops->dma_to_phys(dev, daddr);
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
+#endif
+ return daddr;
}
-EXPORT_SYMBOL(__dma_to_phys);
-
-static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
- .dma_map_ops = {
- .alloc = octeon_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = octeon_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = octeon_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = octeon_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = octeon_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
- },
- .phys_to_dma = octeon_unity_phys_to_dma,
- .dma_to_phys = octeon_unity_dma_to_phys
-};
char *octeon_swiotlb;
@@ -283,52 +248,4 @@ void __init plat_swiotlb_setup(void)
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
-
- mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}
-
-#ifdef CONFIG_PCI
-static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
- .dma_map_ops = {
- .alloc = octeon_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = octeon_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = octeon_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = octeon_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = octeon_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
- },
-};
-
-const struct dma_map_ops *octeon_pci_dma_map_ops;
-
-void __init octeon_pci_dma_init(void)
-{
- switch (octeon_dma_bar_type) {
- case OCTEON_DMA_BAR_TYPE_PCIE2:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_PCIE:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_BIG:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_SMALL:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
- break;
- default:
- BUG();
- }
- octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
-}
-#endif /* CONFIG_PCI */
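For reference, a simplified sketch of how the generic dma-direct layer of this kernel generation consumes these hooks when ARCH_HAS_PHYS_TO_DMA is selected (abbreviated; the generic fallback additionally applies dev->dma_pfn_offset):

	static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
	{
	#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
		return __phys_to_dma(dev, paddr);	/* platform hook, as above */
	#else
		return (dma_addr_t)paddr;		/* identity on coherent setups */
	#endif
	}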
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index d18ed5af62f4..b8898e2b8a6f 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -42,9 +42,6 @@
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_asxx_enable(int block);
-
/**
* Probe RGMII ports and determine the number present
*
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index 578283350776..a176358c5a21 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -39,10 +39,7 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pcsx-defs.h>
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+#include <asm/octeon/cvmx-pcsxx-defs.h>
/**
* Perform initialization required only once for an SGMII port.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
index ef16aa00167b..2a574d272671 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -25,10 +25,6 @@
* Contact Cavium Networks for more information
***********************license end**************************************/
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_spxx_int_msk_enable(int index);
-void __cvmx_interrupt_stxx_int_msk_enable(int index);
-
/*
* Functions for SPI initialization, configuration,
* and monitoring.
@@ -41,6 +37,8 @@ void __cvmx_interrupt_stxx_int_msk_enable(int index);
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
/*
* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 19d54e02c185..2bb6912a580d 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -39,12 +39,9 @@
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-pcsxx-defs.h>
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-
int __cvmx_helper_xaui_enumerate(int interface)
{
union cvmx_gmxx_hg2_control gmx_hg2_control;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index b3aec101a65d..8272d8c648ca 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -814,7 +814,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
if (cpumask_test_cpu(cpu, dest) && enable_one) {
- enable_one = 0;
+ enable_one = false;
__set_bit(cd->bit, pen);
} else {
__clear_bit(cd->bit, pen);
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 8505db478904..807cadaf554e 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
return 0;
pd = of_find_device_by_node(ehci_node);
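+ /* release the reference on ehci_node now that the lookup is done */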
+ of_node_put(ehci_node);
if (!pd)
return 0;
@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
return 0;
pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
if (!pd)
return 0;
@@ -1067,6 +1069,6 @@ end_led:
static int __init octeon_publish_devices(void)
{
- return of_platform_bus_probe(NULL, octeon_ids, NULL);
+ return of_platform_populate(NULL, octeon_ids, NULL, NULL);
}
arch_initcall(octeon_publish_devices);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index a8034d0dcade..c2426232db06 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -36,6 +36,7 @@
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
@@ -1108,7 +1109,7 @@ void __init plat_mem_setup(void)
* Emit one character to the boot UART. Exported for use by the
* watchdog timer.
*/
-int prom_putchar(char c)
+void prom_putchar(char c)
{
uint64_t lsrval;
@@ -1119,7 +1120,6 @@ int prom_putchar(char c)
/* Write the byte */
cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
- return 1;
}
EXPORT_SYMBOL(prom_putchar);
@@ -1154,11 +1154,7 @@ void __init prom_free_prom_memory(void)
}
void __init octeon_fill_mac_addresses(void);
-int octeon_prune_device_tree(void);
-extern const char __appended_dtb;
-extern const char __dtb_octeon_3xxx_begin;
-extern const char __dtb_octeon_68xx_begin;
void __init device_tree_init(void)
{
const void *fdt;
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index be23fd25eeaa..030ff9c205fb 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -92,6 +92,8 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_JZ4780=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_INGENIC=y
# CONFIG_HWMON is not set
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 26b1cd5ffbf5..684c9dcba126 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -43,9 +43,6 @@ CONFIG_NETFILTER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_SCSI=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index df8a9a15ca83..81058295d35f 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -317,6 +317,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 14df9ef15d40..5c10cddc39d3 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -328,6 +328,7 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index 25092e344574..bb694f5065f1 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -330,6 +330,7 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 210bf609f785..5b5306b80576 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index e5934aa98397..85543599448f 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index cb2ca11c1789..067bb84ac916 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -134,6 +134,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index be29fcec69fc..dfc78c3172a3 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -137,6 +137,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 40462d4c90a0..50a2288c69f8 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -132,6 +132,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 4e50176cb3df..99a19cf5f9ba 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -326,6 +326,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/fw/arc/arc_con.c b/arch/mips/fw/arc/arc_con.c
index 769d4b9ac82e..365e3913231e 100644
--- a/arch/mips/fw/arc/arc_con.c
+++ b/arch/mips/fw/arc/arc_con.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/fs.h>
+#include <asm/setup.h>
#include <asm/sgialib.h>
static void prom_console_write(struct console *co, const char *s,
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
index 7e8ba5ce95be..be381307fbb0 100644
--- a/arch/mips/fw/arc/promlib.c
+++ b/arch/mips/fw/arc/promlib.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <asm/sgialib.h>
#include <asm/bcache.h>
+#include <asm/setup.h>
/*
* IP22 boardcache is not compatible with board caches. Thus we disable it
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 6aa264b9856a..8772617b64ce 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -19,6 +19,7 @@
#include <asm/mipsprom.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
+#include <asm/setup.h>
/* special SNI prom calls */
/*
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
index ba9b2c8cce68..08e33c6b2539 100644
--- a/arch/mips/generic/Kconfig
+++ b/arch/mips/generic/Kconfig
@@ -35,13 +35,13 @@ config LEGACY_BOARD_OCELOT
depends on LEGACY_BOARD_SEAD3=n
select LEGACY_BOARDS
select MSCC_OCELOT
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
config MSCC_OCELOT
bool
select GPIOLIB
select MSCC_OCELOT_IRQ
- select SYS_HAS_EARLY_PRINTK
- select USE_GENERIC_EARLY_PRINTK_8250
comment "FIT/UHI Boards"
@@ -65,6 +65,14 @@ config FIT_IMAGE_FDT_XILFPGA
Enable this to include the FDT for the MIPSfpga platform
from Imagination Technologies in the FIT kernel image.
+config FIT_IMAGE_FDT_OCELOT_PCB123
+ bool "Include FDT for Microsemi Ocelot PCB123"
+ select MSCC_OCELOT
+ help
+ Enable this to include the FDT for the Ocelot PCB123 platform
+ from Microsemi in the FIT kernel image.
+	  This requires U-Boot on the platform.
+
config VIRT_BOARD_RANCHU
bool "Support Ranchu platform for Android emulator"
help
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
index 0dd0d5d460a5..879cb80396c8 100644
--- a/arch/mips/generic/Platform
+++ b/arch/mips/generic/Platform
@@ -16,4 +16,5 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
its-y := vmlinux.its.S
its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_OCELOT_PCB123) += board-ocelot_pcb123.its.S
its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
diff --git a/arch/mips/generic/board-ocelot_pcb123.its.S b/arch/mips/generic/board-ocelot_pcb123.its.S
new file mode 100644
index 000000000000..5a7d5e1c878a
--- /dev/null
+++ b/arch/mips/generic/board-ocelot_pcb123.its.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/ {
+ images {
+ fdt@ocelot_pcb123 {
+ description = "MSCC Ocelot PCB123 Device Tree";
+ data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash@0 {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf@ocelot_pcb123 {
+ description = "Ocelot Linux kernel";
+ kernel = "kernel@0";
+ fdt = "fdt@ocelot_pcb123";
+ };
+ };
+};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 5ba6fcc26fa7..a106f8113842 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
@@ -204,22 +203,11 @@ void __init arch_init_irq(void)
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
+ of_node_put(intc_node);
irqchip_init();
}
-static int __init publish_devices(void)
-{
- if (!of_have_populated_dt())
- panic("Device-tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(publish_devices);
-
void __init prom_free_prom_memory(void)
{
}
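
The of_node_put() added to arch_init_irq() above releases the reference that the
earlier of_find_compatible_node() lookup took on intc_node, which the old code
leaked. A minimal sketch of that get/put pairing (kernel-context code, not
runnable standalone; the function name is illustrative):

#include <linux/of.h>

static void probe_cpu_intc(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL,
				     "mti,cpu-interrupt-controller");
	/* ... inspect np ... */

	of_node_put(np);	/* of_node_put(NULL) is a safe no-op */
}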
diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c
index b408dac722ac..7ba4ad5cc1d6 100644
--- a/arch/mips/generic/yamon-dt.c
+++ b/arch/mips/generic/yamon-dt.c
@@ -28,8 +28,6 @@ __init int yamon_dt_append_cmdline(void *fdt)
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
@@ -221,8 +219,6 @@ __init int yamon_dt_serial_config(void *fdt)
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
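
Both hunks above drop the legacy "/chosen@0" fallback, leaving the plain
find-or-add pattern. As a standalone libfdt sketch (error handling mirrors the
kernel code; the function name is illustrative):

#include <libfdt.h>

static int find_or_add_chosen(void *fdt)
{
	int chosen_off;

	/* Look up /chosen, creating it under the root node if absent. */
	chosen_off = fdt_path_offset(fdt, "/chosen");
	if (chosen_off == -FDT_ERR_NOTFOUND)
		chosen_off = fdt_add_subnode(fdt, 0, "chosen");

	return chosen_off;	/* node offset, or a negative libfdt error */
}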
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 45d541baf359..58351e48421e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -8,6 +8,7 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 0ab176bdb8e8..0269b3de8b51 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -22,6 +22,17 @@
#include <asm/cmpxchg.h>
#include <asm/war.h>
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
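
A standalone illustration of how __scbeqz gets spliced into the asm templates
below: adjacent C string literals concatenate at compile time, so the branch
mnemonic is fixed when the kernel is built, at no runtime cost. The
R10000_LLSC_WAR value here is a stand-in, not the kernel's config logic:

#include <stdio.h>

#define R10000_LLSC_WAR 0		/* stand-in: no workaround needed */

#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif

int main(void)
{
	/* The string the inline-asm template expands to: */
	puts("	sc	%0, %1\n"
	     "\t" __scbeqz "	%0, 1b");
	return 0;
}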
+
#define ATOMIC_INIT(i) { (i) }
/*
@@ -44,31 +55,18 @@
#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %0, %1 # atomic_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %0, %1 # atomic_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
@@ -83,36 +81,20 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
@@ -131,36 +113,20 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %1, %2 # atomic_fetch_" #op " \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!result)); \
- \
- result = temp; \
} else { \
unsigned long flags; \
\
@@ -218,38 +184,17 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
smp_mb__before_llsc();
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
- " subu %0, %1, %3 \n"
- " bltz %0, 1f \n"
- " sc %0, %2 \n"
- " .set noreorder \n"
- " beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set reorder \n"
- "1: \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
int temp;
__asm__ __volatile__(
" .set "MIPS_ISA_LEVEL" \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
+ " move %1, %0 \n"
" bltz %0, 1f \n"
- " sc %0, %2 \n"
- " .set noreorder \n"
- " beqz %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set reorder \n"
+ " sc %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
@@ -274,97 +219,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-/*
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-/*
- * atomic_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
/*
* atomic_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*/
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
-/*
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1, (v))
-
-/*
- * atomic_dec - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1, (v))
-
-/*
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
#ifdef CONFIG_64BIT
#define ATOMIC64_INIT(i) { (i) }
@@ -386,31 +246,18 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %0, %1 # atomic64_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %0, %1 # atomic64_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
@@ -425,37 +272,20 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "=" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
- : "memory"); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
@@ -478,33 +308,16 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %1, %2 # atomic64_fetch_" #op "\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "=" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
- : "memory"); \
- } while (unlikely(!result)); \
- \
- result = temp; \
} else { \
unsigned long flags; \
\
@@ -563,38 +376,17 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
smp_mb__before_llsc();
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_sub_if_positive\n"
- " dsubu %0, %1, %3 \n"
- " bltz %0, 1f \n"
- " scd %0, %2 \n"
- " .set noreorder \n"
- " beqzl %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set reorder \n"
- "1: \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp),
- "=" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
long temp;
__asm__ __volatile__(
" .set "MIPS_ISA_LEVEL" \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
+ " move %1, %0 \n"
" bltz %0, 1f \n"
- " scd %0, %2 \n"
- " .set noreorder \n"
- " beqz %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set reorder \n"
+ " scd %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
@@ -620,99 +412,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-/*
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-/*
- * atomic64_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*/
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
-/*
- * atomic64_inc - increment atomic variable
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic64_inc(v) atomic64_add(1, (v))
-
-/*
- * atomic64_dec - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic64_dec(v) atomic64_sub(1, (v))
-
-/*
- * atomic64_add_negative - add and test if negative
- * @v: pointer of type atomic64_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
-
#endif /* CONFIG_64BIT */
#endif /* _ASM_ATOMIC_H */
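
For reference, the atomic_sub_if_positive() hunks above insert a "move %1, %0"
because sc consumes its source register as the success flag; copying the freshly
computed value into temp lets sc clobber temp while result survives to be
returned. A minimal C sketch of the operation's semantics, using GCC's __atomic
builtins instead of ll/sc (illustrative only, not the kernel's implementation):

#include <stdio.h>

static int sub_if_positive_sketch(int *v, int i)
{
	int old, new;

	old = __atomic_load_n(v, __ATOMIC_RELAXED);
	do {
		new = old - i;
		if (new < 0)	/* result would go negative: skip the store */
			return new;
	} while (!__atomic_compare_exchange_n(v, &old, new, 1,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
	return new;
}

int main(void)
{
	int v = 1;

	printf("%d\n", sub_if_positive_sketch(&v, 1));	/* 0, stored    */
	printf("%d\n", sub_if_positive_sketch(&v, 1));	/* -1, no store */
	printf("v = %d\n", v);				/* v = 0        */
	return 0;
}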
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index b3e2975f83d3..bf6a8afd7ad2 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -123,22 +123,6 @@ static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
barrier();
}
-static inline void bmips_post_dma_flush(struct device *dev)
-{
- void __iomem *cbr = BMIPS_GET_CBR();
- u32 cfg;
-
- if (boot_cpu_type() != CPU_BMIPS3300 &&
- boot_cpu_type() != CPU_BMIPS4350 &&
- boot_cpu_type() != CPU_BMIPS4380)
- return;
-
- /* Flush stale data out of the readahead cache */
- cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
- __raw_readl(cbr + BMIPS_RAC_CONFIG);
-}
-
#endif /* !defined(__ASSEMBLY__) */
#endif /* _ASM_BMIPS_H */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 9cdb4e4ce258..0edba3e75747 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -14,39 +14,77 @@
#include <asm/isa-rev.h>
#include <cpu-feature-overrides.h>
+#define __ase(ase) (cpu_data[0].ases & (ase))
+#define __opt(opt) (cpu_data[0].options & (opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * Note that these should only be used in cases where a kernel built for an
+ * older ISA *cannot* run on a CPU which supports the feature in question. For
+ * example this may be used for features introduced with MIPSr6, since a kernel
+ * built for an older ISA cannot run on a MIPSr6 CPU. This should not be used
+ * for MIPSr2 features however, since a MIPSr1 or earlier kernel might run on a
+ * MIPSr2 CPU.
+ */
+#define __isa_ge_and_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) && __ase(ase))
+#define __isa_ge_and_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) && __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *or* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & then become required.
+ */
+#define __isa_ge_or_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) || __ase(ase))
+#define __isa_ge_or_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) || __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is < isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & are then removed - i.e. no longer present in any CPU implementing
+ * the given ISA revision.
+ */
+#define __isa_lt_and_ase(isa, ase) ((MIPS_ISA_REV < (isa)) && __ase(ase))
+#define __isa_lt_and_opt(isa, opt) ((MIPS_ISA_REV < (isa)) && __opt(opt))
+
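A standalone sketch of why these wrappers help: when MIPS_ISA_REV satisfies the
comparison, the || or && short-circuits on a compile-time constant and the
cpu_data[0] load is optimised away entirely. MIPS_ISA_REV, __opt() and the
option bit below are stand-ins, not the kernel's headers:

#include <stdio.h>

#define MIPS_ISA_REV 6			/* stand-in: built for MIPSr6 */
static unsigned long fake_options;	/* stand-in for cpu_data[0].options */

#define __opt(opt)		(fake_options & (opt))
#define __isa_ge_or_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) || __opt(opt))

#define MIPS_CPU_RIXI		(1u << 0)	/* stand-in option bit */

int main(void)
{
	/* For a MIPSr6 build this is the constant 1: no runtime test. */
	printf("cpu_has_rixi = %d\n", !!__isa_ge_or_opt(6, MIPS_CPU_RIXI));
	return 0;
}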
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
*/
#ifndef cpu_has_tlb
-#define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB)
+#define cpu_has_tlb __opt(MIPS_CPU_TLB)
#endif
#ifndef cpu_has_ftlb
-#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB)
+#define cpu_has_ftlb __opt(MIPS_CPU_FTLB)
#endif
#ifndef cpu_has_tlbinv
-#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV)
+#define cpu_has_tlbinv __opt(MIPS_CPU_TLBINV)
#endif
#ifndef cpu_has_segments
-#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS)
+#define cpu_has_segments __opt(MIPS_CPU_SEGMENTS)
#endif
#ifndef cpu_has_eva
-#define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA)
+#define cpu_has_eva __opt(MIPS_CPU_EVA)
#endif
#ifndef cpu_has_htw
-#define cpu_has_htw (cpu_data[0].options & MIPS_CPU_HTW)
+#define cpu_has_htw __opt(MIPS_CPU_HTW)
#endif
#ifndef cpu_has_ldpte
-#define cpu_has_ldpte (cpu_data[0].options & MIPS_CPU_LDPTE)
+#define cpu_has_ldpte __opt(MIPS_CPU_LDPTE)
#endif
#ifndef cpu_has_rixiex
-#define cpu_has_rixiex (cpu_data[0].options & MIPS_CPU_RIXIEX)
+#define cpu_has_rixiex __isa_ge_or_opt(6, MIPS_CPU_RIXIEX)
#endif
#ifndef cpu_has_maar
-#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR)
+#define cpu_has_maar __opt(MIPS_CPU_MAAR)
#endif
#ifndef cpu_has_rw_llb
-#define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#define cpu_has_rw_llb __isa_ge_or_opt(6, MIPS_CPU_RW_LLB)
#endif
/*
@@ -59,18 +97,18 @@
#define cpu_has_3kex (!cpu_has_4kex)
#endif
#ifndef cpu_has_4kex
-#define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX)
+#define cpu_has_4kex __isa_ge_or_opt(1, MIPS_CPU_4KEX)
#endif
#ifndef cpu_has_3k_cache
-#define cpu_has_3k_cache (cpu_data[0].options & MIPS_CPU_3K_CACHE)
+#define cpu_has_3k_cache __isa_lt_and_opt(1, MIPS_CPU_3K_CACHE)
#endif
#define cpu_has_6k_cache 0
#define cpu_has_8k_cache 0
#ifndef cpu_has_4k_cache
-#define cpu_has_4k_cache (cpu_data[0].options & MIPS_CPU_4K_CACHE)
+#define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
#endif
#ifndef cpu_has_tx39_cache
-#define cpu_has_tx39_cache (cpu_data[0].options & MIPS_CPU_TX39_CACHE)
+#define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
@@ -83,92 +121,92 @@
#define raw_cpu_has_fpu cpu_has_fpu
#endif
#ifndef cpu_has_32fpr
-#define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR)
+#define cpu_has_32fpr __isa_ge_or_opt(1, MIPS_CPU_32FPR)
#endif
#ifndef cpu_has_counter
-#define cpu_has_counter (cpu_data[0].options & MIPS_CPU_COUNTER)
+#define cpu_has_counter __opt(MIPS_CPU_COUNTER)
#endif
#ifndef cpu_has_watch
-#define cpu_has_watch (cpu_data[0].options & MIPS_CPU_WATCH)
+#define cpu_has_watch __opt(MIPS_CPU_WATCH)
#endif
#ifndef cpu_has_divec
-#define cpu_has_divec (cpu_data[0].options & MIPS_CPU_DIVEC)
+#define cpu_has_divec __isa_ge_or_opt(1, MIPS_CPU_DIVEC)
#endif
#ifndef cpu_has_vce
-#define cpu_has_vce (cpu_data[0].options & MIPS_CPU_VCE)
+#define cpu_has_vce __opt(MIPS_CPU_VCE)
#endif
#ifndef cpu_has_cache_cdex_p
-#define cpu_has_cache_cdex_p (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_P)
+#define cpu_has_cache_cdex_p __opt(MIPS_CPU_CACHE_CDEX_P)
#endif
#ifndef cpu_has_cache_cdex_s
-#define cpu_has_cache_cdex_s (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
+#define cpu_has_cache_cdex_s __opt(MIPS_CPU_CACHE_CDEX_S)
#endif
#ifndef cpu_has_prefetch
-#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#define cpu_has_prefetch __isa_ge_or_opt(1, MIPS_CPU_PREFETCH)
#endif
#ifndef cpu_has_mcheck
-#define cpu_has_mcheck (cpu_data[0].options & MIPS_CPU_MCHECK)
+#define cpu_has_mcheck __isa_ge_or_opt(1, MIPS_CPU_MCHECK)
#endif
#ifndef cpu_has_ejtag
-#define cpu_has_ejtag (cpu_data[0].options & MIPS_CPU_EJTAG)
+#define cpu_has_ejtag __opt(MIPS_CPU_EJTAG)
#endif
#ifndef cpu_has_llsc
-#define cpu_has_llsc (cpu_data[0].options & MIPS_CPU_LLSC)
+#define cpu_has_llsc __isa_ge_or_opt(1, MIPS_CPU_LLSC)
#endif
#ifndef cpu_has_bp_ghist
-#define cpu_has_bp_ghist (cpu_data[0].options & MIPS_CPU_BP_GHIST)
+#define cpu_has_bp_ghist __opt(MIPS_CPU_BP_GHIST)
#endif
#ifndef kernel_uses_llsc
#define kernel_uses_llsc cpu_has_llsc
#endif
#ifndef cpu_has_guestctl0ext
-#define cpu_has_guestctl0ext (cpu_data[0].options & MIPS_CPU_GUESTCTL0EXT)
+#define cpu_has_guestctl0ext __opt(MIPS_CPU_GUESTCTL0EXT)
#endif
#ifndef cpu_has_guestctl1
-#define cpu_has_guestctl1 (cpu_data[0].options & MIPS_CPU_GUESTCTL1)
+#define cpu_has_guestctl1 __opt(MIPS_CPU_GUESTCTL1)
#endif
#ifndef cpu_has_guestctl2
-#define cpu_has_guestctl2 (cpu_data[0].options & MIPS_CPU_GUESTCTL2)
+#define cpu_has_guestctl2 __opt(MIPS_CPU_GUESTCTL2)
#endif
#ifndef cpu_has_guestid
-#define cpu_has_guestid (cpu_data[0].options & MIPS_CPU_GUESTID)
+#define cpu_has_guestid __opt(MIPS_CPU_GUESTID)
#endif
#ifndef cpu_has_drg
-#define cpu_has_drg (cpu_data[0].options & MIPS_CPU_DRG)
+#define cpu_has_drg __opt(MIPS_CPU_DRG)
#endif
#ifndef cpu_has_mips16
-#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
+#define cpu_has_mips16 __isa_lt_and_ase(6, MIPS_ASE_MIPS16)
#endif
#ifndef cpu_has_mips16e2
-#define cpu_has_mips16e2 (cpu_data[0].ases & MIPS_ASE_MIPS16E2)
+#define cpu_has_mips16e2 __isa_lt_and_ase(6, MIPS_ASE_MIPS16E2)
#endif
#ifndef cpu_has_mdmx
-#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
+#define cpu_has_mdmx __isa_lt_and_ase(6, MIPS_ASE_MDMX)
#endif
#ifndef cpu_has_mips3d
-#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
+#define cpu_has_mips3d __isa_lt_and_ase(6, MIPS_ASE_MIPS3D)
#endif
#ifndef cpu_has_smartmips
-#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
+#define cpu_has_smartmips __isa_lt_and_ase(6, MIPS_ASE_SMARTMIPS)
#endif
#ifndef cpu_has_rixi
-#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
+#define cpu_has_rixi __isa_ge_or_opt(6, MIPS_CPU_RIXI)
#endif
#ifndef cpu_has_mmips
# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
-# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+# define cpu_has_mmips __opt(MIPS_CPU_MICROMIPS)
# else
# define cpu_has_mmips 0
# endif
#endif
#ifndef cpu_has_lpa
-#define cpu_has_lpa (cpu_data[0].options & MIPS_CPU_LPA)
+#define cpu_has_lpa __opt(MIPS_CPU_LPA)
#endif
#ifndef cpu_has_mvh
-#define cpu_has_mvh (cpu_data[0].options & MIPS_CPU_MVH)
+#define cpu_has_mvh __opt(MIPS_CPU_MVH)
#endif
#ifndef cpu_has_xpa
#define cpu_has_xpa (cpu_has_lpa && cpu_has_mvh)
@@ -338,32 +376,32 @@
#endif
#ifndef cpu_has_dsp
-#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
+#define cpu_has_dsp __ase(MIPS_ASE_DSP)
#endif
#ifndef cpu_has_dsp2
-#define cpu_has_dsp2 (cpu_data[0].ases & MIPS_ASE_DSP2P)
+#define cpu_has_dsp2 __ase(MIPS_ASE_DSP2P)
#endif
#ifndef cpu_has_dsp3
-#define cpu_has_dsp3 (cpu_data[0].ases & MIPS_ASE_DSP3)
+#define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
#endif
#ifndef cpu_has_mipsmt
-#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT)
+#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
#endif
#ifndef cpu_has_vp
-#define cpu_has_vp (cpu_data[0].options & MIPS_CPU_VP)
+#define cpu_has_vp __isa_ge_and_opt(6, MIPS_CPU_VP)
#endif
#ifndef cpu_has_userlocal
-#define cpu_has_userlocal (cpu_data[0].options & MIPS_CPU_ULRI)
+#define cpu_has_userlocal __isa_ge_or_opt(6, MIPS_CPU_ULRI)
#endif
#ifdef CONFIG_32BIT
# ifndef cpu_has_nofpuex
-# define cpu_has_nofpuex (cpu_data[0].options & MIPS_CPU_NOFPUEX)
+# define cpu_has_nofpuex __isa_lt_and_opt(1, MIPS_CPU_NOFPUEX)
# endif
# ifndef cpu_has_64bits
# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
@@ -405,19 +443,19 @@
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_VI) && !defined(cpu_has_vint)
-# define cpu_has_vint (cpu_data[0].options & MIPS_CPU_VINT)
+# define cpu_has_vint __opt(MIPS_CPU_VINT)
#elif !defined(cpu_has_vint)
# define cpu_has_vint 0
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_EI) && !defined(cpu_has_veic)
-# define cpu_has_veic (cpu_data[0].options & MIPS_CPU_VEIC)
+# define cpu_has_veic __opt(MIPS_CPU_VEIC)
#elif !defined(cpu_has_veic)
# define cpu_has_veic 0
#endif
#ifndef cpu_has_inclusive_pcaches
-#define cpu_has_inclusive_pcaches (cpu_data[0].options & MIPS_CPU_INCLUSIVE_CACHES)
+#define cpu_has_inclusive_pcaches __opt(MIPS_CPU_INCLUSIVE_CACHES)
#endif
#ifndef cpu_dcache_line_size
@@ -438,63 +476,63 @@
#endif
#ifndef cpu_has_perf_cntr_intr_bit
-#define cpu_has_perf_cntr_intr_bit (cpu_data[0].options & MIPS_CPU_PCI)
+#define cpu_has_perf_cntr_intr_bit __opt(MIPS_CPU_PCI)
#endif
#ifndef cpu_has_vz
-#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
+#define cpu_has_vz __ase(MIPS_ASE_VZ)
#endif
#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
-# define cpu_has_msa (cpu_data[0].ases & MIPS_ASE_MSA)
+# define cpu_has_msa __ase(MIPS_ASE_MSA)
#elif !defined(cpu_has_msa)
# define cpu_has_msa 0
#endif
#ifndef cpu_has_ufr
-# define cpu_has_ufr (cpu_data[0].options & MIPS_CPU_UFR)
+# define cpu_has_ufr __opt(MIPS_CPU_UFR)
#endif
#ifndef cpu_has_fre
-# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
+# define cpu_has_fre __opt(MIPS_CPU_FRE)
#endif
#ifndef cpu_has_cdmm
-# define cpu_has_cdmm (cpu_data[0].options & MIPS_CPU_CDMM)
+# define cpu_has_cdmm __opt(MIPS_CPU_CDMM)
#endif
#ifndef cpu_has_small_pages
-# define cpu_has_small_pages (cpu_data[0].options & MIPS_CPU_SP)
+# define cpu_has_small_pages __opt(MIPS_CPU_SP)
#endif
#ifndef cpu_has_nan_legacy
-#define cpu_has_nan_legacy (cpu_data[0].options & MIPS_CPU_NAN_LEGACY)
+#define cpu_has_nan_legacy __isa_lt_and_opt(6, MIPS_CPU_NAN_LEGACY)
#endif
#ifndef cpu_has_nan_2008
-#define cpu_has_nan_2008 (cpu_data[0].options & MIPS_CPU_NAN_2008)
+#define cpu_has_nan_2008 __isa_ge_or_opt(6, MIPS_CPU_NAN_2008)
#endif
#ifndef cpu_has_ebase_wg
-# define cpu_has_ebase_wg (cpu_data[0].options & MIPS_CPU_EBASE_WG)
+# define cpu_has_ebase_wg __opt(MIPS_CPU_EBASE_WG)
#endif
#ifndef cpu_has_badinstr
-# define cpu_has_badinstr (cpu_data[0].options & MIPS_CPU_BADINSTR)
+# define cpu_has_badinstr __isa_ge_or_opt(6, MIPS_CPU_BADINSTR)
#endif
#ifndef cpu_has_badinstrp
-# define cpu_has_badinstrp (cpu_data[0].options & MIPS_CPU_BADINSTRP)
+# define cpu_has_badinstrp __isa_ge_or_opt(6, MIPS_CPU_BADINSTRP)
#endif
#ifndef cpu_has_contextconfig
-# define cpu_has_contextconfig (cpu_data[0].options & MIPS_CPU_CTXTC)
+# define cpu_has_contextconfig __opt(MIPS_CPU_CTXTC)
#endif
#ifndef cpu_has_perf
-# define cpu_has_perf (cpu_data[0].options & MIPS_CPU_PERF)
+# define cpu_has_perf __opt(MIPS_CPU_PERF)
#endif
-#if defined(CONFIG_SMP) && (MIPS_ISA_REV >= 6)
+#ifdef CONFIG_SMP
/*
* Some systems share FTLB RAMs between threads within a core (siblings in
* kernel parlance). This means that FTLB entries may become invalid at almost
@@ -507,7 +545,7 @@
*/
# ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram \
- (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_RAM)
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_RAM)
# endif
/*
@@ -524,9 +562,9 @@
*/
# ifndef cpu_has_shared_ftlb_entries
# define cpu_has_shared_ftlb_entries \
- (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_ENTRIES)
# endif
-#endif /* SMP && MIPS_ISA_REV >= 6 */
+#endif /* SMP */
#ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram 0
@@ -537,7 +575,7 @@
#ifdef CONFIG_MIPS_MT_SMP
# define cpu_has_mipsmt_pertccounters \
- (cpu_data[0].options & MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
+ __isa_lt_and_opt(6, MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
#else
# define cpu_has_mipsmt_pertccounters 0
#endif /* CONFIG_MIPS_MT_SMP */
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 5b9d02ef4f60..dacbdb84516a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -225,31 +225,32 @@
* Definitions for 7:0 on legacy processors
*/
-#define PRID_REV_TX4927 0x0022
-#define PRID_REV_TX4937 0x0030
-#define PRID_REV_R4400 0x0040
-#define PRID_REV_R3000A 0x0030
-#define PRID_REV_R3000 0x0020
-#define PRID_REV_R2000A 0x0010
-#define PRID_REV_TX3912 0x0010
-#define PRID_REV_TX3922 0x0030
-#define PRID_REV_TX3927 0x0040
-#define PRID_REV_VR4111 0x0050
-#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
-#define PRID_REV_VR4121 0x0060
-#define PRID_REV_VR4122 0x0070
-#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
-#define PRID_REV_VR4130 0x0080
-#define PRID_REV_34K_V1_0_2 0x0022
-#define PRID_REV_LOONGSON1B 0x0020
-#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
-#define PRID_REV_LOONGSON2E 0x0002
-#define PRID_REV_LOONGSON2F 0x0003
-#define PRID_REV_LOONGSON3A_R1 0x0005
-#define PRID_REV_LOONGSON3B_R1 0x0006
-#define PRID_REV_LOONGSON3B_R2 0x0007
-#define PRID_REV_LOONGSON3A_R2 0x0008
-#define PRID_REV_LOONGSON3A_R3 0x0009
+#define PRID_REV_TX4927 0x0022
+#define PRID_REV_TX4937 0x0030
+#define PRID_REV_R4400 0x0040
+#define PRID_REV_R3000A 0x0030
+#define PRID_REV_R3000 0x0020
+#define PRID_REV_R2000A 0x0010
+#define PRID_REV_TX3912 0x0010
+#define PRID_REV_TX3922 0x0030
+#define PRID_REV_TX3927 0x0040
+#define PRID_REV_VR4111 0x0050
+#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
+#define PRID_REV_VR4121 0x0060
+#define PRID_REV_VR4122 0x0070
+#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
+#define PRID_REV_VR4130 0x0080
+#define PRID_REV_34K_V1_0_2 0x0022
+#define PRID_REV_LOONGSON1B 0x0020
+#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
+#define PRID_REV_LOONGSON2E 0x0002
+#define PRID_REV_LOONGSON2F 0x0003
+#define PRID_REV_LOONGSON3A_R1 0x0005
+#define PRID_REV_LOONGSON3B_R1 0x0006
+#define PRID_REV_LOONGSON3B_R2 0x0007
+#define PRID_REV_LOONGSON3A_R2 0x0008
+#define PRID_REV_LOONGSON3A_R3_0 0x0009
+#define PRID_REV_LOONGSON3A_R3_1 0x000d
/*
* Older processors used to encode processor version and revision in two
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
index 72d0eab02afc..8eda48748ed5 100644
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -21,10 +21,10 @@ enum coherent_io_user_state {
extern enum coherent_io_user_state coherentio;
extern int hw_coherentio;
#else
-#ifdef CONFIG_DMA_COHERENT
-#define coherentio IO_COHERENCE_ENABLED
-#else
+#ifdef CONFIG_DMA_NONCOHERENT
#define coherentio IO_COHERENCE_DISABLED
+#else
+#define coherentio IO_COHERENCE_ENABLED
#endif
#define hw_coherentio 0
#endif /* CONFIG_DMA_MAYBE_COHERENT */
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
index f32f15530aba..b5c240806e1b 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -1 +1,16 @@
-#include <asm/dma-coherence.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_DMA_DIRECT_H
+#define _MIPS_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _MIPS_DMA_DIRECT_H */
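
A worked example of the dma_capable() range check above: a buffer is addressable
only if its last byte, addr + size - 1, fits under the device's DMA mask.
Standalone demo with made-up addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool capable(uint64_t mask, uint64_t addr, uint64_t size)
{
	return addr + size - 1 <= mask;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* a 32-bit DMA mask */

	printf("%d\n", capable(mask, 0xfffff000ULL, 0x1000)); /* 1: fits   */
	printf("%d\n", capable(mask, 0xfffff001ULL, 0x1000)); /* 0: spills */
	return 0;
}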
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 886e75a383f2..e81c4e97ff1a 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -2,19 +2,21 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <linux/scatterlist.h>
-#include <asm/dma-coherence.h>
-#include <asm/cache.h>
+#include <linux/swiotlb.h>
-#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
-#include <dma-coherence.h>
-#endif
-
-extern const struct dma_map_ops *mips_dma_map_ops;
+extern const struct dma_map_ops jazz_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return mips_dma_map_ops;
+#if defined(CONFIG_MACH_JAZZ)
+ return &jazz_dma_ops;
+#elif defined(CONFIG_SWIOTLB)
+ return &swiotlb_dma_ops;
+#elif defined(CONFIG_DMA_NONCOHERENT_OPS)
+ return &dma_noncoherent_ops;
+#else
+ return &dma_direct_ops;
+#endif
}
#define arch_setup_dma_ops arch_setup_dma_ops
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index cea8ad864b3f..54c730aed327 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -12,6 +12,8 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H
+#define ARCH_HAS_IOREMAP_WC
+
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -141,14 +143,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
{
- return (unsigned long)address - PAGE_OFFSET;
+ return virt_to_phys(address);
}
-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET);
+ return phys_to_virt(address);
}
#define isa_page_to_bus page_to_phys
@@ -278,15 +280,25 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
#define ioremap_cache ioremap_cachable
/*
- * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
- * requests a cachable mapping, ioremap_uncached_accelerated requests a
- * mapping using the uncached accelerated mode which isn't supported on
- * all processors.
+ * ioremap_wc - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, where it may vastly
+ * improve communication performance. If it is determined at boot that the
+ * CPU CCA doesn't support UCA, this method falls back to the
+ * _CACHE_UNCACHED option (see cpu_probe()).
*/
-#define ioremap_cacheable_cow(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
-#define ioremap_uncached_accelerated(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
+#define ioremap_wc(offset, size) \
+ __ioremap_mode((offset), (size), boot_cpu_data.writecombine)
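
A minimal sketch of how a driver might use the new ioremap_wc() for a
prefetchable BAR (kernel-context code, not runnable standalone; bar_start,
bar_len and the store are placeholders):

#include <linux/io.h>
#include <linux/errno.h>

static void __iomem *wc_win;

static int map_wc_window(phys_addr_t bar_start, size_t bar_len)
{
	wc_win = ioremap_wc(bar_start, bar_len);
	if (!wc_win)
		return -ENOMEM;

	/* Streaming stores through wc_win may now combine into bursts. */
	writel(0, wc_win);
	return 0;
}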
static inline void iounmap(const volatile void __iomem *addr)
{
@@ -590,7 +602,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
*
* This API used to be exported; it now is for arch code internal use only.
*/
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -609,7 +621,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#define dma_cache_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
/*
* Read a 32-bit register that requires a 64-bit read cycle on the bus.
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index ad1a99948f27..a72dfbf1babb 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -68,16 +68,6 @@ struct prev_kprobe {
unsigned long saved_epc;
};
-#define MAX_JPROBES_STACK_SIZE 128
-#define MAX_JPROBES_STACK_ADDR \
- (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs))
-
-#define MIN_JPROBES_STACK_SIZE(ADDR) \
- ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
- ? MAX_JPROBES_STACK_ADDR - (ADDR) \
- : MAX_JPROBES_STACK_SIZE)
-
-
#define SKIP_DELAYSLOT 0x0001
/* per-cpu kprobe control block */
@@ -86,12 +76,9 @@ struct kprobe_ctlblk {
unsigned long kprobe_old_SR;
unsigned long kprobe_saved_SR;
unsigned long kprobe_saved_epc;
- unsigned long jprobe_saved_sp;
- struct pt_regs jprobe_saved_regs;
/* Per-thread fields, used while emulating branches */
unsigned long flags;
unsigned long target_epc;
- u8 jprobes_stack[MAX_JPROBES_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
index 660ab64c0fc9..a004d94dfbdd 100644
--- a/arch/mips/include/asm/mach-ar7/spaces.h
+++ b/arch/mips/include/asm/mach-ar7/spaces.h
@@ -17,9 +17,6 @@
#define PAGE_OFFSET _AC(0x94000000, UL)
#define PHYS_OFFSET _AC(0x14000000, UL)
-#define UNCAC_BASE _AC(0xb4000000, UL) /* 0xa0000000 + PHYS_OFFSET */
-#define IO_BASE UNCAC_BASE
-
#include <asm/mach-generic/spaces.h>
#endif /* __ASM_AR7_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h
deleted file mode 100644
index d5defdde32db..000000000000
--- a/arch/mips/include/asm/mach-ath25/dma-coherence.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- *
- */
-#ifndef __ASM_MACH_ATH25_DMA_COHERENCE_H
-#define __ASM_MACH_ATH25_DMA_COHERENCE_H
-
-#include <linux/device.h>
-
-/*
- * We need some arbitrary non-zero value to be programmed to the BAR1 register
- * of PCI host controller to enable DMA. The same value should be used as the
- * offset to calculate the physical address of DMA buffer for PCI devices.
- */
-#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
-
-static inline dma_addr_t ath25_dev_offset(struct device *dev)
-{
-#ifdef CONFIG_PCI
- extern struct bus_type pci_bus_type;
-
- if (dev && dev->bus == &pci_bus_type)
- return AR2315_PCI_HOST_SDRAM_BASEADDR;
-#endif
- return 0;
-}
-
-static inline dma_addr_t
-plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return virt_to_phys(addr) + ath25_dev_offset(dev);
-}
-
-static inline dma_addr_t
-plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
- return page_to_phys(page) + ath25_dev_offset(dev);
-}
-
-static inline unsigned long
-plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr - ath25_dev_offset(dev);
-}
-
-static inline void
-plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_COHERENT
- return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
- return 0;
-#endif
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index d99ca862dae3..284b4fa23e03 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -20,6 +20,10 @@
#include <linux/bitops.h>
#define AR71XX_APB_BASE 0x18000000
+#define AR71XX_GE0_BASE 0x19000000
+#define AR71XX_GE0_SIZE 0x10000
+#define AR71XX_GE1_BASE 0x1a000000
+#define AR71XX_GE1_SIZE 0x10000
#define AR71XX_EHCI_BASE 0x1b000000
#define AR71XX_EHCI_SIZE 0x1000
#define AR71XX_OHCI_BASE 0x1c000000
@@ -39,6 +43,8 @@
#define AR71XX_PLL_SIZE 0x100
#define AR71XX_RESET_BASE (AR71XX_APB_BASE + 0x00060000)
#define AR71XX_RESET_SIZE 0x100
+#define AR71XX_MII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR71XX_MII_SIZE 0x100
#define AR71XX_PCI_MEM_BASE 0x10000000
#define AR71XX_PCI_MEM_SIZE 0x07000000
@@ -81,18 +87,39 @@
#define AR933X_UART_BASE (AR71XX_APB_BASE + 0x00020000)
#define AR933X_UART_SIZE 0x14
+#define AR933X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR933X_GMAC_SIZE 0x04
#define AR933X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define AR933X_WMAC_SIZE 0x20000
#define AR933X_EHCI_BASE 0x1b000000
#define AR933X_EHCI_SIZE 0x1000
+#define AR934X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR934X_GMAC_SIZE 0x14
#define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define AR934X_WMAC_SIZE 0x20000
#define AR934X_EHCI_BASE 0x1b000000
#define AR934X_EHCI_SIZE 0x200
+#define AR934X_NFC_BASE 0x1b000200
+#define AR934X_NFC_SIZE 0xb8
#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
#define AR934X_SRIF_SIZE 0x1000
+#define QCA953X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA953X_GMAC_SIZE 0x14
+#define QCA953X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA953X_WMAC_SIZE 0x20000
+#define QCA953X_EHCI_BASE 0x1b000000
+#define QCA953X_EHCI_SIZE 0x200
+#define QCA953X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
+#define QCA953X_SRIF_SIZE 0x1000
+
+#define QCA953X_PCI_CFG_BASE0 0x14000000
+#define QCA953X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA953X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000)
+#define QCA953X_PCI_MEM_BASE0 0x10000000
+#define QCA953X_PCI_MEM_SIZE 0x02000000
+
#define QCA955X_PCI_MEM_BASE0 0x10000000
#define QCA955X_PCI_MEM_BASE1 0x12000000
#define QCA955X_PCI_MEM_SIZE 0x02000000
@@ -106,11 +133,72 @@
#define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
#define QCA955X_PCI_CTRL_SIZE 0x100
+#define QCA955X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA955X_GMAC_SIZE 0x40
#define QCA955X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define QCA955X_WMAC_SIZE 0x20000
#define QCA955X_EHCI0_BASE 0x1b000000
#define QCA955X_EHCI1_BASE 0x1b400000
#define QCA955X_EHCI_SIZE 0x1000
+#define QCA955X_NFC_BASE 0x1b800200
+#define QCA955X_NFC_SIZE 0xb8
+
+#define QCA956X_PCI_MEM_BASE1 0x12000000
+#define QCA956X_PCI_MEM_SIZE 0x02000000
+#define QCA956X_PCI_CFG_BASE1 0x16000000
+#define QCA956X_PCI_CFG_SIZE 0x1000
+#define QCA956X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000)
+#define QCA956X_PCI_CRP_SIZE 0x1000
+#define QCA956X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA956X_PCI_CTRL_SIZE 0x100
+
+#define QCA956X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA956X_WMAC_SIZE 0x20000
+#define QCA956X_EHCI0_BASE 0x1b000000
+#define QCA956X_EHCI1_BASE 0x1b400000
+#define QCA956X_EHCI_SIZE 0x200
+#define QCA956X_GMAC_SGMII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SGMII_SIZE 0x64
+#define QCA956X_PLL_BASE (AR71XX_APB_BASE + 0x00050000)
+#define QCA956X_PLL_SIZE 0x50
+#define QCA956X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SIZE 0x64
+
+/*
+ * Hidden Registers
+ */
+#define QCA956X_MAC_CFG_BASE 0xb9000000
+#define QCA956X_MAC_CFG_SIZE 0x64
+
+#define QCA956X_MAC_CFG1_REG 0x00
+#define QCA956X_MAC_CFG1_SOFT_RST BIT(31)
+#define QCA956X_MAC_CFG1_RX_RST BIT(19)
+#define QCA956X_MAC_CFG1_TX_RST BIT(18)
+#define QCA956X_MAC_CFG1_LOOPBACK BIT(8)
+#define QCA956X_MAC_CFG1_RX_EN BIT(2)
+#define QCA956X_MAC_CFG1_TX_EN BIT(0)
+
+#define QCA956X_MAC_CFG2_REG 0x04
+#define QCA956X_MAC_CFG2_IF_1000 BIT(9)
+#define QCA956X_MAC_CFG2_IF_10_100 BIT(8)
+#define QCA956X_MAC_CFG2_HUGE_FRAME_EN BIT(5)
+#define QCA956X_MAC_CFG2_LEN_CHECK BIT(4)
+#define QCA956X_MAC_CFG2_PAD_CRC_EN BIT(2)
+#define QCA956X_MAC_CFG2_FDX BIT(0)
+
+#define QCA956X_MAC_MII_MGMT_CFG_REG 0x20
+#define QCA956X_MGMT_CFG_CLK_DIV_20 0x07
+
+#define QCA956X_MAC_FIFO_CFG0_REG 0x48
+#define QCA956X_MAC_FIFO_CFG1_REG 0x4c
+#define QCA956X_MAC_FIFO_CFG2_REG 0x50
+#define QCA956X_MAC_FIFO_CFG3_REG 0x54
+#define QCA956X_MAC_FIFO_CFG4_REG 0x58
+#define QCA956X_MAC_FIFO_CFG5_REG 0x5c
+
+#define QCA956X_DAM_RESET_OFFSET 0xb90001bc
+#define QCA956X_DAM_RESET_SIZE 0x4
+#define QCA956X_INLINE_CHKSUM_ENG BIT(27)
/*
* DDR_CTRL block
@@ -149,6 +237,12 @@
#define AR934X_DDR_REG_FLUSH_PCIE 0xa8
#define AR934X_DDR_REG_FLUSH_WMAC 0xac
+#define QCA953X_DDR_REG_FLUSH_GE0 0x9c
+#define QCA953X_DDR_REG_FLUSH_GE1 0xa0
+#define QCA953X_DDR_REG_FLUSH_USB 0xa4
+#define QCA953X_DDR_REG_FLUSH_PCIE 0xa8
+#define QCA953X_DDR_REG_FLUSH_WMAC 0xac
+
/*
* PLL block
*/
@@ -166,9 +260,15 @@
#define AR71XX_AHB_DIV_SHIFT 20
#define AR71XX_AHB_DIV_MASK 0x7
+#define AR71XX_ETH0_PLL_SHIFT 17
+#define AR71XX_ETH1_PLL_SHIFT 19
+
#define AR724X_PLL_REG_CPU_CONFIG 0x00
#define AR724X_PLL_REG_PCIE_CONFIG 0x10
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS BIT(16)
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET BIT(25)
+
#define AR724X_PLL_FB_SHIFT 0
#define AR724X_PLL_FB_MASK 0x3ff
#define AR724X_PLL_REF_DIV_SHIFT 10
@@ -178,6 +278,8 @@
#define AR724X_DDR_DIV_SHIFT 22
#define AR724X_DDR_DIV_MASK 0x3
+#define AR7242_PLL_REG_ETH0_INT_CLOCK 0x2c
+
#define AR913X_PLL_REG_CPU_CONFIG 0x00
#define AR913X_PLL_REG_ETH_CONFIG 0x04
#define AR913X_PLL_REG_ETH0_INT_CLOCK 0x14
@@ -190,6 +292,9 @@
#define AR913X_AHB_DIV_SHIFT 19
#define AR913X_AHB_DIV_MASK 0x1
+#define AR913X_ETH0_PLL_SHIFT 20
+#define AR913X_ETH1_PLL_SHIFT 22
+
#define AR933X_PLL_CPU_CONFIG_REG 0x00
#define AR933X_PLL_CLOCK_CTRL_REG 0x08
@@ -211,6 +316,8 @@
#define AR934X_PLL_CPU_CONFIG_REG 0x00
#define AR934X_PLL_DDR_CONFIG_REG 0x04
#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG 0x08
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define AR934X_PLL_ETH_XMII_CONTROL_REG 0x2c
#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
@@ -243,9 +350,52 @@
#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL BIT(6)
+
+#define QCA953X_PLL_CPU_CONFIG_REG 0x00
+#define QCA953X_PLL_DDR_CONFIG_REG 0x04
+#define QCA953X_PLL_CLK_CTRL_REG 0x08
+#define QCA953X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define QCA953X_PLL_ETH_XMII_CONTROL_REG 0x2c
+#define QCA953X_PLL_ETH_SGMII_CONTROL_REG 0x48
+
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define QCA953X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define QCA953X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define QCA953X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
#define QCA955X_PLL_CPU_CONFIG_REG 0x00
#define QCA955X_PLL_DDR_CONFIG_REG 0x04
#define QCA955X_PLL_CLK_CTRL_REG 0x08
+#define QCA955X_PLL_ETH_XMII_CONTROL_REG 0x28
+#define QCA955X_PLL_ETH_SGMII_CONTROL_REG 0x48
+#define QCA955X_PLL_ETH_SGMII_SERDES_REG 0x4c
#define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
#define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
@@ -278,6 +428,81 @@
#define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define QCA955X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA955X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA955X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
+#define QCA956X_PLL_CPU_CONFIG_REG 0x00
+#define QCA956X_PLL_CPU_CONFIG1_REG 0x04
+#define QCA956X_PLL_DDR_CONFIG_REG 0x08
+#define QCA956X_PLL_DDR_CONFIG1_REG 0x0c
+#define QCA956X_PLL_CLK_CTRL_REG 0x10
+#define QCA956X_PLL_SWITCH_CLOCK_CONTROL_REG 0x28
+#define QCA956X_PLL_ETH_XMII_CONTROL_REG 0x30
+#define QCA956X_PLL_ETH_SGMII_SERDES_REG 0x4c
+
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_CPU_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_DDR_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL BIT(20)
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL BIT(21)
+#define QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_I2C_CLK_SELB BIT(5)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_1 BIT(6)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_UART1_CLK_SEL BIT(7)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_SHIFT 8
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_MASK 0xf
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EN_PLL_TOP BIT(12)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_2 BIT(13)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_1 BIT(14)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_2 BIT(15)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCH_FUNC_TST_MODE BIT(16)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EEE_ENABLE BIT(17)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_OEN_CLK125M_PLL BIT(18)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCHCLK_SEL BIT(19)
+
+#define QCA956X_PLL_ETH_XMII_TX_INVERT BIT(1)
+#define QCA956X_PLL_ETH_XMII_GIGE BIT(25)
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_SHIFT 28
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_MASK 0x3
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_SHIFT 26
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_MASK 0x3
+
+#define QCA956X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA956X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA956X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
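The NINT/NFRAC/REFDIV/OUTDIV fields above follow the usual fractional-N PLL layout. As a hedged sketch (not part of this patch; the helper name and the NFRAC/64 formula are assumptions modeled on the existing ath79 clock code), a clock driver would decode the QCA953X CPU PLL rate roughly like this:

/*
 * Sketch only: rate = (ref/REFDIV) * (NINT + NFRAC/2^6), >> OUTDIV.
 * Verify the fraction width and formula against the datasheet.
 */
static unsigned long qca953x_cpu_pll_rate(u32 pll, unsigned long ref_rate)
{
	u32 out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
		      QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
	u32 ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
		      QCA953X_PLL_CPU_CONFIG_REFDIV_MASK;
	u32 nint = (pll >> QCA953X_PLL_CPU_CONFIG_NINT_SHIFT) &
		   QCA953X_PLL_CPU_CONFIG_NINT_MASK;
	u32 nfrac = (pll >> QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
		    QCA953X_PLL_CPU_CONFIG_NFRAC_MASK;
	unsigned long rate;

	rate = nint * (ref_rate / ref_div);	   /* integer part */
	rate += nfrac * (ref_rate >> 6) / ref_div; /* 6-bit fraction */
	return rate >> out_div;			   /* 2^OUTDIV postdivider */
}

The DDR PLL differs only in that its NFRAC field is 10 bits wide (mask 0x3ff), so the fraction term would divide by 2^10 instead.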
/*
* USB_CONFIG block
*/
@@ -317,10 +542,19 @@
#define AR934X_RESET_REG_BOOTSTRAP 0xb0
#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+#define QCA953X_RESET_REG_RESET_MODULE 0x1c
+#define QCA953X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA953X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
#define QCA955X_RESET_REG_RESET_MODULE 0x1c
#define QCA955X_RESET_REG_BOOTSTRAP 0xb0
#define QCA955X_RESET_REG_EXT_INT_STATUS 0xac
+#define QCA956X_RESET_REG_RESET_MODULE 0x1c
+#define QCA956X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA956X_RESET_REG_EXT_INT_STATUS 0xac
+
+#define MISC_INT_MIPS_SI_TIMERINT_MASK BIT(28)
#define MISC_INT_ETHSW BIT(12)
#define MISC_INT_TIMER4 BIT(10)
#define MISC_INT_TIMER3 BIT(9)
@@ -370,16 +604,123 @@
#define AR913X_RESET_USB_HOST BIT(5)
#define AR913X_RESET_USB_PHY BIT(4)
+#define AR933X_RESET_GE1_MDIO BIT(23)
+#define AR933X_RESET_GE0_MDIO BIT(22)
+#define AR933X_RESET_GE1_MAC BIT(13)
#define AR933X_RESET_WMAC BIT(11)
+#define AR933X_RESET_GE0_MAC BIT(9)
#define AR933X_RESET_USB_HOST BIT(5)
#define AR933X_RESET_USB_PHY BIT(4)
#define AR933X_RESET_USBSUS_OVERRIDE BIT(3)
+#define AR934X_RESET_HOST BIT(31)
+#define AR934X_RESET_SLIC BIT(30)
+#define AR934X_RESET_HDMA BIT(29)
+#define AR934X_RESET_EXTERNAL BIT(28)
+#define AR934X_RESET_RTC BIT(27)
+#define AR934X_RESET_PCIE_EP_INT BIT(26)
+#define AR934X_RESET_CHKSUM_ACC BIT(25)
+#define AR934X_RESET_FULL_CHIP BIT(24)
+#define AR934X_RESET_GE1_MDIO BIT(23)
+#define AR934X_RESET_GE0_MDIO BIT(22)
+#define AR934X_RESET_CPU_NMI BIT(21)
+#define AR934X_RESET_CPU_COLD BIT(20)
+#define AR934X_RESET_HOST_RESET_INT BIT(19)
+#define AR934X_RESET_PCIE_EP BIT(18)
+#define AR934X_RESET_UART1 BIT(17)
+#define AR934X_RESET_DDR BIT(16)
+#define AR934X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define AR934X_RESET_NANDF BIT(14)
+#define AR934X_RESET_GE1_MAC BIT(13)
+#define AR934X_RESET_ETH_SWITCH_ANALOG BIT(12)
#define AR934X_RESET_USB_PHY_ANALOG BIT(11)
+#define AR934X_RESET_HOST_DMA_INT BIT(10)
+#define AR934X_RESET_GE0_MAC BIT(9)
+#define AR934X_RESET_ETH_SWITCH BIT(8)
+#define AR934X_RESET_PCIE_PHY BIT(7)
+#define AR934X_RESET_PCIE BIT(6)
#define AR934X_RESET_USB_HOST BIT(5)
#define AR934X_RESET_USB_PHY BIT(4)
#define AR934X_RESET_USBSUS_OVERRIDE BIT(3)
-
+#define AR934X_RESET_LUT BIT(2)
+#define AR934X_RESET_MBOX BIT(1)
+#define AR934X_RESET_I2S BIT(0)
+
+#define QCA953X_RESET_USB_EXT_PWR BIT(29)
+#define QCA953X_RESET_EXTERNAL BIT(28)
+#define QCA953X_RESET_RTC BIT(27)
+#define QCA953X_RESET_FULL_CHIP BIT(24)
+#define QCA953X_RESET_GE1_MDIO BIT(23)
+#define QCA953X_RESET_GE0_MDIO BIT(22)
+#define QCA953X_RESET_CPU_NMI BIT(21)
+#define QCA953X_RESET_CPU_COLD BIT(20)
+#define QCA953X_RESET_DDR BIT(16)
+#define QCA953X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA953X_RESET_GE1_MAC BIT(13)
+#define QCA953X_RESET_ETH_SWITCH_ANALOG BIT(12)
+#define QCA953X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA953X_RESET_GE0_MAC BIT(9)
+#define QCA953X_RESET_ETH_SWITCH BIT(8)
+#define QCA953X_RESET_PCIE_PHY BIT(7)
+#define QCA953X_RESET_PCIE BIT(6)
+#define QCA953X_RESET_USB_HOST BIT(5)
+#define QCA953X_RESET_USB_PHY BIT(4)
+#define QCA953X_RESET_USBSUS_OVERRIDE BIT(3)
+
+#define QCA955X_RESET_HOST BIT(31)
+#define QCA955X_RESET_SLIC BIT(30)
+#define QCA955X_RESET_HDMA BIT(29)
+#define QCA955X_RESET_EXTERNAL BIT(28)
+#define QCA955X_RESET_RTC BIT(27)
+#define QCA955X_RESET_PCIE_EP_INT BIT(26)
+#define QCA955X_RESET_CHKSUM_ACC BIT(25)
+#define QCA955X_RESET_FULL_CHIP BIT(24)
+#define QCA955X_RESET_GE1_MDIO BIT(23)
+#define QCA955X_RESET_GE0_MDIO BIT(22)
+#define QCA955X_RESET_CPU_NMI BIT(21)
+#define QCA955X_RESET_CPU_COLD BIT(20)
+#define QCA955X_RESET_HOST_RESET_INT BIT(19)
+#define QCA955X_RESET_PCIE_EP BIT(18)
+#define QCA955X_RESET_UART1 BIT(17)
+#define QCA955X_RESET_DDR BIT(16)
+#define QCA955X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA955X_RESET_NANDF BIT(14)
+#define QCA955X_RESET_GE1_MAC BIT(13)
+#define QCA955X_RESET_SGMII_ANALOG BIT(12)
+#define QCA955X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA955X_RESET_HOST_DMA_INT BIT(10)
+#define QCA955X_RESET_GE0_MAC BIT(9)
+#define QCA955X_RESET_SGMII BIT(8)
+#define QCA955X_RESET_PCIE_PHY BIT(7)
+#define QCA955X_RESET_PCIE BIT(6)
+#define QCA955X_RESET_USB_HOST BIT(5)
+#define QCA955X_RESET_USB_PHY BIT(4)
+#define QCA955X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA955X_RESET_LUT BIT(2)
+#define QCA955X_RESET_MBOX BIT(1)
+#define QCA955X_RESET_I2S BIT(0)
+
+#define QCA956X_RESET_EXTERNAL BIT(28)
+#define QCA956X_RESET_FULL_CHIP BIT(24)
+#define QCA956X_RESET_GE1_MDIO BIT(23)
+#define QCA956X_RESET_GE0_MDIO BIT(22)
+#define QCA956X_RESET_CPU_NMI BIT(21)
+#define QCA956X_RESET_CPU_COLD BIT(20)
+#define QCA956X_RESET_DMA BIT(19)
+#define QCA956X_RESET_DDR BIT(16)
+#define QCA956X_RESET_GE1_MAC BIT(13)
+#define QCA956X_RESET_SGMII_ANALOG BIT(12)
+#define QCA956X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA956X_RESET_GE0_MAC BIT(9)
+#define QCA956X_RESET_SGMII BIT(8)
+#define QCA956X_RESET_USB_HOST BIT(5)
+#define QCA956X_RESET_USB_PHY BIT(4)
+#define QCA956X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA956X_RESET_SWITCH_ANALOG BIT(2)
+#define QCA956X_RESET_SWITCH BIT(0)
+
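All of the per-SoC RESET_* masks above target the RESET_MODULE register defined earlier. A hedged sketch of the conventional set/delay/clear pulse, using the ath79_reset_rr()/ath79_reset_wr() accessors this patch also touches in ath79.h (the helper name and the 100us settle time are illustrative, not from the patch):

#include <linux/delay.h>

/* Sketch only: pulse one QCA956X reset line, e.g. QCA956X_RESET_SGMII. */
static void qca956x_pulse_reset(u32 mask)
{
	u32 t = ath79_reset_rr(QCA956X_RESET_REG_RESET_MODULE);

	ath79_reset_wr(QCA956X_RESET_REG_RESET_MODULE, t | mask);
	udelay(100);	/* settle time is an assumption */
	ath79_reset_wr(QCA956X_RESET_REG_RESET_MODULE, t & ~mask);
}

The read-back flush added to ath79_reset_wr() later in this patch ensures each write has actually reached the device before the delay starts.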
+#define AR933X_BOOTSTRAP_MDIO_GPIO_EN BIT(18)
+#define AR933X_BOOTSTRAP_EEPBUSY BIT(4)
#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0)
#define AR934X_BOOTSTRAP_SW_OPTION8 BIT(23)
@@ -398,8 +739,17 @@
#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+#define QCA953X_BOOTSTRAP_SW_OPTION2 BIT(12)
+#define QCA953X_BOOTSTRAP_SW_OPTION1 BIT(11)
+#define QCA953X_BOOTSTRAP_EJTAG_MODE BIT(5)
+#define QCA953X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define QCA953X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define QCA953X_BOOTSTRAP_DDR1 BIT(0)
+
#define QCA955X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define QCA956X_BOOTSTRAP_REF_CLK_40 BIT(2)
+
#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
@@ -418,6 +768,24 @@
AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
AR934X_PCIE_WMAC_INT_PCIE_RC3)
+#define QCA953X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
+#define QCA953X_PCIE_WMAC_INT_WMAC_TX BIT(1)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC BIT(4)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
+#define QCA953X_PCIE_WMAC_INT_WMAC_ALL \
+ (QCA953X_PCIE_WMAC_INT_WMAC_MISC | QCA953X_PCIE_WMAC_INT_WMAC_TX | \
+ QCA953X_PCIE_WMAC_INT_WMAC_RXLP | QCA953X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define QCA953X_PCIE_WMAC_INT_PCIE_ALL \
+ (QCA953X_PCIE_WMAC_INT_PCIE_RC | QCA953X_PCIE_WMAC_INT_PCIE_RC0 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC1 | QCA953X_PCIE_WMAC_INT_PCIE_RC2 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC3)
+
#define QCA955X_EXT_INT_WMAC_MISC BIT(0)
#define QCA955X_EXT_INT_WMAC_TX BIT(1)
#define QCA955X_EXT_INT_WMAC_RXLP BIT(2)
@@ -449,6 +817,37 @@
QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
QCA955X_EXT_INT_PCIE_RC2_INT3)
+#define QCA956X_EXT_INT_WMAC_MISC BIT(0)
+#define QCA956X_EXT_INT_WMAC_TX BIT(1)
+#define QCA956X_EXT_INT_WMAC_RXLP BIT(2)
+#define QCA956X_EXT_INT_WMAC_RXHP BIT(3)
+#define QCA956X_EXT_INT_PCIE_RC1 BIT(4)
+#define QCA956X_EXT_INT_PCIE_RC1_INT0 BIT(5)
+#define QCA956X_EXT_INT_PCIE_RC1_INT1 BIT(6)
+#define QCA956X_EXT_INT_PCIE_RC1_INT2 BIT(7)
+#define QCA956X_EXT_INT_PCIE_RC1_INT3 BIT(8)
+#define QCA956X_EXT_INT_PCIE_RC2 BIT(12)
+#define QCA956X_EXT_INT_PCIE_RC2_INT0 BIT(13)
+#define QCA956X_EXT_INT_PCIE_RC2_INT1 BIT(14)
+#define QCA956X_EXT_INT_PCIE_RC2_INT2 BIT(15)
+#define QCA956X_EXT_INT_PCIE_RC2_INT3 BIT(16)
+#define QCA956X_EXT_INT_USB1 BIT(24)
+#define QCA956X_EXT_INT_USB2 BIT(28)
+
+#define QCA956X_EXT_INT_WMAC_ALL \
+ (QCA956X_EXT_INT_WMAC_MISC | QCA956X_EXT_INT_WMAC_TX | \
+ QCA956X_EXT_INT_WMAC_RXLP | QCA956X_EXT_INT_WMAC_RXHP)
+
+#define QCA956X_EXT_INT_PCIE_RC1_ALL \
+ (QCA956X_EXT_INT_PCIE_RC1 | QCA956X_EXT_INT_PCIE_RC1_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT1 | QCA956X_EXT_INT_PCIE_RC1_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA956X_EXT_INT_PCIE_RC2_ALL \
+ (QCA956X_EXT_INT_PCIE_RC2 | QCA956X_EXT_INT_PCIE_RC2_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT1 | QCA956X_EXT_INT_PCIE_RC2_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT3)
+
#define REV_ID_MAJOR_MASK 0xfff0
#define REV_ID_MAJOR_AR71XX 0x00a0
#define REV_ID_MAJOR_AR913X 0x00b0
@@ -460,8 +859,12 @@
#define REV_ID_MAJOR_AR9341 0x0120
#define REV_ID_MAJOR_AR9342 0x1120
#define REV_ID_MAJOR_AR9344 0x2120
+#define REV_ID_MAJOR_QCA9533 0x0140
+#define REV_ID_MAJOR_QCA9533_V2 0x0160
#define REV_ID_MAJOR_QCA9556 0x0130
#define REV_ID_MAJOR_QCA9558 0x1130
+#define REV_ID_MAJOR_TP9343 0x0150
+#define REV_ID_MAJOR_QCA956X 0x1150
#define AR71XX_REV_ID_MINOR_MASK 0x3
#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -482,8 +885,12 @@
#define AR934X_REV_ID_REVISION_MASK 0xf
+#define QCA953X_REV_ID_REVISION_MASK 0xf
+
#define QCA955X_REV_ID_REVISION_MASK 0xf
+#define QCA956X_REV_ID_REVISION_MASK 0xf
+
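These REV_ID values are what SoC detection keys on: the major field selects the family, and the per-family REVISION_MASK extracts the stepping. A hedged sketch of the mapping for the parts this patch adds (the helper name is illustrative; ATH79_SOC_UNKNOWN is assumed to be the enum's fallback entry):

/* Sketch only: map a REV_ID major value to the new SoC enum entries. */
static enum ath79_soc_type soc_from_rev_id(u32 id)
{
	switch (id & REV_ID_MAJOR_MASK) {
	case REV_ID_MAJOR_QCA9533:
	case REV_ID_MAJOR_QCA9533_V2:
		return ATH79_SOC_QCA9533;
	case REV_ID_MAJOR_TP9343:
		return ATH79_SOC_TP9343;
	case REV_ID_MAJOR_QCA956X:
		return ATH79_SOC_QCA956X;
	default:
		return ATH79_SOC_UNKNOWN;	/* assumed fallback */
	}
}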
/*
* SPI block
*/
@@ -521,15 +928,63 @@
#define AR71XX_GPIO_REG_INT_ENABLE 0x24
#define AR71XX_GPIO_REG_FUNC 0x28
+#define AR934X_GPIO_REG_OUT_FUNC0 0x2c
+#define AR934X_GPIO_REG_OUT_FUNC1 0x30
+#define AR934X_GPIO_REG_OUT_FUNC2 0x34
+#define AR934X_GPIO_REG_OUT_FUNC3 0x38
+#define AR934X_GPIO_REG_OUT_FUNC4 0x3c
+#define AR934X_GPIO_REG_OUT_FUNC5 0x40
#define AR934X_GPIO_REG_FUNC 0x6c
+#define QCA953X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA953X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA953X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA953X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA953X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA953X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA953X_GPIO_REG_FUNC 0x6c
+
+#define QCA953X_GPIO_OUT_MUX_SPI_CLK 8
+#define QCA953X_GPIO_OUT_MUX_SPI_CS0 9
+#define QCA953X_GPIO_OUT_MUX_SPI_CS1 10
+#define QCA953X_GPIO_OUT_MUX_SPI_CS2 11
+#define QCA953X_GPIO_OUT_MUX_SPI_MOSI 12
+#define QCA953X_GPIO_OUT_MUX_LED_LINK1 41
+#define QCA953X_GPIO_OUT_MUX_LED_LINK2 42
+#define QCA953X_GPIO_OUT_MUX_LED_LINK3 43
+#define QCA953X_GPIO_OUT_MUX_LED_LINK4 44
+#define QCA953X_GPIO_OUT_MUX_LED_LINK5 45
+
+#define QCA955X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA955X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA955X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA955X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA955X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA955X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA955X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA956X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA956X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA956X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA956X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA956X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA956X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA956X_GPIO_REG_IN_ENABLE3 0x50
+#define QCA956X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_OUT_MUX_GE0_MDO 32
+#define QCA956X_GPIO_OUT_MUX_GE0_MDC 33
+
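The OUT_FUNCn registers pack one 8-bit output-mux selector per GPIO, four per 32-bit register, so GPIO n lives in OUT_FUNC0 + 4 * (n / 4) at bit offset 8 * (n % 4). A hedged sketch (the packing is inferred from the register spacing above and the AR934X layout; the helper is illustrative):

/* Sketch only: route an output-mux function onto a QCA953X GPIO pin. */
static void qca953x_gpio_output_select(void __iomem *gpio_base,
				       unsigned int gpio, u8 func)
{
	unsigned int reg = QCA953X_GPIO_REG_OUT_FUNC0 + 4 * (gpio / 4);
	unsigned int shift = 8 * (gpio % 4);
	u32 t;

	t = __raw_readl(gpio_base + reg);
	t &= ~(0xffu << shift);
	t |= (u32)func << shift;
	__raw_writel(t, gpio_base + reg);
}

For example, qca953x_gpio_output_select(base, 4, QCA953X_GPIO_OUT_MUX_LED_LINK1) would drive the first switch-LED signal out of GPIO 4.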
#define AR71XX_GPIO_COUNT 16
#define AR7240_GPIO_COUNT 18
#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
+#define QCA953X_GPIO_COUNT 18
#define QCA955X_GPIO_COUNT 24
+#define QCA956X_GPIO_COUNT 23
/*
* SRIF block
@@ -552,4 +1007,318 @@
#define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13
#define AR934X_SRIF_DPLL2_OUTDIV_MASK 0x7
+#define QCA953X_SRIF_CPU_DPLL1_REG 0x1c0
+#define QCA953X_SRIF_CPU_DPLL2_REG 0x1c4
+#define QCA953X_SRIF_CPU_DPLL3_REG 0x1c8
+
+#define QCA953X_SRIF_DDR_DPLL1_REG 0x240
+#define QCA953X_SRIF_DDR_DPLL2_REG 0x244
+#define QCA953X_SRIF_DDR_DPLL3_REG 0x248
+
+#define QCA953X_SRIF_DPLL1_REFDIV_SHIFT 27
+#define QCA953X_SRIF_DPLL1_REFDIV_MASK 0x1f
+#define QCA953X_SRIF_DPLL1_NINT_SHIFT 18
+#define QCA953X_SRIF_DPLL1_NINT_MASK 0x1ff
+#define QCA953X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff
+
+#define QCA953X_SRIF_DPLL2_LOCAL_PLL BIT(30)
+#define QCA953X_SRIF_DPLL2_OUTDIV_SHIFT 13
+#define QCA953X_SRIF_DPLL2_OUTDIV_MASK 0x7
+
+#define AR71XX_GPIO_FUNC_STEREO_EN BIT(17)
+#define AR71XX_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR71XX_GPIO_FUNC_SPI_CS2_EN BIT(13)
+#define AR71XX_GPIO_FUNC_SPI_CS1_EN BIT(12)
+#define AR71XX_GPIO_FUNC_UART_EN BIT(8)
+#define AR71XX_GPIO_FUNC_USB_OC_EN BIT(4)
+#define AR71XX_GPIO_FUNC_USB_CLK_EN BIT(0)
+
+#define AR724X_GPIO_FUNC_GE0_MII_CLK_EN BIT(19)
+#define AR724X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR724X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR724X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR724X_GPIO_FUNC_CLK_OBS5_EN BIT(12)
+#define AR724X_GPIO_FUNC_CLK_OBS4_EN BIT(11)
+#define AR724X_GPIO_FUNC_CLK_OBS3_EN BIT(10)
+#define AR724X_GPIO_FUNC_CLK_OBS2_EN BIT(9)
+#define AR724X_GPIO_FUNC_CLK_OBS1_EN BIT(8)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR724X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR724X_GPIO_FUNC_UART_EN BIT(1)
+#define AR724X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR913X_GPIO_FUNC_WMAC_LED_EN BIT(22)
+#define AR913X_GPIO_FUNC_EXP_PORT_CS_EN BIT(21)
+#define AR913X_GPIO_FUNC_I2S_REFCLKEN BIT(20)
+#define AR913X_GPIO_FUNC_I2S_MCKEN BIT(19)
+#define AR913X_GPIO_FUNC_I2S1_EN BIT(18)
+#define AR913X_GPIO_FUNC_I2S0_EN BIT(17)
+#define AR913X_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR913X_GPIO_FUNC_UART_RTSCTS_EN BIT(9)
+#define AR913X_GPIO_FUNC_UART_EN BIT(8)
+#define AR913X_GPIO_FUNC_USB_CLK_EN BIT(4)
+
+#define AR933X_GPIO_FUNC_SPDIF2TCK BIT(31)
+#define AR933X_GPIO_FUNC_SPDIF_EN BIT(30)
+#define AR933X_GPIO_FUNC_I2SO_22_18_EN BIT(29)
+#define AR933X_GPIO_FUNC_I2S_MCK_EN BIT(27)
+#define AR933X_GPIO_FUNC_I2SO_EN BIT(26)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_DUPL BIT(25)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_COLL BIT(24)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_ACT BIT(23)
+#define AR933X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR933X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR933X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR933X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR933X_GPIO_FUNC_UART_EN BIT(1)
+#define AR933X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR934X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define AR934X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define AR934X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define AR934X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define AR934X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define AR934X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define AR934X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define AR934X_GPIO_FUNC_CLK_OBS0_EN BIT(2)
+#define AR934X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define AR934X_GPIO_OUT_GPIO 0
+#define AR934X_GPIO_OUT_SPI_CS1 7
+#define AR934X_GPIO_OUT_LED_LINK0 41
+#define AR934X_GPIO_OUT_LED_LINK1 42
+#define AR934X_GPIO_OUT_LED_LINK2 43
+#define AR934X_GPIO_OUT_LED_LINK3 44
+#define AR934X_GPIO_OUT_LED_LINK4 45
+#define AR934X_GPIO_OUT_EXT_LNA0 46
+#define AR934X_GPIO_OUT_EXT_LNA1 47
+
+#define QCA955X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define QCA955X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define QCA955X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define QCA955X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define QCA955X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define QCA955X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define QCA955X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define QCA955X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define QCA955X_GPIO_OUT_GPIO 0
+#define QCA955X_MII_EXT_MDI 1
+#define QCA955X_SLIC_DATA_OUT 3
+#define QCA955X_SLIC_PCM_FS 4
+#define QCA955X_SLIC_PCM_CLK 5
+#define QCA955X_SPI_CLK 8
+#define QCA955X_SPI_CS_0 9
+#define QCA955X_SPI_CS_1 10
+#define QCA955X_SPI_CS_2 11
+#define QCA955X_SPI_MISO 12
+#define QCA955X_I2S_CLK 13
+#define QCA955X_I2S_WS 14
+#define QCA955X_I2S_SD 15
+#define QCA955X_I2S_MCK 16
+#define QCA955X_SPDIF_OUT 17
+#define QCA955X_UART1_TD 18
+#define QCA955X_UART1_RTS 19
+#define QCA955X_UART1_RD 20
+#define QCA955X_UART1_CTS 21
+#define QCA955X_UART0_SOUT 22
+#define QCA955X_SPDIF2_OUT 23
+#define QCA955X_LED_SGMII_SPEED0 24
+#define QCA955X_LED_SGMII_SPEED1 25
+#define QCA955X_LED_SGMII_DUPLEX 26
+#define QCA955X_LED_SGMII_LINK_UP 27
+#define QCA955X_SGMII_SPEED0_INVERT 28
+#define QCA955X_SGMII_SPEED1_INVERT 29
+#define QCA955X_SGMII_DUPLEX_INVERT 30
+#define QCA955X_SGMII_LINK_UP_INVERT 31
+#define QCA955X_GE1_MII_MDO 32
+#define QCA955X_GE1_MII_MDC 33
+#define QCA955X_SWCOM2 38
+#define QCA955X_SWCOM3 39
+#define QCA955X_MAC2_GPIO 40
+#define QCA955X_MAC3_GPIO 41
+#define QCA955X_ATT_LED 42
+#define QCA955X_PWR_LED 43
+#define QCA955X_TX_FRAME 44
+#define QCA955X_RX_CLEAR_EXTERNAL 45
+#define QCA955X_LED_NETWORK_EN 46
+#define QCA955X_LED_POWER_EN 47
+#define QCA955X_WMAC_GLUE_WOW 68
+#define QCA955X_RX_CLEAR_EXTENSION 70
+#define QCA955X_CP_NAND_CS1 73
+#define QCA955X_USB_SUSPEND 74
+#define QCA955X_ETH_TX_ERR 75
+#define QCA955X_DDR_DQ_OE 76
+#define QCA955X_CLKREQ_N_EP 77
+#define QCA955X_CLKREQ_N_RC 78
+#define QCA955X_CLK_OBS0 79
+#define QCA955X_CLK_OBS1 80
+#define QCA955X_CLK_OBS2 81
+#define QCA955X_CLK_OBS3 82
+#define QCA955X_CLK_OBS4 83
+#define QCA955X_CLK_OBS5 84
+
+/*
+ * MII_CTRL block
+ */
+#define AR71XX_MII_REG_MII0_CTRL 0x00
+#define AR71XX_MII_REG_MII1_CTRL 0x04
+
+#define AR71XX_MII_CTRL_IF_MASK 3
+#define AR71XX_MII_CTRL_SPEED_SHIFT 4
+#define AR71XX_MII_CTRL_SPEED_MASK 3
+#define AR71XX_MII_CTRL_SPEED_10 0
+#define AR71XX_MII_CTRL_SPEED_100 1
+#define AR71XX_MII_CTRL_SPEED_1000 2
+
+#define AR71XX_MII0_CTRL_IF_GMII 0
+#define AR71XX_MII0_CTRL_IF_MII 1
+#define AR71XX_MII0_CTRL_IF_RGMII 2
+#define AR71XX_MII0_CTRL_IF_RMII 3
+
+#define AR71XX_MII1_CTRL_IF_RGMII 0
+#define AR71XX_MII1_CTRL_IF_RMII 1
+
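MII0_CTRL carries the interface type in its low two bits (AR71XX_MII_CTRL_IF_MASK) and the link speed in the 2-bit field at SPEED_SHIFT. A hedged sketch of programming both fields in one read-modify-write (the helper name and base pointer are illustrative):

/* Sketch only: configure the MII0 interface type and speed. */
static void ar71xx_mii0_setup(void __iomem *mii_base, u32 iface, u32 speed)
{
	u32 t = __raw_readl(mii_base + AR71XX_MII_REG_MII0_CTRL);

	t &= ~AR71XX_MII_CTRL_IF_MASK;
	t |= iface & AR71XX_MII_CTRL_IF_MASK;
	t &= ~(AR71XX_MII_CTRL_SPEED_MASK << AR71XX_MII_CTRL_SPEED_SHIFT);
	t |= (speed & AR71XX_MII_CTRL_SPEED_MASK) << AR71XX_MII_CTRL_SPEED_SHIFT;
	__raw_writel(t, mii_base + AR71XX_MII_REG_MII0_CTRL);
}

Typical call: ar71xx_mii0_setup(base, AR71XX_MII0_CTRL_IF_RGMII, AR71XX_MII_CTRL_SPEED_1000).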
+/*
+ * AR933X GMAC interface
+ */
+#define AR933X_GMAC_REG_ETH_CFG 0x00
+
+#define AR933X_ETH_CFG_RGMII_GE0 BIT(0)
+#define AR933X_ETH_CFG_MII_GE0 BIT(1)
+#define AR933X_ETH_CFG_GMII_GE0 BIT(2)
+#define AR933X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define AR933X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define AR933X_ETH_CFG_MII_GE0_ERR_EN BIT(5)
+#define AR933X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR933X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(8)
+#define AR933X_ETH_CFG_RMII_GE0 BIT(9)
+#define AR933X_ETH_CFG_RMII_GE0_SPD_10 0
+#define AR933X_ETH_CFG_RMII_GE0_SPD_100 BIT(10)
+
+/*
+ * AR934X GMAC Interface
+ */
+#define AR934X_GMAC_REG_ETH_CFG 0x00
+
+#define AR934X_ETH_CFG_RGMII_GMAC0 BIT(0)
+#define AR934X_ETH_CFG_MII_GMAC0 BIT(1)
+#define AR934X_ETH_CFG_GMII_GMAC0 BIT(2)
+#define AR934X_ETH_CFG_MII_GMAC0_MASTER BIT(3)
+#define AR934X_ETH_CFG_MII_GMAC0_SLAVE BIT(4)
+#define AR934X_ETH_CFG_MII_GMAC0_ERR_EN BIT(5)
+#define AR934X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define AR934X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR934X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define AR934X_ETH_CFG_RMII_GMAC0 BIT(10)
+#define AR933X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define AR934X_ETH_CFG_RMII_GMAC0_MASTER BIT(12)
+#define AR933X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define AR934X_ETH_CFG_RXD_DELAY BIT(14)
+#define AR934X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define AR934X_ETH_CFG_RDV_DELAY BIT(16)
+#define AR934X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+/*
+ * QCA953X GMAC Interface
+ */
+#define QCA953X_GMAC_REG_ETH_CFG 0x00
+
+#define QCA953X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define QCA953X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define QCA953X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define QCA953X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+
+/*
+ * QCA955X GMAC Interface
+ */
+
+#define QCA955X_GMAC_REG_ETH_CFG 0x00
+#define QCA955X_GMAC_REG_SGMII_SERDES 0x18
+
+#define QCA955X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA955X_ETH_CFG_MII_GE0 BIT(1)
+#define QCA955X_ETH_CFG_GMII_GE0 BIT(2)
+#define QCA955X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define QCA955X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define QCA955X_ETH_CFG_GE0_ERR_EN BIT(5)
+#define QCA955X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA955X_ETH_CFG_RMII_GE0 BIT(10)
+#define QCA955X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define QCA955X_ETH_CFG_RMII_GE0_MASTER BIT(12)
+#define QCA955X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA955X_ETH_CFG_RDV_DELAY BIT(16)
+#define QCA955X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RDV_DELAY_SHIFT 16
+#define QCA955X_ETH_CFG_TXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXD_DELAY_SHIFT 18
+#define QCA955X_ETH_CFG_TXE_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXE_DELAY_SHIFT 20
+
+#define QCA955X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
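The RXD/RDV (receive) and TXD/TXE (transmit) fields above are 2-bit RGMII delay selects in ETH_CFG. A hedged sketch of tuning the receive-side delays (helper and base pointer are illustrative):

/* Sketch only: set the QCA955X RGMII RX data/valid delay steps (0-3). */
static void qca955x_set_rx_delays(void __iomem *gmac_base, u32 rxd, u32 rdv)
{
	u32 t = __raw_readl(gmac_base + QCA955X_GMAC_REG_ETH_CFG);

	t &= ~(QCA955X_ETH_CFG_RXD_DELAY_MASK << QCA955X_ETH_CFG_RXD_DELAY_SHIFT);
	t &= ~(QCA955X_ETH_CFG_RDV_DELAY_MASK << QCA955X_ETH_CFG_RDV_DELAY_SHIFT);
	t |= (rxd & QCA955X_ETH_CFG_RXD_DELAY_MASK) << QCA955X_ETH_CFG_RXD_DELAY_SHIFT;
	t |= (rdv & QCA955X_ETH_CFG_RDV_DELAY_MASK) << QCA955X_ETH_CFG_RDV_DELAY_SHIFT;
	__raw_writel(t, gmac_base + QCA955X_GMAC_REG_ETH_CFG);
}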
+
+/*
+ * QCA956X GMAC Interface
+ */
+
+#define QCA956X_GMAC_REG_ETH_CFG 0x00
+#define QCA956X_GMAC_REG_SGMII_RESET 0x14
+#define QCA956X_GMAC_REG_SGMII_SERDES 0x18
+#define QCA956X_GMAC_REG_MR_AN_CONTROL 0x1c
+#define QCA956X_GMAC_REG_SGMII_CONFIG 0x34
+#define QCA956X_GMAC_REG_SGMII_DEBUG 0x58
+
+#define QCA956X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA956X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA956X_ETH_CFG_SW_ONLY_MODE BIT(7)
+#define QCA956X_ETH_CFG_SW_PHY_SWAP BIT(8)
+#define QCA956X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(9)
+#define QCA956X_ETH_CFG_SW_APB_ACCESS BIT(10)
+#define QCA956X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define QCA956X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA956X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+#define QCA956X_SGMII_RESET_RX_CLK_N_RESET 0x0
+#define QCA956X_SGMII_RESET_RX_CLK_N BIT(0)
+#define QCA956X_SGMII_RESET_TX_CLK_N BIT(1)
+#define QCA956X_SGMII_RESET_RX_125M_N BIT(2)
+#define QCA956X_SGMII_RESET_TX_125M_N BIT(3)
+#define QCA956X_SGMII_RESET_HW_RX_125M_N BIT(4)
+
+#define QCA956X_SGMII_SERDES_CDR_BW_MASK 0x3
+#define QCA956X_SGMII_SERDES_CDR_BW_SHIFT 1
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_MASK 0x7
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_SHIFT 4
+#define QCA956X_SGMII_SERDES_PLL_BW BIT(8)
+#define QCA956X_SGMII_SERDES_VCO_FAST BIT(9)
+#define QCA956X_SGMII_SERDES_VCO_SLOW BIT(10)
+#define QCA956X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA956X_SGMII_SERDES_EN_SIGNAL_DETECT BIT(16)
+#define QCA956X_SGMII_SERDES_FIBER_SDO BIT(17)
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+#define QCA956X_SGMII_SERDES_VCO_REG_SHIFT 27
+#define QCA956X_SGMII_SERDES_VCO_REG_MASK 0xf
+
+#define QCA956X_MR_AN_CONTROL_AN_ENABLE BIT(12)
+#define QCA956X_MR_AN_CONTROL_PHY_RESET BIT(15)
+
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_SHIFT 0
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_MASK 0x7
+
#endif /* __ASM_MACH_AR71XX_REGS_H */
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 441faa92c3cd..73dcd63b8243 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -32,8 +32,11 @@ enum ath79_soc_type {
ATH79_SOC_AR9341,
ATH79_SOC_AR9342,
ATH79_SOC_AR9344,
+ ATH79_SOC_QCA9533,
ATH79_SOC_QCA9556,
ATH79_SOC_QCA9558,
+ ATH79_SOC_TP9343,
+ ATH79_SOC_QCA956X,
};
extern enum ath79_soc_type ath79_soc;
@@ -100,6 +103,16 @@ static inline int soc_is_ar934x(void)
return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
}
+static inline int soc_is_qca9533(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9533;
+}
+
+static inline int soc_is_qca953x(void)
+{
+ return soc_is_qca9533();
+}
+
static inline int soc_is_qca9556(void)
{
return ath79_soc == ATH79_SOC_QCA9556;
@@ -115,6 +128,26 @@ static inline int soc_is_qca955x(void)
return soc_is_qca9556() || soc_is_qca9558();
}
+static inline int soc_is_tp9343(void)
+{
+ return ath79_soc == ATH79_SOC_TP9343;
+}
+
+static inline int soc_is_qca9561(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca9563(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca956x(void)
+{
+ return soc_is_qca9561() || soc_is_qca9563();
+}
+
void ath79_ddr_wb_flush(unsigned int reg);
void ath79_ddr_set_pci_windows(void);
@@ -134,6 +167,7 @@ static inline u32 ath79_pll_rr(unsigned reg)
static inline void ath79_reset_wr(unsigned reg, u32 val)
{
__raw_writel(val, ath79_reset_base + reg);
+ (void) __raw_readl(ath79_reset_base + reg); /* flush */
}
static inline u32 ath79_reset_rr(unsigned reg)
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 0089a740e5ae..026ad90c8ac0 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -36,6 +36,7 @@
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
#define cpu_has_mips32r1 1
#define cpu_has_mips32r2 1
@@ -43,6 +44,7 @@
#define cpu_has_mips64r2 0
#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
#define cpu_has_64bits 0
#define cpu_has_64bit_zero_reg 0
@@ -51,5 +53,9 @@
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 1
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
#endif /* __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bmips/dma-coherence.h b/arch/mips/include/asm/mach-bmips/dma-coherence.h
deleted file mode 100644
index d29781f02285..000000000000
--- a/arch/mips/include/asm/mach-bmips/dma-coherence.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2009 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_MACH_BMIPS_DMA_COHERENCE_H
-#define __ASM_MACH_BMIPS_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-#include <asm/cpu-type.h>
-#include <asm/cpu.h>
-
-struct device;
-
-extern dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size);
-extern dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page);
-extern unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr);
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0;
-}
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#endif /* __ASM_MACH_BMIPS_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
deleted file mode 100644
index 6eb1ee548b11..000000000000
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- *
- * Similar to mach-generic/dma-coherence.h except
- * plat_device_is_coherent hard coded to return 1.
- *
- */
-#ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-#define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-
-#include <linux/bug.h>
-
-struct device;
-
-extern void octeon_pci_dma_init(void);
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- BUG();
- return 0;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- BUG();
- return 0;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- BUG();
- return 0;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- BUG();
- return 0;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-struct dma_map_ops;
-extern const struct dma_map_ops *octeon_pci_dma_map_ops;
-extern char *octeon_swiotlb;
-
-#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
deleted file mode 100644
index 8ad7a40ca786..000000000000
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H
-#define __ASM_MACH_GENERIC_DMA_COHERENCE_H
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- return virt_to_phys(addr);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- return page_to_phys(page);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return dma_addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_PERDEV_COHERENT
- return dev->archdata.dma_coherent;
-#else
- switch (coherentio) {
- default:
- case IO_COHERENCE_DEFAULT:
- return hw_coherentio;
- case IO_COHERENCE_ENABLED:
- return 1;
- case IO_COHERENCE_DISABLED:
- return 0;
- }
-#endif
-}
-
-#ifndef plat_post_dma_flush
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-#endif
-
-#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-generic/kmalloc.h b/arch/mips/include/asm/mach-generic/kmalloc.h
index 74207c7bd00d..649a98338886 100644
--- a/arch/mips/include/asm/mach-generic/kmalloc.h
+++ b/arch/mips/include/asm/mach-generic/kmalloc.h
@@ -2,8 +2,7 @@
#ifndef __ASM_MACH_GENERIC_KMALLOC_H
#define __ASM_MACH_GENERIC_KMALLOC_H
-
-#ifndef CONFIG_DMA_COHERENT
+#ifdef CONFIG_DMA_NONCOHERENT
/*
* Total overkill for most systems but need as a safe default.
* Set this one if any device in the system might do non-coherent DMA.
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 952b0fdfda0e..ee5ebe98f6cf 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -17,9 +17,13 @@
/*
* This gives the physical RAM offset.
*/
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET _AC(0, UL)
-#endif
+#ifndef __ASSEMBLY__
+# if defined(CONFIG_MIPS_AUTO_PFN_OFFSET)
+# define PHYS_OFFSET ((unsigned long)PFN_PHYS(ARCH_PFN_OFFSET))
+# elif !defined(PHYS_OFFSET)
+# define PHYS_OFFSET _AC(0, UL)
+# endif
+#endif /* __ASSEMBLY__ */
#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
deleted file mode 100644
index 04d862020ac9..000000000000
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
-#define __ASM_MACH_IP27_DMA_COHERENCE_H
-
-#include <asm/pci/bridge.h>
-
-#define pdev_to_baddr(pdev, addr) \
- (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
-#define dev_to_baddr(dev, addr) \
- pdev_to_baddr(to_pci_dev(dev), (addr))
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
-
- return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
-
- return pa;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return dma_addr & ~(0xffUL << 56);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 1; /* IP27 non-coherent mode is unsupported */
-}
-
-#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
deleted file mode 100644
index 7bdf212587a0..000000000000
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
-#define __ASM_MACH_IP32_DMA_COHERENCE_H
-
-#include <asm/ip32/crime.h>
-
-struct device;
-
-/*
- * Few notes.
- * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
- * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
- * native-endian)
- * 3. All other devices see memory as one big chunk at 0x40000000
- * 4. Non-PCI devices will pass NULL as struct device*
- *
- * Thus we translate differently, depending on device.
- */
-
-#define RAM_OFFSET_MASK 0x3fffffffUL
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
-
- if (dev == NULL)
- pa += CRIME_HI_MEM_BASE;
-
- return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- dma_addr_t pa;
-
- pa = page_to_phys(page) & RAM_OFFSET_MASK;
-
- if (dev == NULL)
- pa += CRIME_HI_MEM_BASE;
-
- return pa;
-}
-
-/* This is almost certainly wrong but it's what dma-ip32.c used to use */
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- unsigned long addr = dma_addr & RAM_OFFSET_MASK;
-
- if (dma_addr >= 256*1024*1024)
- addr += CRIME_HI_MEM_BASE;
-
- return addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0; /* IP32 is non-coherent */
-}
-
-#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
deleted file mode 100644
index dc347c25c343..000000000000
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H
-#define __ASM_MACH_JAZZ_DMA_COHERENCE_H
-
-#include <asm/jazzdma.h>
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return vdma_alloc(virt_to_phys(addr), size);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- return vdma_alloc(page_to_phys(page), PAGE_SIZE);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return vdma_log2phys(dma_addr);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
- vdma_free(dma_addr);
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0;
-}
-
-#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
deleted file mode 100644
index 64fc44dec0a8..000000000000
--- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006, 07 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
- * Author: Fuxin Zhang, zhangfx@lemote.com
- *
- */
-#ifndef __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-#define __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-
-#ifdef CONFIG_SWIOTLB
-#include <linux/swiotlb.h>
-#endif
-
-struct device;
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-extern dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-extern phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
-#ifdef CONFIG_CPU_LOONGSON3
- return __phys_to_dma(dev, virt_to_phys(addr));
-#else
- return virt_to_phys(addr) | 0x80000000;
-#endif
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
-#ifdef CONFIG_CPU_LOONGSON3
- return __phys_to_dma(dev, page_to_phys(page));
-#else
- return page_to_phys(page) | 0x80000000;
-#endif
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
-#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
- return __dma_to_phys(dev, dma_addr);
-#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
- return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
-#else
- return dma_addr & 0x7fffffff;
-#endif
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_NONCOHERENT
- return 0;
-#else
- return 1;
-#endif /* CONFIG_DMA_NONCOHERENT */
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_LOONGSON64_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index 8393bc548987..312739117bb0 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -19,18 +19,18 @@
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
- mfc0 t0, $16, 3
+ mfc0 t0, CP0_CONFIG3
or t0, (0x1 << 7)
- mtc0 t0, $16, 3
+ mtc0 t0, CP0_CONFIG3
/* Set ELPA on LOONGSON3 pagegrain */
- mfc0 t0, $5, 1
+ mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
- mtc0 t0, $5, 1
+ mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
- mfc0 t0, $16, 6
+ mfc0 t0, CP0_CONFIG6
or t0, 0x100
- mtc0 t0, $16, 6
+ mtc0 t0, CP0_CONFIG6
#endif
_ehb
.set pop
@@ -45,18 +45,18 @@
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
- mfc0 t0, $16, 3
+ mfc0 t0, CP0_CONFIG3
or t0, (0x1 << 7)
- mtc0 t0, $16, 3
+ mtc0 t0, CP0_CONFIG3
/* Set ELPA on LOONGSON3 pagegrain */
- mfc0 t0, $5, 1
+ mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
- mtc0 t0, $5, 1
+ mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
- mfc0 t0, $16, 6
+ mfc0 t0, CP0_CONFIG6
or t0, 0x100
- mtc0 t0, $16, 6
+ mtc0 t0, CP0_CONFIG6
#endif
_ehb
.set pop
diff --git a/arch/mips/include/asm/mach-pic32/spaces.h b/arch/mips/include/asm/mach-pic32/spaces.h
index 046a0a9aa8b3..a1b9783b76ea 100644
--- a/arch/mips/include/asm/mach-pic32/spaces.h
+++ b/arch/mips/include/asm/mach-pic32/spaces.h
@@ -16,7 +16,6 @@
#ifdef CONFIG_PIC32MZDA
#define PHYS_OFFSET _AC(0x08000000, UL)
-#define UNCAC_BASE _AC(0xa8000000, UL)
#endif
#include <asm/mach-generic/spaces.h>
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index ae461d91cd1f..01df9ad62fb8 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <linux/types.h>
#include <asm/hazards.h>
+#include <asm/isa-rev.h>
#include <asm/war.h>
/*
@@ -51,6 +52,7 @@
#define CP0_GLOBALNUMBER $3, 1
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
+#define CP0_PAGEGRAIN $5, 1
#define CP0_SEGCTL0 $5, 2
#define CP0_SEGCTL1 $5, 3
#define CP0_SEGCTL2 $5, 4
@@ -77,6 +79,7 @@
#define CP0_CONFIG $16
#define CP0_CONFIG3 $16, 3
#define CP0_CONFIG5 $16, 5
+#define CP0_CONFIG6 $16, 6
#define CP0_LLADDR $17
#define CP0_WATCHLO $18
#define CP0_WATCHHI $19
@@ -1481,32 +1484,38 @@ do { \
#define __write_64bit_c0_split(source, sel, val) \
do { \
- unsigned long long __tmp; \
+ unsigned long long __tmp = (val); \
unsigned long __flags; \
\
local_irq_save(__flags); \
- if (sel == 0) \
+ if (MIPS_ISA_REV >= 2) \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\t" MIPS_ISA_LEVEL "\n\t" \
+ "dins\t%L0, %M0, 32, 32\n\t" \
+ "dmtc0\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
+ else if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L1, 32\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M1, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source "\n\t" \
".set\tmips0" \
- : "=&r,r" (__tmp) \
- : "r,0" (val)); \
+ : "+r" (__tmp)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L1, 32\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M1, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source ", " #sel "\n\t" \
".set\tmips0" \
- : "=&r,r" (__tmp) \
- : "r,0" (val)); \
+ : "+r" (__tmp)); \
local_irq_restore(__flags); \
} while (0)
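The rewritten MIPSr2+ branch above replaces the four-instruction shift/or sequence with a single dins (doubleword insert field): on a 32-bit kernel the 64-bit value is carried as a high/low register pair (%M0/%L0), and "dins %L0, %M0, 32, 32" copies the low 32 bits of the high half into bits 32-63 of the low half before the dmtc0. The equivalent C, for illustration only:

#include <stdint.h>

/* What "dins %L0, %M0, 32, 32" computes on the (hi, lo) register pair. */
static inline uint64_t dins_merge(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

Switching the constraints from "=&r,r"/"r,0" to a single "+r" read-write operand also lets the value be modified in place, which is what makes the single-instruction merge possible.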
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index da2004cef2d5..b509371a6b0c 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -126,8 +126,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
for_each_possible_cpu(i)
cpu_context(i, mm) = 0;
- atomic_set(&mm->context.fp_mode_switching, 0);
-
mm->context.bd_emupage_allocmap = NULL;
spin_lock_init(&mm->context.bd_emupage_lock);
init_waitqueue_head(&mm->context.bd_emupage_queue);
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
index 5604db3d1836..d79c68fa78d9 100644
--- a/arch/mips/include/asm/netlogic/xlr/fmn.h
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
for (i = 0; i < 8; i++) {
nlm_msgsnd(dest);
status = nlm_read_c2_status0();
- if ((status & 0x2) == 1)
- pr_info("Send pending fail!\n");
if ((status & 0x4) == 0)
return 0;
}
diff --git a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
index a1e21a3854cf..1eef155979f3 100644
--- a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -55,6 +55,8 @@
#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
+void __cvmx_interrupt_asxx_enable(int block);
+
union cvmx_asxx_gmii_rx_clk_set {
uint64_t u64;
struct cvmx_asxx_gmii_rx_clk_set_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
index 6e61792d9248..1d18be8cdddc 100644
--- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -1,10014 +1,176 @@
-/***********************license start***************
- * Author: Cavium Networks
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Octeon CIU definitions
*
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ */
#ifndef __CVMX_CIU_DEFS_H__
#define __CVMX_CIU_DEFS_H__
-#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
-#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
-#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
-#define CVMX_CIU_EN2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x000107000000A600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x000107000000A000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000C800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000A800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x000107000000A200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x000107000000A400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
-#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
-#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
-#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
-#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
-#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
-static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned long offset)
+#include <asm/bitfield.h>
+
+#define CVMX_CIU_ADDR(addr, coreid, coremask, offset) \
+ (CVMX_ADD_IO_SEG(0x0001070000000000ull + addr##ull) + \
+ (((coreid) & (coremask)) * offset))
+
+#define CVMX_CIU_EN2_PPX_IP4(c) CVMX_CIU_ADDR(0xA400, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1C(c) CVMX_CIU_ADDR(0xCC00, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1S(c) CVMX_CIU_ADDR(0xAC00, c, 0x0F, 8)
+#define CVMX_CIU_FUSE CVMX_CIU_ADDR(0x0728, 0, 0x00, 0)
+#define CVMX_CIU_INT_SUM1 CVMX_CIU_ADDR(0x0108, 0, 0x00, 0)
+#define CVMX_CIU_INTX_EN0(c) CVMX_CIU_ADDR(0x0200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1C(c) CVMX_CIU_ADDR(0x2200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1S(c) CVMX_CIU_ADDR(0x6200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1(c) CVMX_CIU_ADDR(0x0208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1C(c) CVMX_CIU_ADDR(0x2208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1S(c) CVMX_CIU_ADDR(0x6208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_SUM0(c) CVMX_CIU_ADDR(0x0000, c, 0x3F, 8)
+#define CVMX_CIU_NMI CVMX_CIU_ADDR(0x0718, 0, 0x00, 0)
+#define CVMX_CIU_PCI_INTA CVMX_CIU_ADDR(0x0750, 0, 0x00, 0)
+#define CVMX_CIU_PP_BIST_STAT CVMX_CIU_ADDR(0x07E0, 0, 0x00, 0)
+#define CVMX_CIU_PP_DBG CVMX_CIU_ADDR(0x0708, 0, 0x00, 0)
+#define CVMX_CIU_PP_RST CVMX_CIU_ADDR(0x0700, 0, 0x00, 0)
+#define CVMX_CIU_QLM0 CVMX_CIU_ADDR(0x0780, 0, 0x00, 0)
+#define CVMX_CIU_QLM1 CVMX_CIU_ADDR(0x0788, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGC CVMX_CIU_ADDR(0x0768, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGD CVMX_CIU_ADDR(0x0770, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_BIST CVMX_CIU_ADDR(0x0738, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST1 CVMX_CIU_ADDR(0x0758, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST CVMX_CIU_ADDR(0x0748, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_RST CVMX_CIU_ADDR(0x0740, 0, 0x00, 0)
+#define CVMX_CIU_SUM2_PPX_IP4(c) CVMX_CIU_ADDR(0x8C00, c, 0x0F, 8)
+#define CVMX_CIU_TIM_MULTI_CAST CVMX_CIU_ADDR(0xC200, 0, 0x00, 0)
+#define CVMX_CIU_TIMX(c) CVMX_CIU_ADDR(0x0480, c, 0x0F, 8)
+
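The table-driven CVMX_CIU_ADDR() must expand to exactly the addresses the old per-register literals produced. A hedged, standalone sanity check for one register, with CVMX_ADD_IO_SEG() modeled as the identity (the real macro ORs in the I/O segment bits, which cancel out of the comparison):

#include <assert.h>
#include <stdint.h>

#define CVMX_ADD_IO_SEG(x) (x)	/* identity stand-in for this check */
#define CVMX_CIU_ADDR(addr, coreid, coremask, offset) \
	(CVMX_ADD_IO_SEG(0x0001070000000000ull + addr##ull) + \
	(((coreid) & (coremask)) * offset))

int main(void)
{
	/* Old form: CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((c) & 63) * 16 */
	uint64_t old_form = 0x0001070000000200ull + (5 & 63) * 16;

	assert(CVMX_CIU_ADDR(0x0200, 5, 0x3F, 16) == old_form);
	return 0;
}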
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned int coreid)
{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100600ull) + (offset) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
+ if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+ return CVMX_CIU_ADDR(0x100100600, coreid, 0x0F, 8);
+ else
+ return CVMX_CIU_ADDR(0x000000680, coreid, 0x0F, 8);
}
-static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned long offset)
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned int coreid)
{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100400ull) + (offset) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
+	if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+		return CVMX_CIU_ADDR(0x100100400, coreid, 0x0F, 8);
+	return CVMX_CIU_ADDR(0x000000600, coreid, 0x0F, 8);
}
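+/*
+ * Usage sketch (assuming the usual cvmx_write_csr()/cvmx_read_csr()
+ * CSR accessors): raise mailbox bit 0 for core 3, then read the
+ * pending bits back:
+ *
+ *	cvmx_write_csr(CVMX_CIU_MBOX_SETX(3), 1ull << 0);
+ *	pending = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(3));
+ */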
-#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
-#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
-#define CVMX_CIU_PP_BIST_STAT (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
-static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset)
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned int coreid)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100200ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x100100200, coreid, 0x0F, 8);
case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001010000030000ull) + (offset) * 8;
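+		/*
+		 * These parts host the register in the
+		 * 0x0001010000000000 segment, hence the adjustment to
+		 * the shared 0x0001070000000000 base.
+		 */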
+ return CVMX_CIU_ADDR(0x000030000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000580, coreid, 0x0F, 8);
}
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
}
-#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
-#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
-#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
-#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
-#define CVMX_CIU_QLM3 (CVMX_ADD_IO_SEG(0x0001070000000798ull))
-#define CVMX_CIU_QLM4 (CVMX_ADD_IO_SEG(0x00010700000007A0ull))
-#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
-#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
-#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
-#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
-#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
-#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
-#define CVMX_CIU_SOFT_PRST2 (CVMX_ADD_IO_SEG(0x00010700000007D8ull))
-#define CVMX_CIU_SOFT_PRST3 (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
-#define CVMX_CIU_SUM1_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM1_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008E00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008A00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIM_MULTI_CAST (CVMX_ADD_IO_SEG(0x000107000000C200ull))
-static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset)
+static inline uint64_t CVMX_CIU_WDOGX(unsigned int coreid)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x100100000, coreid, 0x0F, 8);
case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001010000020000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x000020000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000500, coreid, 0x0F, 8);
}
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
}
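+/*
+ * Typical pairing, sketched (assuming cvmx_get_core_num() from
+ * cvmx.h): CVMX_CIU_WDOGX(core) holds the per-core watchdog state,
+ * while a write to CVMX_CIU_PP_POKEX(core) restarts its countdown:
+ *
+ *	cvmx_write_csr(CVMX_CIU_PP_POKEX(cvmx_get_core_num()), 1);
+ */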
-union cvmx_ciu_bist {
- uint64_t u64;
- struct cvmx_ciu_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t bist:7;
-#else
- uint64_t bist:7;
- uint64_t reserved_7_63:57;
-#endif
- } s;
- struct cvmx_ciu_bist_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t bist:4;
-#else
- uint64_t bist:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn30xx;
- struct cvmx_ciu_bist_cn30xx cn31xx;
- struct cvmx_ciu_bist_cn30xx cn38xx;
- struct cvmx_ciu_bist_cn30xx cn38xxp2;
- struct cvmx_ciu_bist_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t bist:2;
-#else
- uint64_t bist:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn50xx;
- struct cvmx_ciu_bist_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t bist:3;
-#else
- uint64_t bist:3;
- uint64_t reserved_3_63:61;
-#endif
- } cn52xx;
- struct cvmx_ciu_bist_cn52xx cn52xxp1;
- struct cvmx_ciu_bist_cn30xx cn56xx;
- struct cvmx_ciu_bist_cn30xx cn56xxp1;
- struct cvmx_ciu_bist_cn30xx cn58xx;
- struct cvmx_ciu_bist_cn30xx cn58xxp1;
- struct cvmx_ciu_bist_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t bist:6;
-#else
- uint64_t bist:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn61xx;
- struct cvmx_ciu_bist_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t bist:5;
-#else
- uint64_t bist:5;
- uint64_t reserved_5_63:59;
-#endif
- } cn63xx;
- struct cvmx_ciu_bist_cn63xx cn63xxp1;
- struct cvmx_ciu_bist_cn61xx cn66xx;
- struct cvmx_ciu_bist_s cn68xx;
- struct cvmx_ciu_bist_s cn68xxp1;
- struct cvmx_ciu_bist_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_block_int {
- uint64_t u64;
- struct cvmx_ciu_block_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_43_59:17;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_34_39:6;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t reserved_34_39:6;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_59:17;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_ciu_block_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t reserved_31_40:10;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_40:10;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn61xx;
- struct cvmx_ciu_block_int_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_34_39:6;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_23_24:2;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t reserved_2_2:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t reserved_2_2:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_23_24:2;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t reserved_34_39:6;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn63xx;
- struct cvmx_ciu_block_int_cn63xx cn63xxp1;
- struct cvmx_ciu_block_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_43_59:17;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_33_39:7;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t reserved_33_39:7;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_59:17;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn66xx;
- struct cvmx_ciu_block_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t reserved_31_40:10;
- uint64_t iob:1;
- uint64_t reserved_27_29:3;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_23_24:2;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_6_8:3;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t reserved_2_2:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t reserved_2_2:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t reserved_6_8:3;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_23_24:2;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_29:3;
- uint64_t iob:1;
- uint64_t reserved_31_40:10;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_dint {
- uint64_t u64;
- struct cvmx_ciu_dint_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t dint:32;
-#else
- uint64_t dint:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_dint_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t dint:1;
-#else
- uint64_t dint:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_dint_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t dint:2;
-#else
- uint64_t dint:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_dint_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t dint:16;
-#else
- uint64_t dint:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_dint_cn38xx cn38xxp2;
- struct cvmx_ciu_dint_cn31xx cn50xx;
- struct cvmx_ciu_dint_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t dint:4;
-#else
- uint64_t dint:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_dint_cn52xx cn52xxp1;
- struct cvmx_ciu_dint_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t dint:12;
-#else
- uint64_t dint:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_dint_cn56xx cn56xxp1;
- struct cvmx_ciu_dint_cn38xx cn58xx;
- struct cvmx_ciu_dint_cn38xx cn58xxp1;
- struct cvmx_ciu_dint_cn52xx cn61xx;
- struct cvmx_ciu_dint_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t dint:6;
-#else
- uint64_t dint:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_dint_cn63xx cn63xxp1;
- struct cvmx_ciu_dint_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t dint:10;
-#else
- uint64_t dint:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_dint_s cn68xx;
- struct cvmx_ciu_dint_s cn68xxp1;
- struct cvmx_ciu_dint_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_fuse {
- uint64_t u64;
- struct cvmx_ciu_fuse_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t fuse:32;
-#else
- uint64_t fuse:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_fuse_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t fuse:1;
-#else
- uint64_t fuse:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_fuse_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t fuse:2;
-#else
- uint64_t fuse:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_fuse_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t fuse:16;
-#else
- uint64_t fuse:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_fuse_cn38xx cn38xxp2;
- struct cvmx_ciu_fuse_cn31xx cn50xx;
- struct cvmx_ciu_fuse_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t fuse:4;
-#else
- uint64_t fuse:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_fuse_cn52xx cn52xxp1;
- struct cvmx_ciu_fuse_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t fuse:12;
-#else
- uint64_t fuse:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_fuse_cn56xx cn56xxp1;
- struct cvmx_ciu_fuse_cn38xx cn58xx;
- struct cvmx_ciu_fuse_cn38xx cn58xxp1;
- struct cvmx_ciu_fuse_cn52xx cn61xx;
- struct cvmx_ciu_fuse_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t fuse:6;
-#else
- uint64_t fuse:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_fuse_cn63xx cn63xxp1;
- struct cvmx_ciu_fuse_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t fuse:10;
-#else
- uint64_t fuse:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_fuse_s cn68xx;
- struct cvmx_ciu_fuse_s cn68xxp1;
- struct cvmx_ciu_fuse_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_gstop {
- uint64_t u64;
- struct cvmx_ciu_gstop_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t gstop:1;
-#else
- uint64_t gstop:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_gstop_s cn30xx;
- struct cvmx_ciu_gstop_s cn31xx;
- struct cvmx_ciu_gstop_s cn38xx;
- struct cvmx_ciu_gstop_s cn38xxp2;
- struct cvmx_ciu_gstop_s cn50xx;
- struct cvmx_ciu_gstop_s cn52xx;
- struct cvmx_ciu_gstop_s cn52xxp1;
- struct cvmx_ciu_gstop_s cn56xx;
- struct cvmx_ciu_gstop_s cn56xxp1;
- struct cvmx_ciu_gstop_s cn58xx;
- struct cvmx_ciu_gstop_s cn58xxp1;
- struct cvmx_ciu_gstop_s cn61xx;
- struct cvmx_ciu_gstop_s cn63xx;
- struct cvmx_ciu_gstop_s cn63xxp1;
- struct cvmx_ciu_gstop_s cn66xx;
- struct cvmx_ciu_gstop_s cn68xx;
- struct cvmx_ciu_gstop_s cn68xxp1;
- struct cvmx_ciu_gstop_s cnf71xx;
-};
-
-union cvmx_ciu_intx_en0 {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_en0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_en0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_en0_cn30xx cn50xx;
- struct cvmx_ciu_intx_en0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_en0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en0_cn38xx cn58xx;
- struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_en0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en0_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en0_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1 {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t wdog:1;
-#else
- uint64_t wdog:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_en1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_en1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_en1_cn31xx cn50xx;
- struct cvmx_ciu_intx_en1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_intx_en1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en1_cn38xx cn58xx;
- struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_en1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
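Each union in this removed header overlays the raw u64 register value with two mirror-image bitfield layouts, selected by __BIG_ENDIAN_BITFIELD so that the same field name addresses the same hardware bits on big- and little-endian builds. A minimal consumer sketch, assuming the usual cvmx_read_csr() accessor and a hypothetical CSR address (neither is defined in this hunk):

	#include <stdint.h>

	uint64_t cvmx_read_csr(uint64_t addr);		/* assumed Octeon accessor */

	#define CIU_INTX_EN1_ADDR 0x8001070000000200ULL	/* hypothetical address */

	/* Test whether core 0's watchdog interrupt is enabled in EN1. */
	static int wdog0_enabled(void)
	{
		union cvmx_ciu_intx_en1 en1;

		en1.u64 = cvmx_read_csr(CIU_INTX_EN1_ADDR);
		return (en1.s.wdog & 1) != 0;		/* wdog bit 0 = core 0 */
	}

Because both endian branches declare the fields in exactly reversed order, en1.s.wdog lands on register bits 15:0 either way; only the declaration order flips, never the bit assignment.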
-union cvmx_ciu_intx_en1_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
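EN1 itself is the plain read/write enable mask; the _w1c and _w1s aliases above clear or set exactly the bits written as 1, so independent callers can mask or unmask a single interrupt source without a read-modify-write race on the shared register. A hedged sketch, assuming a cvmx_write_csr() accessor and a hypothetical W1S address:

	void cvmx_write_csr(uint64_t addr, uint64_t val);	/* assumed Octeon accessor */

	#define CIU_INTX_EN1_W1S_ADDR 0x8001070000000708ULL	/* hypothetical address */

	/* Enable core 3's watchdog interrupt without touching other bits. */
	static void enable_wdog3(void)
	{
		union cvmx_ciu_intx_en1_w1s w1s;

		w1s.u64 = 0;
		w1s.s.wdog = 1ull << 3;		/* only wdog bit 3 is written as 1 */
		cvmx_write_csr(CIU_INTX_EN1_W1S_ADDR, w1s.u64);
	}

Writing the same value to the _w1c alias instead would disable that one source, again leaving every other enable bit untouched.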
-union cvmx_ciu_intx_en4_0 {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_en4_0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_en4_0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en4_0_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_en4_0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
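Beyond the superset `s` layout, each union carries one view per SoC model (cn50xx, cn52xx, cn61xx, ...) in which sources absent from that chip collapse into reserved_x_y padding, and several models simply alias an earlier layout. Callers are expected to pick the view matching the chip they run on; a sketch under that assumption, using the customary OCTEON_IS_MODEL() test (assumed here from octeon-model.h):

	/* key_zero exists only on some models (e.g. cn56xx/cn58xx above). */
	static int key_zero_enabled(union cvmx_ciu_intx_en4_0 en)
	{
		/* OCTEON_IS_MODEL()/OCTEON_CN56XX assumed from octeon-model.h */
		if (OCTEON_IS_MODEL(OCTEON_CN56XX))
			return en.cn56xx.key_zero;
		return 0;	/* field is reserved on this model */
	}

Reading through the matching per-model struct keeps such accesses honest: a field that is mere reserved padding on the running chip is never misread as live data.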
-union cvmx_ciu_intx_en4_0_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_0_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1 {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_en4_1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_intx_en4_1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en4_1_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_en4_1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_sum0 {
- uint64_t u64;
- struct cvmx_ciu_intx_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_sum0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_sum0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_sum0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
- struct cvmx_ciu_intx_sum0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_sum0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
- struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_sum0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn63xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_sum4 {
- uint64_t u64;
- struct cvmx_ciu_intx_sum4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_sum4_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_sum4_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_sum4_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_sum4_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_sum4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn63xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_sum4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_sum4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_int33_sum0 {
- uint64_t u64;
- struct cvmx_ciu_int33_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_int33_sum0_s cn61xx;
- struct cvmx_ciu_int33_sum0_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_int33_sum0_cn63xx cn63xxp1;
- struct cvmx_ciu_int33_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_int33_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_int_dbg_sel {
- uint64_t u64;
- struct cvmx_ciu_int_dbg_sel_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_5_7:3;
- uint64_t pp:5;
-#else
- uint64_t pp:5;
- uint64_t reserved_5_7:3;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } s;
- struct cvmx_ciu_int_dbg_sel_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_4_7:4;
- uint64_t pp:4;
-#else
- uint64_t pp:4;
- uint64_t reserved_4_7:4;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } cn61xx;
- struct cvmx_ciu_int_dbg_sel_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_3_7:5;
- uint64_t pp:3;
-#else
- uint64_t pp:3;
- uint64_t reserved_3_7:5;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } cn63xx;
- struct cvmx_ciu_int_dbg_sel_cn61xx cn66xx;
- struct cvmx_ciu_int_dbg_sel_s cn68xx;
- struct cvmx_ciu_int_dbg_sel_s cn68xxp1;
- struct cvmx_ciu_int_dbg_sel_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_int_sum1 {
- uint64_t u64;
- struct cvmx_ciu_int_sum1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_int_sum1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t wdog:1;
-#else
- uint64_t wdog:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_int_sum1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_int_sum1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
- struct cvmx_ciu_int_sum1_cn31xx cn50xx;
- struct cvmx_ciu_int_sum1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_int_sum1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_int_sum1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
- struct cvmx_ciu_int_sum1_cn38xx cn58xx;
- struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
- struct cvmx_ciu_int_sum1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_int_sum1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_int_sum1_cn63xx cn63xxp1;
- struct cvmx_ciu_int_sum1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_int_sum1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_37_46:10;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_46:10;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_mbox_clrx {
- uint64_t u64;
- struct cvmx_ciu_mbox_clrx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bits:32;
-#else
- uint64_t bits:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_mbox_clrx_s cn30xx;
- struct cvmx_ciu_mbox_clrx_s cn31xx;
- struct cvmx_ciu_mbox_clrx_s cn38xx;
- struct cvmx_ciu_mbox_clrx_s cn38xxp2;
- struct cvmx_ciu_mbox_clrx_s cn50xx;
- struct cvmx_ciu_mbox_clrx_s cn52xx;
- struct cvmx_ciu_mbox_clrx_s cn52xxp1;
- struct cvmx_ciu_mbox_clrx_s cn56xx;
- struct cvmx_ciu_mbox_clrx_s cn56xxp1;
- struct cvmx_ciu_mbox_clrx_s cn58xx;
- struct cvmx_ciu_mbox_clrx_s cn58xxp1;
- struct cvmx_ciu_mbox_clrx_s cn61xx;
- struct cvmx_ciu_mbox_clrx_s cn63xx;
- struct cvmx_ciu_mbox_clrx_s cn63xxp1;
- struct cvmx_ciu_mbox_clrx_s cn66xx;
- struct cvmx_ciu_mbox_clrx_s cn68xx;
- struct cvmx_ciu_mbox_clrx_s cn68xxp1;
- struct cvmx_ciu_mbox_clrx_s cnf71xx;
-};
-
-union cvmx_ciu_mbox_setx {
- uint64_t u64;
- struct cvmx_ciu_mbox_setx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bits:32;
-#else
- uint64_t bits:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_mbox_setx_s cn30xx;
- struct cvmx_ciu_mbox_setx_s cn31xx;
- struct cvmx_ciu_mbox_setx_s cn38xx;
- struct cvmx_ciu_mbox_setx_s cn38xxp2;
- struct cvmx_ciu_mbox_setx_s cn50xx;
- struct cvmx_ciu_mbox_setx_s cn52xx;
- struct cvmx_ciu_mbox_setx_s cn52xxp1;
- struct cvmx_ciu_mbox_setx_s cn56xx;
- struct cvmx_ciu_mbox_setx_s cn56xxp1;
- struct cvmx_ciu_mbox_setx_s cn58xx;
- struct cvmx_ciu_mbox_setx_s cn58xxp1;
- struct cvmx_ciu_mbox_setx_s cn61xx;
- struct cvmx_ciu_mbox_setx_s cn63xx;
- struct cvmx_ciu_mbox_setx_s cn63xxp1;
- struct cvmx_ciu_mbox_setx_s cn66xx;
- struct cvmx_ciu_mbox_setx_s cn68xx;
- struct cvmx_ciu_mbox_setx_s cn68xxp1;
- struct cvmx_ciu_mbox_setx_s cnf71xx;
-};
-
-union cvmx_ciu_nmi {
- uint64_t u64;
- struct cvmx_ciu_nmi_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t nmi:32;
-#else
- uint64_t nmi:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_nmi_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t nmi:1;
-#else
- uint64_t nmi:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_nmi_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t nmi:2;
-#else
- uint64_t nmi:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_nmi_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t nmi:16;
-#else
- uint64_t nmi:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_nmi_cn38xx cn38xxp2;
- struct cvmx_ciu_nmi_cn31xx cn50xx;
- struct cvmx_ciu_nmi_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t nmi:4;
-#else
- uint64_t nmi:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_nmi_cn52xx cn52xxp1;
- struct cvmx_ciu_nmi_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t nmi:12;
-#else
- uint64_t nmi:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_nmi_cn56xx cn56xxp1;
- struct cvmx_ciu_nmi_cn38xx cn58xx;
- struct cvmx_ciu_nmi_cn38xx cn58xxp1;
- struct cvmx_ciu_nmi_cn52xx cn61xx;
- struct cvmx_ciu_nmi_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t nmi:6;
-#else
- uint64_t nmi:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_nmi_cn63xx cn63xxp1;
- struct cvmx_ciu_nmi_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t nmi:10;
-#else
- uint64_t nmi:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_nmi_s cn68xx;
- struct cvmx_ciu_nmi_s cn68xxp1;
- struct cvmx_ciu_nmi_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_pci_inta {
- uint64_t u64;
- struct cvmx_ciu_pci_inta_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t intr:2;
-#else
- uint64_t intr:2;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_ciu_pci_inta_s cn30xx;
- struct cvmx_ciu_pci_inta_s cn31xx;
- struct cvmx_ciu_pci_inta_s cn38xx;
- struct cvmx_ciu_pci_inta_s cn38xxp2;
- struct cvmx_ciu_pci_inta_s cn50xx;
- struct cvmx_ciu_pci_inta_s cn52xx;
- struct cvmx_ciu_pci_inta_s cn52xxp1;
- struct cvmx_ciu_pci_inta_s cn56xx;
- struct cvmx_ciu_pci_inta_s cn56xxp1;
- struct cvmx_ciu_pci_inta_s cn58xx;
- struct cvmx_ciu_pci_inta_s cn58xxp1;
- struct cvmx_ciu_pci_inta_s cn61xx;
- struct cvmx_ciu_pci_inta_s cn63xx;
- struct cvmx_ciu_pci_inta_s cn63xxp1;
- struct cvmx_ciu_pci_inta_s cn66xx;
- struct cvmx_ciu_pci_inta_s cn68xx;
- struct cvmx_ciu_pci_inta_s cn68xxp1;
- struct cvmx_ciu_pci_inta_s cnf71xx;
-};
-
-union cvmx_ciu_pp_bist_stat {
- uint64_t u64;
- struct cvmx_ciu_pp_bist_stat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t pp_bist:32;
-#else
- uint64_t pp_bist:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_bist_stat_s cn68xx;
- struct cvmx_ciu_pp_bist_stat_s cn68xxp1;
-};
-
-union cvmx_ciu_pp_dbg {
- uint64_t u64;
- struct cvmx_ciu_pp_dbg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ppdbg:32;
-#else
- uint64_t ppdbg:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_dbg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ppdbg:1;
-#else
- uint64_t ppdbg:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_pp_dbg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t ppdbg:2;
-#else
- uint64_t ppdbg:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_pp_dbg_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t ppdbg:16;
-#else
- uint64_t ppdbg:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_pp_dbg_cn38xx cn38xxp2;
- struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
- struct cvmx_ciu_pp_dbg_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t ppdbg:4;
-#else
- uint64_t ppdbg:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
- struct cvmx_ciu_pp_dbg_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t ppdbg:12;
-#else
- uint64_t ppdbg:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
- struct cvmx_ciu_pp_dbg_cn38xx cn58xx;
- struct cvmx_ciu_pp_dbg_cn38xx cn58xxp1;
- struct cvmx_ciu_pp_dbg_cn52xx cn61xx;
- struct cvmx_ciu_pp_dbg_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t ppdbg:6;
-#else
- uint64_t ppdbg:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_pp_dbg_cn63xx cn63xxp1;
- struct cvmx_ciu_pp_dbg_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t ppdbg:10;
-#else
- uint64_t ppdbg:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_pp_dbg_s cn68xx;
- struct cvmx_ciu_pp_dbg_s cn68xxp1;
- struct cvmx_ciu_pp_dbg_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_pp_pokex {
- uint64_t u64;
- struct cvmx_ciu_pp_pokex_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t poke:64;
-#else
- uint64_t poke:64;
-#endif
- } s;
- struct cvmx_ciu_pp_pokex_s cn30xx;
- struct cvmx_ciu_pp_pokex_s cn31xx;
- struct cvmx_ciu_pp_pokex_s cn38xx;
- struct cvmx_ciu_pp_pokex_s cn38xxp2;
- struct cvmx_ciu_pp_pokex_s cn50xx;
- struct cvmx_ciu_pp_pokex_s cn52xx;
- struct cvmx_ciu_pp_pokex_s cn52xxp1;
- struct cvmx_ciu_pp_pokex_s cn56xx;
- struct cvmx_ciu_pp_pokex_s cn56xxp1;
- struct cvmx_ciu_pp_pokex_s cn58xx;
- struct cvmx_ciu_pp_pokex_s cn58xxp1;
- struct cvmx_ciu_pp_pokex_s cn61xx;
- struct cvmx_ciu_pp_pokex_s cn63xx;
- struct cvmx_ciu_pp_pokex_s cn63xxp1;
- struct cvmx_ciu_pp_pokex_s cn66xx;
- struct cvmx_ciu_pp_pokex_s cn68xx;
- struct cvmx_ciu_pp_pokex_s cn68xxp1;
- struct cvmx_ciu_pp_pokex_s cnf71xx;
-};
-
-union cvmx_ciu_pp_rst {
- uint64_t u64;
- struct cvmx_ciu_pp_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t rst:31;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:31;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_rst_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_pp_rst_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t rst:1;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:1;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_pp_rst_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t rst:15;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:15;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_pp_rst_cn38xx cn38xxp2;
- struct cvmx_ciu_pp_rst_cn31xx cn50xx;
- struct cvmx_ciu_pp_rst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t rst:3;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:3;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
- struct cvmx_ciu_pp_rst_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t rst:11;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:11;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
- struct cvmx_ciu_pp_rst_cn38xx cn58xx;
- struct cvmx_ciu_pp_rst_cn38xx cn58xxp1;
- struct cvmx_ciu_pp_rst_cn52xx cn61xx;
- struct cvmx_ciu_pp_rst_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t rst:5;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:5;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_pp_rst_cn63xx cn63xxp1;
- struct cvmx_ciu_pp_rst_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t rst:9;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:9;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_pp_rst_s cn68xx;
- struct cvmx_ciu_pp_rst_s cn68xxp1;
- struct cvmx_ciu_pp_rst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_qlm0 {
- uint64_t u64;
- struct cvmx_ciu_qlm0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm0_s cn61xx;
- struct cvmx_ciu_qlm0_s cn63xx;
- struct cvmx_ciu_qlm0_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm0_s cn66xx;
- struct cvmx_ciu_qlm0_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn68xx;
- struct cvmx_ciu_qlm0_cn68xx cn68xxp1;
- struct cvmx_ciu_qlm0_s cnf71xx;
-};
-
-union cvmx_ciu_qlm1 {
- uint64_t u64;
- struct cvmx_ciu_qlm1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm1_s cn61xx;
- struct cvmx_ciu_qlm1_s cn63xx;
- struct cvmx_ciu_qlm1_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm1_s cn66xx;
- struct cvmx_ciu_qlm1_s cn68xx;
- struct cvmx_ciu_qlm1_s cn68xxp1;
- struct cvmx_ciu_qlm1_s cnf71xx;
-};
-
-union cvmx_ciu_qlm2 {
- uint64_t u64;
- struct cvmx_ciu_qlm2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm2_cn61xx cn63xx;
- struct cvmx_ciu_qlm2_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm2_cn61xx cn66xx;
- struct cvmx_ciu_qlm2_s cn68xx;
- struct cvmx_ciu_qlm2_s cn68xxp1;
- struct cvmx_ciu_qlm2_cn61xx cnf71xx;
-};
-union cvmx_ciu_qlm3 {
+union cvmx_ciu_qlm {
uint64_t u64;
- struct cvmx_ciu_qlm3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
+ struct cvmx_ciu_qlm_s {
+ __BITFIELD_FIELD(uint64_t g2bypass:1,
+ __BITFIELD_FIELD(uint64_t reserved_53_62:10,
+ __BITFIELD_FIELD(uint64_t g2deemph:5,
+ __BITFIELD_FIELD(uint64_t reserved_45_47:3,
+ __BITFIELD_FIELD(uint64_t g2margin:5,
+ __BITFIELD_FIELD(uint64_t reserved_32_39:8,
+ __BITFIELD_FIELD(uint64_t txbypass:1,
+ __BITFIELD_FIELD(uint64_t reserved_21_30:10,
+ __BITFIELD_FIELD(uint64_t txdeemph:5,
+ __BITFIELD_FIELD(uint64_t reserved_13_15:3,
+ __BITFIELD_FIELD(uint64_t txmargin:5,
+ __BITFIELD_FIELD(uint64_t reserved_4_7:4,
+ __BITFIELD_FIELD(uint64_t lane_en:4,
+ ;)))))))))))))
} s;
- struct cvmx_ciu_qlm3_s cn68xx;
- struct cvmx_ciu_qlm3_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm4 {
- uint64_t u64;
- struct cvmx_ciu_qlm4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm4_s cn68xx;
- struct cvmx_ciu_qlm4_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm_dcok {
- uint64_t u64;
- struct cvmx_ciu_qlm_dcok_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t qlm_dcok:4;
-#else
- uint64_t qlm_dcok:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu_qlm_dcok_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t qlm_dcok:2;
-#else
- uint64_t qlm_dcok:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_dcok_s cn56xx;
- struct cvmx_ciu_qlm_dcok_s cn56xxp1;
};

union cvmx_ciu_qlm_jtgc {
uint64_t u64;
struct cvmx_ciu_qlm_jtgc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t bypass_ext:1;
- uint64_t reserved_11_15:5;
- uint64_t clk_div:3;
- uint64_t reserved_7_7:1;
- uint64_t mux_sel:3;
- uint64_t bypass:4;
-#else
- uint64_t bypass:4;
- uint64_t mux_sel:3;
- uint64_t reserved_7_7:1;
- uint64_t clk_div:3;
- uint64_t reserved_11_15:5;
- uint64_t bypass_ext:1;
- uint64_t reserved_17_63:47;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_17_63:47,
+ __BITFIELD_FIELD(uint64_t bypass_ext:1,
+ __BITFIELD_FIELD(uint64_t reserved_11_15:5,
+ __BITFIELD_FIELD(uint64_t clk_div:3,
+ __BITFIELD_FIELD(uint64_t reserved_7_7:1,
+ __BITFIELD_FIELD(uint64_t mux_sel:3,
+ __BITFIELD_FIELD(uint64_t bypass:4,
+ ;)))))))
} s;
- struct cvmx_ciu_qlm_jtgc_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_5_7:3;
- uint64_t mux_sel:1;
- uint64_t reserved_2_3:2;
- uint64_t bypass:2;
-#else
- uint64_t bypass:2;
- uint64_t reserved_2_3:2;
- uint64_t mux_sel:1;
- uint64_t reserved_5_7:3;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_jtgc_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_6_7:2;
- uint64_t mux_sel:2;
- uint64_t bypass:4;
-#else
- uint64_t bypass:4;
- uint64_t mux_sel:2;
- uint64_t reserved_6_7:2;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn56xx;
- struct cvmx_ciu_qlm_jtgc_cn56xx cn56xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_6_7:2;
- uint64_t mux_sel:2;
- uint64_t reserved_3_3:1;
- uint64_t bypass:3;
-#else
- uint64_t bypass:3;
- uint64_t reserved_3_3:1;
- uint64_t mux_sel:2;
- uint64_t reserved_6_7:2;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn63xx;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn63xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn66xx;
- struct cvmx_ciu_qlm_jtgc_s cn68xx;
- struct cvmx_ciu_qlm_jtgc_s cn68xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx cnf71xx;
};

union cvmx_ciu_qlm_jtgd {
uint64_t u64;
struct cvmx_ciu_qlm_jtgd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_45_60:16;
- uint64_t select:5;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:5;
- uint64_t reserved_45_60:16;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } s;
- struct cvmx_ciu_qlm_jtgd_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_42_60:19;
- uint64_t select:2;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:2;
- uint64_t reserved_42_60:19;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_jtgd_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_44_60:17;
- uint64_t select:4;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:4;
- uint64_t reserved_44_60:17;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_qlm_jtgd_cn56xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_37_60:24;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_60:24;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn56xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_43_60:18;
- uint64_t select:3;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:3;
- uint64_t reserved_43_60:18;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn63xx;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn63xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn66xx;
- struct cvmx_ciu_qlm_jtgd_s cn68xx;
- struct cvmx_ciu_qlm_jtgd_s cn68xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_soft_bist {
- uint64_t u64;
- struct cvmx_ciu_soft_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_bist:1;
-#else
- uint64_t soft_bist:1;
- uint64_t reserved_1_63:63;
-#endif
+ __BITFIELD_FIELD(uint64_t capture:1,
+ __BITFIELD_FIELD(uint64_t shift:1,
+ __BITFIELD_FIELD(uint64_t update:1,
+ __BITFIELD_FIELD(uint64_t reserved_45_60:16,
+ __BITFIELD_FIELD(uint64_t select:5,
+ __BITFIELD_FIELD(uint64_t reserved_37_39:3,
+ __BITFIELD_FIELD(uint64_t shft_cnt:5,
+ __BITFIELD_FIELD(uint64_t shft_reg:32,
+ ;))))))))
} s;
- struct cvmx_ciu_soft_bist_s cn30xx;
- struct cvmx_ciu_soft_bist_s cn31xx;
- struct cvmx_ciu_soft_bist_s cn38xx;
- struct cvmx_ciu_soft_bist_s cn38xxp2;
- struct cvmx_ciu_soft_bist_s cn50xx;
- struct cvmx_ciu_soft_bist_s cn52xx;
- struct cvmx_ciu_soft_bist_s cn52xxp1;
- struct cvmx_ciu_soft_bist_s cn56xx;
- struct cvmx_ciu_soft_bist_s cn56xxp1;
- struct cvmx_ciu_soft_bist_s cn58xx;
- struct cvmx_ciu_soft_bist_s cn58xxp1;
- struct cvmx_ciu_soft_bist_s cn61xx;
- struct cvmx_ciu_soft_bist_s cn63xx;
- struct cvmx_ciu_soft_bist_s cn63xxp1;
- struct cvmx_ciu_soft_bist_s cn66xx;
- struct cvmx_ciu_soft_bist_s cn68xx;
- struct cvmx_ciu_soft_bist_s cn68xxp1;
- struct cvmx_ciu_soft_bist_s cnf71xx;
};

union cvmx_ciu_soft_prst {
uint64_t u64;
struct cvmx_ciu_soft_prst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t host64:1;
- uint64_t npi:1;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t npi:1;
- uint64_t host64:1;
- uint64_t reserved_3_63:61;
-#endif
- } s;
- struct cvmx_ciu_soft_prst_s cn30xx;
- struct cvmx_ciu_soft_prst_s cn31xx;
- struct cvmx_ciu_soft_prst_s cn38xx;
- struct cvmx_ciu_soft_prst_s cn38xxp2;
- struct cvmx_ciu_soft_prst_s cn50xx;
- struct cvmx_ciu_soft_prst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn52xx;
- struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn56xx;
- struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
- struct cvmx_ciu_soft_prst_s cn58xx;
- struct cvmx_ciu_soft_prst_s cn58xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn61xx;
- struct cvmx_ciu_soft_prst_cn52xx cn63xx;
- struct cvmx_ciu_soft_prst_cn52xx cn63xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn66xx;
- struct cvmx_ciu_soft_prst_cn52xx cn68xx;
- struct cvmx_ciu_soft_prst_cn52xx cn68xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_soft_prst1 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst1_s cn52xx;
- struct cvmx_ciu_soft_prst1_s cn52xxp1;
- struct cvmx_ciu_soft_prst1_s cn56xx;
- struct cvmx_ciu_soft_prst1_s cn56xxp1;
- struct cvmx_ciu_soft_prst1_s cn61xx;
- struct cvmx_ciu_soft_prst1_s cn63xx;
- struct cvmx_ciu_soft_prst1_s cn63xxp1;
- struct cvmx_ciu_soft_prst1_s cn66xx;
- struct cvmx_ciu_soft_prst1_s cn68xx;
- struct cvmx_ciu_soft_prst1_s cn68xxp1;
- struct cvmx_ciu_soft_prst1_s cnf71xx;
-};
-
-union cvmx_ciu_soft_prst2 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst2_s cn66xx;
-};
-
-union cvmx_ciu_soft_prst3 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst3_s cn66xx;
-};
-
-union cvmx_ciu_soft_rst {
- uint64_t u64;
- struct cvmx_ciu_soft_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_rst:1;
-#else
- uint64_t soft_rst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_rst_s cn30xx;
- struct cvmx_ciu_soft_rst_s cn31xx;
- struct cvmx_ciu_soft_rst_s cn38xx;
- struct cvmx_ciu_soft_rst_s cn38xxp2;
- struct cvmx_ciu_soft_rst_s cn50xx;
- struct cvmx_ciu_soft_rst_s cn52xx;
- struct cvmx_ciu_soft_rst_s cn52xxp1;
- struct cvmx_ciu_soft_rst_s cn56xx;
- struct cvmx_ciu_soft_rst_s cn56xxp1;
- struct cvmx_ciu_soft_rst_s cn58xx;
- struct cvmx_ciu_soft_rst_s cn58xxp1;
- struct cvmx_ciu_soft_rst_s cn61xx;
- struct cvmx_ciu_soft_rst_s cn63xx;
- struct cvmx_ciu_soft_rst_s cn63xxp1;
- struct cvmx_ciu_soft_rst_s cn66xx;
- struct cvmx_ciu_soft_rst_s cn68xx;
- struct cvmx_ciu_soft_rst_s cn68xxp1;
- struct cvmx_ciu_soft_rst_s cnf71xx;
-};
-
-union cvmx_ciu_sum1_iox_int {
- uint64_t u64;
- struct cvmx_ciu_sum1_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_3_63:61,
+ __BITFIELD_FIELD(uint64_t host64:1,
+ __BITFIELD_FIELD(uint64_t npi:1,
+ __BITFIELD_FIELD(uint64_t soft_prst:1,
+ ;))))
} s;
- struct cvmx_ciu_sum1_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_iox_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_iox_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip2_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip2_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip3_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip3_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum2_iox_int {
- uint64_t u64;
- struct cvmx_ciu_sum2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_iox_int_cn61xx cn66xx;
- struct cvmx_ciu_sum2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip2_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip3_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip4_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip4_s cnf71xx;
};

union cvmx_ciu_timx {
uint64_t u64;
struct cvmx_ciu_timx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_37_63:27;
- uint64_t one_shot:1;
- uint64_t len:36;
-#else
- uint64_t len:36;
- uint64_t one_shot:1;
- uint64_t reserved_37_63:27;
-#endif
- } s;
- struct cvmx_ciu_timx_s cn30xx;
- struct cvmx_ciu_timx_s cn31xx;
- struct cvmx_ciu_timx_s cn38xx;
- struct cvmx_ciu_timx_s cn38xxp2;
- struct cvmx_ciu_timx_s cn50xx;
- struct cvmx_ciu_timx_s cn52xx;
- struct cvmx_ciu_timx_s cn52xxp1;
- struct cvmx_ciu_timx_s cn56xx;
- struct cvmx_ciu_timx_s cn56xxp1;
- struct cvmx_ciu_timx_s cn58xx;
- struct cvmx_ciu_timx_s cn58xxp1;
- struct cvmx_ciu_timx_s cn61xx;
- struct cvmx_ciu_timx_s cn63xx;
- struct cvmx_ciu_timx_s cn63xxp1;
- struct cvmx_ciu_timx_s cn66xx;
- struct cvmx_ciu_timx_s cn68xx;
- struct cvmx_ciu_timx_s cn68xxp1;
- struct cvmx_ciu_timx_s cnf71xx;
-};
-
-union cvmx_ciu_tim_multi_cast {
- uint64_t u64;
- struct cvmx_ciu_tim_multi_cast_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t en:1;
-#else
- uint64_t en:1;
- uint64_t reserved_1_63:63;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_37_63:27,
+ __BITFIELD_FIELD(uint64_t one_shot:1,
+ __BITFIELD_FIELD(uint64_t len:36,
+ ;)))
} s;
- struct cvmx_ciu_tim_multi_cast_s cn61xx;
- struct cvmx_ciu_tim_multi_cast_s cn66xx;
- struct cvmx_ciu_tim_multi_cast_s cnf71xx;
};

union cvmx_ciu_wdogx {
uint64_t u64;
struct cvmx_ciu_wdogx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_46_63:18;
- uint64_t gstopen:1;
- uint64_t dstop:1;
- uint64_t cnt:24;
- uint64_t len:16;
- uint64_t state:2;
- uint64_t mode:2;
-#else
- uint64_t mode:2;
- uint64_t state:2;
- uint64_t len:16;
- uint64_t cnt:24;
- uint64_t dstop:1;
- uint64_t gstopen:1;
- uint64_t reserved_46_63:18;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_46_63:18,
+ __BITFIELD_FIELD(uint64_t gstopen:1,
+ __BITFIELD_FIELD(uint64_t dstop:1,
+ __BITFIELD_FIELD(uint64_t cnt:24,
+ __BITFIELD_FIELD(uint64_t len:16,
+ __BITFIELD_FIELD(uint64_t state:2,
+ __BITFIELD_FIELD(uint64_t mode:2,
+ ;)))))))
} s;
- struct cvmx_ciu_wdogx_s cn30xx;
- struct cvmx_ciu_wdogx_s cn31xx;
- struct cvmx_ciu_wdogx_s cn38xx;
- struct cvmx_ciu_wdogx_s cn38xxp2;
- struct cvmx_ciu_wdogx_s cn50xx;
- struct cvmx_ciu_wdogx_s cn52xx;
- struct cvmx_ciu_wdogx_s cn52xxp1;
- struct cvmx_ciu_wdogx_s cn56xx;
- struct cvmx_ciu_wdogx_s cn56xxp1;
- struct cvmx_ciu_wdogx_s cn58xx;
- struct cvmx_ciu_wdogx_s cn58xxp1;
- struct cvmx_ciu_wdogx_s cn61xx;
- struct cvmx_ciu_wdogx_s cn63xx;
- struct cvmx_ciu_wdogx_s cn63xxp1;
- struct cvmx_ciu_wdogx_s cn66xx;
- struct cvmx_ciu_wdogx_s cn68xx;
- struct cvmx_ciu_wdogx_s cn68xxp1;
- struct cvmx_ciu_wdogx_s cnf71xx;
};

-#endif
+#endif /* __CVMX_CIU_DEFS_H__ */
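
Note: the conversion above collapses each hand-written #ifdef __BIG_ENDIAN_BITFIELD / #else pair into the MIPS __BITFIELD_FIELD() helper, so every register layout is spelled out once instead of twice. The helper expands its arguments in declaration order on big-endian builds and in reverse order on little-endian builds; a sketch of its definition, as found in arch/mips/include/uapi/asm/inst.h:

    /* Emit bitfields most-significant-first on big-endian MIPS,
     * least-significant-first on little-endian MIPS.
     */
    #ifdef __MIPSEB__
    #define __BITFIELD_FIELD(field, more)   \
            field;                          \
            more
    #elif defined(__MIPSEL__)
    #define __BITFIELD_FIELD(field, more)   \
            more                            \
            field;
    #else
    #error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
    #endif

Each field is nested as the "more" argument of the one before it, which is why the converted declarations end with a lone semicolon followed by a matching run of closing parentheses.
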
diff --git a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
index e347496a33c3..80e4f8358b81 100644
--- a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -2070,6 +2070,8 @@ static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull;
}

+void __cvmx_interrupt_gmxx_enable(int interface);
+
union cvmx_gmxx_bad_reg {
uint64_t u64;
struct cvmx_gmxx_bad_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
index a5e8fd861c37..39da7f9d7b3f 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -334,6 +334,8 @@ static inline uint64_t CVMX_PCSX_TX_RXX_POLARITY_REG(unsigned long offset, unsig
return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
}

+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+
union cvmx_pcsx_anx_adv_reg {
uint64_t u64;
struct cvmx_pcsx_anx_adv_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
index b5b45d26f1c5..847dd9dca6ea 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -268,6 +268,8 @@ static inline uint64_t CVMX_PCSXX_TX_RX_STATES_REG(unsigned long block_id)
return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
}

+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
union cvmx_pcsxx_10gbx_status_reg {
uint64_t u64;
struct cvmx_pcsxx_10gbx_status_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
index c7d601d9446e..f4c4e8051160 100644
--- a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)

+void __cvmx_interrupt_spxx_int_msk_enable(int index);
+
union cvmx_spxx_bckprs_cnt {
uint64_t u64;
struct cvmx_spxx_bckprs_cnt_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
index 146354005d3b..3c409a854d91 100644
--- a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
#define CVMX_STXX_STAT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_STAT_PKT_XMT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull)

+void __cvmx_interrupt_stxx_int_msk_enable(int index);
+
union cvmx_stxx_arb_ctl {
uint64_t u64;
struct cvmx_stxx_arb_ctl_s {
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index c99c4b6a79f4..60481502826a 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -279,13 +279,12 @@ union octeon_cvmemctl {
} s;
};

-extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
-extern int octeon_get_boot_uart(void);

-struct uart_port;
-extern unsigned int octeon_serial_in(struct uart_port *, int);
-extern void octeon_serial_out(struct uart_port *, int, int);
+int octeon_prune_device_tree(void);
+extern const char __appended_dtb;
+extern const char __dtb_octeon_3xxx_begin;
+extern const char __dtb_octeon_68xx_begin;

/**
* Write a 32bit value to the Octeon NPI register space
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index 1884609741a8..b12d9a3fbfb6 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -63,4 +63,7 @@ enum octeon_dma_bar_type {
*/
extern enum octeon_dma_bar_type octeon_dma_bar_type;

+void octeon_pci_dma_init(void);
+extern char *octeon_swiotlb;
+
#endif
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ad461216b5a1..e8cc328fce2d 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -80,7 +80,12 @@ extern void build_copy_page(void);
* used in our early mem init code for all memory models.
* So always define it.
*/
-#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+extern unsigned long ARCH_PFN_OFFSET;
+# define ARCH_PFN_OFFSET ARCH_PFN_OFFSET
+#else
+# define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+#endif

extern void clear_page(void * page);
extern void copy_page(void * to, void * from);
@@ -252,8 +257,8 @@ extern int __virt_addr_valid(const volatile void *kaddr);
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

-#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
-#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
+#define UNCAC_ADDR(addr) (UNCAC_BASE + __pa(addr))
+#define CAC_ADDR(addr)	((unsigned long)__va((addr) - UNCAC_BASE))

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
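
Note: two independent page.h changes are folded in here. With CONFIG_MIPS_AUTO_PFN_OFFSET the PFN offset becomes a variable chosen at boot rather than a build-time constant; defining the macro ARCH_PFN_OFFSET to the identically named extern variable works because asm-generic/memory_model.h only supplies its default when the macro is still undefined. A minimal sketch of the idiom:

    extern unsigned long ARCH_PFN_OFFSET;   /* assigned during early boot */
    #define ARCH_PFN_OFFSET ARCH_PFN_OFFSET /* keeps #ifndef defaults away */

Separately, UNCAC_ADDR()/CAC_ADDR() are rewritten in terms of __pa()/__va(), so the cached/uncached translation stays correct when the kernel's physical base is not simply PAGE_OFFSET away from its virtual addresses.
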
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index af34afbc32d9..b2fa62922d88 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -141,7 +141,7 @@ struct mips_fpu_struct {

#define NUM_DSP_REGS 6

-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;

struct mips_dsp_state {
dspreg_t dspr[NUM_DSP_REGS];
@@ -386,7 +386,20 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)

+#ifdef CONFIG_CPU_LOONGSON3
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax() smp_mb()
+#else
#define cpu_relax() barrier()
+#endif

/*
* Return_address is a replacement for __builtin_return_address(count)
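
Note: the Loongson-3 override of cpu_relax() above matters for every polling loop in the kernel: expanding it to smp_mb() drains the store-fill buffer on each iteration, so a write pending on another core can become visible to the spinning reader. A minimal sketch of the kind of loop affected, where flag is a hypothetical shared variable:

    /* Hypothetical wait loop: READ_ONCE() keeps the compiler from
     * caching the load, and cpu_relax() - smp_mb() on Loongson-3 -
     * keeps the hardware read loop from starving the remote write.
     */
    while (!READ_ONCE(flag))
            cpu_relax();
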
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index d49d247d48a1..bb36a400203d 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -2,8 +2,10 @@
#ifndef _MIPS_SETUP_H
#define _MIPS_SETUP_H

+#include <linux/types.h>
#include <uapi/asm/setup.h>

+extern void prom_putchar(char);
extern void setup_early_printk(void);

#ifdef CONFIG_EARLY_PRINTK_8250
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 195db5045ae5..0d9fad5915fe 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -31,7 +31,6 @@ extern int prom_flags;
#define PROM_FLAG_DONT_FREE_TEMP 4

/* Simple char-by-char console I/O. */
-extern void prom_putchar(char c);
extern char prom_getchar(void);

/* Get next memory descriptor after CURR, returns first descriptor
diff --git a/arch/mips/include/asm/sim.h b/arch/mips/include/asm/sim.h
index 91831800c480..59f31a95facd 100644
--- a/arch/mips/include/asm/sim.h
+++ b/arch/mips/include/asm/sim.h
@@ -39,8 +39,6 @@ __asm__( \
".end\t__" #symbol "\n\t" \
".size\t__" #symbol",. - __" #symbol)

-#define nabi_no_regargs
-
#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT
@@ -67,16 +65,6 @@ __asm__( \
".end\t__" #symbol "\n\t" \
".size\t__" #symbol",. - __" #symbol)

-#define nabi_no_regargs \
- unsigned long __dummy0, \
- unsigned long __dummy1, \
- unsigned long __dummy2, \
- unsigned long __dummy3, \
- unsigned long __dummy4, \
- unsigned long __dummy5, \
- unsigned long __dummy6, \
- unsigned long __dummy7,
-
#endif /* CONFIG_64BIT */
#endif /* _ASM_SIM_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 88ebd83b3bf9..056a6bf13491 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
/* Map from cpu id to sequential logical cpu number. This will only
not be idempotent when cpus failed to come on-line. */
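The VDSO guard above relies on a build-time trick: a call to an extern function
declared with __compiletime_error() fails compilation at any call site that
survives into the object code, so VDSO code that reaches smp_processor_id() is
rejected by the compiler rather than at link or run time. A minimal sketch of the
idiom, with invented names:

    extern int not_allowed_here(void)
            __compiletime_error("must not be called from this context");

    static inline int cpu_number(void)
    {
    #ifdef RESTRICTED_BUILD
            return not_allowed_here();      /* build breaks here if reached */
    #else
            return current_thread_info()->cpu;
    #endif
    }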
diff --git a/arch/mips/include/asm/txx9/generic.h b/arch/mips/include/asm/txx9/generic.h
index 64887d3c7ec3..9a2c47bf3c40 100644
--- a/arch/mips/include/asm/txx9/generic.h
+++ b/arch/mips/include/asm/txx9/generic.h
@@ -49,7 +49,6 @@ void txx9_spi_init(int busid, unsigned long base, int irq);
void txx9_ethaddr_init(unsigned int id, unsigned char *ethaddr);
void txx9_sio_init(unsigned long baseaddr, int irq,
unsigned int line, unsigned int sclk, int nocts);
-void prom_putchar(char c);
#ifdef CONFIG_EARLY_PRINTK
extern void (*txx9_prom_putchar)(char c);
void txx9_sio_putchar_init(unsigned long baseaddr);
diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
index 6d667087f2aa..00805ac6e9fc 100644
--- a/arch/mips/include/asm/txx9/tx4939.h
+++ b/arch/mips/include/asm/txx9/tx4939.h
@@ -101,13 +101,6 @@ struct tx4939_irc_reg {
struct tx4939_le_reg maskext;
};
-struct tx4939_rtc_reg {
- __u32 ctl;
- __u32 adr;
- __u32 dat;
- __u32 tbc;
-};
-
struct tx4939_crypto_reg {
struct tx4939_le_reg csr;
struct tx4939_le_reg idesptr;
@@ -370,26 +363,6 @@ struct tx4939_vpc_desc {
#define TX4939_CLKCTR_CYPRST 0x00000001
/*
- * RTC
- */
-#define TX4939_RTCCTL_ALME 0x00000080
-#define TX4939_RTCCTL_ALMD 0x00000040
-#define TX4939_RTCCTL_BUSY 0x00000020
-
-#define TX4939_RTCCTL_COMMAND 0x00000007
-#define TX4939_RTCCTL_COMMAND_NOP 0x00000000
-#define TX4939_RTCCTL_COMMAND_GETTIME 0x00000001
-#define TX4939_RTCCTL_COMMAND_SETTIME 0x00000002
-#define TX4939_RTCCTL_COMMAND_GETALARM 0x00000003
-#define TX4939_RTCCTL_COMMAND_SETALARM 0x00000004
-
-#define TX4939_RTCTBC_PM 0x00000080
-#define TX4939_RTCTBC_COMP 0x0000007f
-
-#define TX4939_RTC_REG_RAMSIZE 0x00000100
-#define TX4939_RTC_REG_RWBSIZE 0x00000006
-
-/*
* CRYPTO
*/
#define TX4939_CRYPTO_CSR_SAESO 0x08000000
@@ -498,8 +471,6 @@ struct tx4939_vpc_desc {
#define tx4939_ccfgptr \
((struct tx4939_ccfg_reg __iomem *)TX4939_CCFG_REG)
#define tx4939_sramcptr tx4938_sramcptr
-#define tx4939_rtcptr \
- ((struct tx4939_rtc_reg __iomem *)TX4939_RTC_REG)
#define tx4939_cryptoptr \
((struct tx4939_crypto_reg __iomem *)TX4939_CRYPTO_REG)
#define tx4939_vpcptr ((struct tx4939_vpc_reg __iomem *)TX4939_VPC_REG)
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d626a9a391cc..d31bc2f01208 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -16,6 +16,8 @@
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <asm/mipsregs.h>
#include <asm/jazz.h>
#include <asm/io.h>
@@ -86,6 +88,7 @@ static int __init vdma_init(void)
printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
return 0;
}
+arch_initcall(vdma_init);
/*
* Allocate DMA pagetables using a simple first-fit algorithm
@@ -556,4 +559,140 @@ int vdma_get_enable(int channel)
return enable;
}
-arch_initcall(vdma_init);
+static void *jazz_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ void *ret;
+
+ ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!ret)
+ return NULL;
+
+ *dma_handle = vdma_alloc(virt_to_phys(ret), size);
+ if (*dma_handle == VDMA_ERROR) {
+ dma_direct_free(dev, size, ret, *dma_handle, attrs);
+ return NULL;
+ }
+
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ dma_cache_wback_inv((unsigned long)ret, size);
+ ret = (void *)UNCAC_ADDR(ret);
+ }
+ return ret;
+}
+
+static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ vdma_free(dma_handle);
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+ vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+ return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+}
+
+static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, phys, size, dir);
+ return vdma_alloc(phys, size);
+}
+
+static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+ vdma_free(dma_addr);
+}
+
+static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ dir);
+ sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
+ if (sg->dma_address == VDMA_ERROR)
+ return 0;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
+ dir);
+ vdma_free(sg->dma_address);
+ }
+}
+
+static void jazz_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void jazz_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == VDMA_ERROR;
+}
+
+const struct dma_map_ops jazz_dma_ops = {
+ .alloc = jazz_dma_alloc,
+ .free = jazz_dma_free,
+ .mmap = arch_dma_mmap,
+ .map_page = jazz_dma_map_page,
+ .unmap_page = jazz_dma_unmap_page,
+ .map_sg = jazz_dma_map_sg,
+ .unmap_sg = jazz_dma_unmap_sg,
+ .sync_single_for_cpu = jazz_dma_sync_single_for_cpu,
+ .sync_single_for_device = jazz_dma_sync_single_for_device,
+ .sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = jazz_dma_sync_sg_for_device,
+ .dma_supported = dma_direct_supported,
+ .cache_sync = arch_dma_cache_sync,
+ .mapping_error = jazz_dma_mapping_error,
+};
+EXPORT_SYMBOL(jazz_dma_ops);
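The new ops route the generic DMA API through the Jazz R4030 VDMA page tables
instead of the old bus-address shortcut. A hypothetical driver-side sketch of how
they are exercised (dev, buf and len are invented for illustration):

    dma_addr_t handle;

    /* ends up in jazz_dma_map_page() and returns a VDMA address */
    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle))     /* catches VDMA_ERROR */
            return -ENOMEM;
    /* ... program the device with 'handle' ... */
    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);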
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 448fd41792e4..1b5e121c3f0d 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -16,6 +16,7 @@
#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <linux/dma-mapping.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
@@ -136,10 +137,16 @@ static struct resource jazz_esp_rsrc[] = {
}
};
+static u64 jazz_esp_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device jazz_esp_pdev = {
.name = "jazz_esp",
.num_resources = ARRAY_SIZE(jazz_esp_rsrc),
- .resource = jazz_esp_rsrc
+ .resource = jazz_esp_rsrc,
+ .dev = {
+ .dma_mask = &jazz_esp_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
};
static struct resource jazz_sonic_rsrc[] = {
@@ -155,10 +162,16 @@ static struct resource jazz_sonic_rsrc[] = {
}
};
+static u64 jazz_sonic_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device jazz_sonic_pdev = {
.name = "jazzsonic",
.num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
- .resource = jazz_sonic_rsrc
+ .resource = jazz_sonic_rsrc,
+ .dev = {
+ .dma_mask = &jazz_sonic_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
};
static struct resource jazz_cmos_rsrc[] = {
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 28448d358c10..a2a5a85ea1f9 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,4 +1,4 @@
platform-$(CONFIG_MACH_INGENIC) += jz4740/
cflags-$(CONFIG_MACH_INGENIC) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
load-$(CONFIG_MACH_INGENIC) += 0xffffffff80010000
-zload-$(CONFIG_MACH_INGENIC) += 0xffffffff80600000
+zload-$(CONFIG_MACH_INGENIC) += 0xffffffff81000000
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b2509c19cfb5..d535fc706a8b 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1849,7 +1849,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
c->cputype = CPU_LOONGSON3;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 505cb77d1280..4a1647ddfbd9 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -14,8 +14,6 @@
#include <asm/setup.h>
-extern void prom_putchar(char);
-
static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
index 83cea3767556..ea26614afac6 100644
--- a/arch/mips/kernel/early_printk_8250.c
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
+#include <asm/setup.h>
static void __iomem *serial8250_base;
static unsigned int serial8250_reg_shift;
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 37b9383eacd3..6c257b52f57f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -354,16 +354,56 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp)
sll k0, k0, 30 # Check for SDBBP.
bgez k0, ejtag_return
+#ifdef CONFIG_SMP
+1: PTR_LA k0, ejtag_debug_buffer_spinlock
+ ll k0, 0(k0)
+ bnez k0, 1b
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sc k0, 0(k0)
+ beqz k0, 1b
+# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+ sync
+# endif
+
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_S k1, 0(k0)
+
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+
+ PTR_LA k1, ejtag_debug_buffer
+ LONG_L k1, 0(k1)
+ LONG_S k1, 0(k0)
+
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sw zero, 0(k0)
+#else
PTR_LA k0, ejtag_debug_buffer
LONG_S k1, 0(k0)
+#endif
+
SAVE_ALL
move a0, sp
jal ejtag_exception_handler
RESTORE_ALL
+
+#ifdef CONFIG_SMP
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+ LONG_L k1, 0(k0)
+#else
PTR_LA k0, ejtag_debug_buffer
LONG_L k1, 0(k0)
+#endif
ejtag_return:
+ back_to_back_c0_hazard
MFC0 k0, CP0_DESAVE
.set mips32
deret
@@ -377,6 +417,12 @@ ejtag_return:
.data
EXPORT(ejtag_debug_buffer)
.fill LONGSIZE
+#ifdef CONFIG_SMP
+EXPORT(ejtag_debug_buffer_spinlock)
+ .fill LONGSIZE
+EXPORT(ejtag_debug_buffer_per_cpu)
+ .fill LONGSIZE * NR_CPUS
+#endif
.previous
__INIT
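Because the EJTAG handler runs with only k0/k1 usable and no stack, the SMP path
above serializes CPUs through a hand-rolled ll/sc lock before staging k1 into a
per-CPU slot. Roughly, in C (a pseudocode sketch of the assembly's intent, not a
drop-in replacement for it):

    /* taken with ll/sc; the stored lock value is merely non-zero */
    while (cmpxchg(&ejtag_debug_buffer_spinlock, 0, 1) != 0)
            ;                                       /* the 1: spin loop   */
    ejtag_debug_buffer = saved_k1;                  /* shared staging slot */
    ejtag_debug_buffer_per_cpu[smp_processor_id()] = ejtag_debug_buffer;
    ejtag_debug_buffer_spinlock = 0;                /* the "sw zero" unlock */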
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 7c246b69c545..046846999efd 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -33,21 +33,21 @@
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
-static void r3081_wait(void)
+static void __cpuidle r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
local_irq_enable();
}
-static void r39xx_wait(void)
+static void __cpuidle r39xx_wait(void)
{
if (!need_resched())
write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
local_irq_enable();
}
-void r4k_wait(void)
+void __cpuidle r4k_wait(void)
{
local_irq_enable();
__r4k_wait();
@@ -60,7 +60,7 @@ void r4k_wait(void)
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
-void r4k_wait_irqoff(void)
+void __cpuidle r4k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
@@ -75,7 +75,7 @@ void r4k_wait_irqoff(void)
* The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
-static void rm7k_wait_irqoff(void)
+static void __cpuidle rm7k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
@@ -96,7 +96,7 @@ static void rm7k_wait_irqoff(void)
* since coreclock (and the cp0 counter) stops upon executing it. Only an
* interrupt can wake it, so they must be enabled before entering idle modes.
*/
-static void au1k_wait(void)
+static void __cpuidle au1k_wait(void)
{
unsigned long c0status = read_c0_status() | 1; /* irqs on */
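The __cpuidle annotation places these routines in the .cpuidle.text section, so
code inspecting a remote program counter (for instance the NMI backtrace logic)
can tell that a CPU is merely idling. Any future platform wait routine would be
tagged the same way; a sketch with a hypothetical board name:

    static void __cpuidle my_board_wait(void)
    {
            local_irq_enable();
            __asm__ __volatile__(".set\tmips3\n\t"
                                 "wait\n\t"
                                 ".set\tmips0");
    }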
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index f5c8bce70db2..54cd675c5d1d 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -326,19 +326,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
preempt_enable_no_resched();
}
return 1;
- } else {
- if (addr->word != breakpoint_insn.word) {
- /*
- * The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ } else if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
}
goto no_kprobe;
}
@@ -364,10 +358,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
}
-ss_probe:
prepare_singlestep(p, regs, kcb);
if (kcb->flags & SKIP_DELAYSLOT) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
@@ -468,51 +463,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = regs->regs[29];
-
- memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-
- regs->cp0_epc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-/* Defined in the inline asm below. */
-void jprobe_return_end(void);
-
-void __kprobes jprobe_return(void)
-{
- /* Assembler quirk necessitates this '0,code' business. */
- asm volatile(
- "break 0,%0\n\t"
- ".globl jprobe_return_end\n"
- "jprobe_return_end:\n"
- : : "n" (BRK_KPROBE_BP) : "memory");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->cp0_epc >= (unsigned long)jprobe_return &&
- regs->cp0_epc <= (unsigned long)jprobe_return_end) {
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
- preempt_enable_no_resched();
-
- return 1;
- }
- return 0;
-}
-
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
@@ -595,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_assert(ri, orig_ret_address, trampoline_address);
instruction_pointer(regs) = orig_ret_address;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
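With jprobes gone, a pre_handler that returns non-zero no longer falls through to
a break_handler; kprobe_handler() itself now resets the kprobe state and
re-enables preemption, as added above. A hypothetical pre_handler following the
new convention (my_detour is an invented target):

    static int my_pre(struct kprobe *p, struct pt_regs *regs)
    {
            /* redirect execution; skip single-step setup entirely */
            instruction_pointer(regs) = (unsigned long)my_detour;
            return 1;
    }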
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 318f1c05c5b3..6b61be486303 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -43,17 +43,6 @@
#include <asm/mmu_context.h>
#include <asm/mman.h>
-/* Use this to get at 32-bit user passed pointers. */
-/* A() macro should be used for places where you e.g.
- have some internal variable u32 and just want to get
- rid of a compiler warning. AA() has to be used in
- places where you want to convert a function argument
- to 32bit pointer or when you e.g. access pt_regs
- structure and want to consider 32bit registers only.
- */
-#define A(__x) ((unsigned long)(__x))
-#define AA(__x) ((unsigned long)((int)__x))
-
#ifdef __MIPSEB__
#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
@@ -61,24 +50,6 @@
#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
-SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags, unsigned long, fd,
- unsigned long, pgoff)
-{
- if (pgoff & (~PAGE_MASK >> 12))
- return -EINVAL;
- return ksys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT-12));
-}
-
-#define RLIM_INFINITY32 0x7fffffff
-#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
-
-struct rlimit32 {
- int rlim_cur;
- int rlim_max;
-};
-
SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
{
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 9670e70139fd..8fc69891e117 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -30,6 +30,7 @@
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>
+#include <linux/cpu.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
@@ -706,19 +707,25 @@ int mips_get_process_fp_mode(struct task_struct *task)
return value;
}
-static void prepare_for_fp_mode_switch(void *info)
+static long prepare_for_fp_mode_switch(void *unused)
{
- struct mm_struct *mm = info;
-
- if (current->mm == mm)
- lose_fpu(1);
+ /*
+ * This is icky, but we use this to simply ensure that all CPUs have
+ * context switched, regardless of whether they were previously running
+ * kernel or user code. This ensures that no CPU currently has its FPU
+ * enabled, or is about to attempt to enable it through any path other
+ * than enable_restore_fp_context() which will wait appropriately for
+ * fp_mode_switching to be zero.
+ */
+ return 0;
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
struct task_struct *t;
- int max_users;
+ struct cpumask process_cpus;
+ int cpu;
/* If nothing to change, return right away, successfully. */
if (value == mips_get_process_fp_mode(task))
@@ -751,35 +758,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
- /* Proceed with the mode switch */
- preempt_disable();
-
- /* Save FP & vector context, then disable FPU & MSA */
- if (task->signal == current->signal)
- lose_fpu(1);
-
- /* Prevent any threads from obtaining live FP context */
- atomic_set(&task->mm->context.fp_mode_switching, 1);
- smp_mb__after_atomic();
-
- /*
- * If there are multiple online CPUs then force any which are running
- * threads in this process to lose their FPU context, which they can't
- * regain until fp_mode_switching is cleared later.
- */
- if (num_online_cpus() > 1) {
- /* No need to send an IPI for the local CPU */
- max_users = (task->mm == current->mm) ? 1 : 0;
-
- if (atomic_read(&current->mm->mm_users) > max_users)
- smp_call_function(prepare_for_fp_mode_switch,
- (void *)current->mm, 1);
- }
-
- /*
- * There are now no threads of the process with live FP context, so it
- * is safe to proceed with the FP mode switch.
- */
+ /* Indicate the new FP mode in each thread */
for_each_thread(task, t) {
/* Update desired FP register width */
if (value & PR_FP_MODE_FR) {
@@ -796,9 +775,34 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
}
- /* Allow threads to use FP again */
- atomic_set(&task->mm->context.fp_mode_switching, 0);
- preempt_enable();
+ /*
+ * We need to ensure that all threads in the process have switched mode
+ * before returning, in order to allow userland to not worry about
+ * races. We can do this by forcing all CPUs that any thread in the
+ * process may be running on to schedule something else - in this case
+ * prepare_for_fp_mode_switch().
+ *
+ * We begin by generating a mask of all CPUs that any thread in the
+ * process may be running on.
+ */
+ cpumask_clear(&process_cpus);
+ for_each_thread(task, t)
+ cpumask_set_cpu(task_cpu(t), &process_cpus);
+
+ /*
+ * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
+ *
+ * The CPUs may have rescheduled already since we switched mode or
+ * generated the cpumask, but that doesn't matter. If the task in this
+ * process is scheduled out then our scheduled
+ * prepare_for_fp_mode_switch() will simply be redundant. If it's
+ * scheduled in then it will already have picked up the new FP mode
+ * whilst doing so.
+ */
+ get_online_cpus();
+ for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
+ work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
+ put_online_cpus();
wake_up_var(&task->mm->context.fp_mode_switching);
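This path is driven from userland via prctl(). A minimal caller sketch (error
handling beyond perror() elided):

    #include <stdio.h>
    #include <sys/prctl.h>

    /* request 64-bit FP registers (FR=1) for every thread in the process */
    if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) != 0)
            perror("PR_SET_FP_MODE");
    /* on success, no thread can still be executing in the old FP mode */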
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9f6c3f2aa2e2..e5ba56c01ee0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -41,6 +41,7 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
@@ -589,9 +590,226 @@ static int fpr_set(struct task_struct *target,
return err;
}
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+/*
+ * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
+ */
+static int dsp32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ dspregs[i] = target->thread.dsp.dspr[i];
+ break;
+ case NUM_DSP_REGS:
+ dspregs[i] = target->thread.dsp.dspcontrol;
+ break;
+ }
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = (s32)dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = (s32)dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
+ */
+static int dsp64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ dspregs[i] = target->thread.dsp.dspr[i];
+ break;
+ case NUM_DSP_REGS:
+ dspregs[i] = target->thread.dsp.dspcontrol;
+ break;
+ }
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * Determine whether the DSP context is present.
+ */
+static int dsp_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
+}
+
+/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
+static int fp_mode_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int fp_mode;
+
+ fp_mode = mips_get_process_fp_mode(target);
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+}
+
+/*
+ * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
+ *
+ * We optimize for the case where `count % sizeof(int) == 0', which
+ * is supposed to have been guaranteed by the kernel before calling
+ * us, e.g. in `ptrace_regset'. We enforce that requirement, so
+ * that we can safely avoid preinitializing temporaries for partial
+ * mode writes.
+ */
+static int fp_mode_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int fp_mode;
+ int err;
+
+ BUG_ON(count % sizeof(int));
+
+ if (pos + count > sizeof(fp_mode))
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+ if (err)
+ return err;
+
+ if (count > 0)
+ err = mips_set_process_fp_mode(target, fp_mode);
+
+ return err;
+}
+
enum mips_regset {
REGSET_GPR,
REGSET_FPR,
+ REGSET_DSP,
+ REGSET_FP_MODE,
};
struct pt_regs_offset {
@@ -697,6 +915,23 @@ static const struct user_regset mips_regsets[] = {
.get = fpr_get,
.set = fpr_set,
},
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .get = dsp32_get,
+ .set = dsp32_set,
+ .active = dsp_active,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = fp_mode_get,
+ .set = fp_mode_set,
+ },
};
static const struct user_regset_view user_mips_view = {
@@ -728,6 +963,23 @@ static const struct user_regset mips64_regsets[] = {
.get = fpr_get,
.set = fpr_set,
},
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .get = dsp64_get,
+ .set = dsp64_set,
+ .active = dsp_active,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = fp_mode_get,
+ .set = fp_mode_set,
+ },
};
static const struct user_regset_view user_mips64_view = {
@@ -856,7 +1108,7 @@ long arch_ptrace(struct task_struct *child, long request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
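A tracer reads the new regset through PTRACE_GETREGSET. A hypothetical snippet
for an o32 tracee (u32 slots; the fallback define matches the kernel's uapi value
in case the libc elf.h predates NT_MIPS_DSP):

    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>

    #ifndef NT_MIPS_DSP
    #define NT_MIPS_DSP 0x800
    #endif
    #define MIPS_NUM_DSP_REGS 6             /* NUM_DSP_REGS */

    uint32_t dsp[MIPS_NUM_DSP_REGS + 1];    /* 6 accumulators + DSPControl */
    struct iovec iov = { .iov_base = dsp, .iov_len = sizeof(dsp) };

    if (ptrace(PTRACE_GETREGSET, pid, NT_MIPS_DSP, &iov) == 0)
            printf("DSPControl = %08x\n", dsp[MIPS_NUM_DSP_REGS]);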
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 7edc629304c8..bc348d44d151 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -142,7 +142,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index c6bbf2165051..419c92197b2f 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -85,7 +85,7 @@ done:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
/* We need to flush I-cache before jumping to new kernel.
- * Unfortunatelly, this code is cpu-specific.
+ * Unfortunately, this code is cpu-specific.
*/
.set push
.set noreorder
@@ -145,7 +145,7 @@ LEAF(kexec_smp_wait)
#endif
/* All parameters to new kernel are passed in registers a0-a3.
- * kexec_args[0..3] are uses to prepare register values.
+ * kexec_args[0..3] are used to prepare register values.
*/
kexec_args:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2c96c0c68116..c71d1eb7da59 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -36,6 +36,7 @@
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
+#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
@@ -84,6 +85,11 @@ static struct resource bss_resource = { .name = "Kernel bss", };
static void *detect_magic __initdata = detect_memory_region;
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+unsigned long ARCH_PFN_OFFSET;
+EXPORT_SYMBOL(ARCH_PFN_OFFSET);
+#endif
+
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
int x = boot_mem_map.nr_map;
@@ -441,6 +447,12 @@ static void __init bootmem_init(void)
mapstart = max(reserved_end, start);
}
+ if (min_low_pfn >= max_low_pfn)
+ panic("Incorrect memory mapping !!!");
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+ ARCH_PFN_OFFSET = PFN_UP(ramstart);
+#else
/*
* Reserve any memory between the start of RAM and PHYS_OFFSET
*/
@@ -448,8 +460,6 @@ static void __init bootmem_init(void)
add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
BOOT_MEM_RESERVED);
- if (min_low_pfn >= max_low_pfn)
- panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
@@ -459,6 +469,7 @@ static void __init bootmem_init(void)
ARCH_PFN_OFFSET - min_low_pfn);
}
min_low_pfn = ARCH_PFN_OFFSET;
+#endif
/*
* Determine low and high memory ranges
@@ -1055,3 +1066,26 @@ static int __init debugfs_mips(void)
}
arch_initcall(debugfs_mips);
#endif
+
+#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
+/* User defined DMA coherency from command line. */
+enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_ENABLED;
+ pr_info("Hardware DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_DISABLED;
+ pr_info("Software DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+#endif
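For reference, the relocated early_param handlers preserve the existing
command-line interface: on CONFIG_DMA_MAYBE_COHERENT platforms (without
CONFIG_DMA_PERDEV_COHERENT), appending "coherentio" or "nocoherentio" selects
hardware- or software-maintained DMA coherency, e.g.

    console=ttyS0,115200 nocoherentio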
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 0a9cfe7a0372..109ed163a6a6 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -592,13 +592,15 @@ SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
#endif
#ifdef CONFIG_TRAD_SIGNALS
-asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_sigreturn(void)
{
struct sigframe __user *frame;
+ struct pt_regs *regs;
sigset_t blocked;
int sig;
- frame = (struct sigframe __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct sigframe __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
@@ -606,7 +608,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&blocked);
- sig = restore_sigcontext(&regs, &frame->sf_sc);
+ sig = restore_sigcontext(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
@@ -618,8 +620,8 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
@@ -627,13 +629,15 @@ badframe:
}
#endif /* CONFIG_TRAD_SIGNALS */
-asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_rt_sigreturn(void)
{
struct rt_sigframe __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
@@ -641,7 +645,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -656,8 +660,8 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
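current_pt_regs() is the generic helper that replaces the old pass-by-value
register frame: it returns a pointer to the saved user context at the top of the
current kernel stack, so the sigreturn handlers no longer depend on the fragile
nabi_no_regargs calling-convention hack. A sketch mirroring the converted
handlers:

    struct pt_regs *regs = current_pt_regs();
    unsigned long user_sp = regs->regs[29];     /* user $sp on MIPS */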
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index b672cebb4a1a..8f65aaf9206d 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -64,13 +64,15 @@ struct rt_sigframe_n32 {
struct ucontextn32 rs_uc;
};
-asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sysn32_rt_sigreturn(void)
{
struct rt_sigframe_n32 __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe_n32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -78,7 +80,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -93,8 +95,8 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
index 2b3572fb5f1b..b6e3ddef48a0 100644
--- a/arch/mips/kernel/signal_o32.c
+++ b/arch/mips/kernel/signal_o32.c
@@ -151,13 +151,15 @@ static int setup_frame_32(void *sig_return, struct ksignal *ksig,
return 0;
}
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_rt_sigreturn(void)
{
struct rt_sigframe32 __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -165,7 +167,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -180,8 +182,8 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
@@ -251,13 +253,15 @@ struct mips_abi mips_abi_32 = {
};
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_sigreturn(void)
{
struct sigframe32 __user *frame;
+ struct pt_regs *regs;
sigset_t blocked;
int sig;
- frame = (struct sigframe32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct sigframe32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
@@ -265,7 +269,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&blocked);
- sig = restore_sigcontext32(&regs, &frame->sf_sc);
+ sig = restore_sigcontext32(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
@@ -277,8 +281,8 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8d505a21396e..f8871d5b7eb3 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1221,13 +1221,6 @@ static int enable_restore_fp_context(int msa)
{
int err, was_fpu_owner, prior_msa;
- /*
- * If an FP mode switch is currently underway, wait for it to
- * complete before proceeding.
- */
- wait_var_event(&current->mm->context.fp_mode_switching,
- !atomic_read(&current->mm->context.fp_mode_switching));
-
if (!used_math()) {
/* First time FP context user. */
preempt_disable();
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7cd76f93a438..f7ea8e21656b 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -515,7 +515,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
dvcpu->arch.wait = 0;
if (swq_has_sleeper(&dvcpu->wq))
- swake_up(&dvcpu->wq);
+ swake_up_one(&dvcpu->wq);
return 0;
}
@@ -1204,7 +1204,7 @@ static void kvm_mips_comparecount_func(unsigned long data)
vcpu->arch.wait = 0;
if (swq_has_sleeper(&vcpu->wq))
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
}
/* low level hrtimer wake routine */
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 44bccaee822b..c4aa140b7c91 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <lantiq_soc.h>
+#include <asm/setup.h>
#define ASC_BUF 1024
#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 9ff7ccde9de0..d984bd5c2ec5 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -9,7 +9,6 @@
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/bootmem.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
@@ -114,10 +113,3 @@ void __init prom_init(void)
panic("failed to register_vsmp_smp_ops()");
#endif
}
-
-int __init plat_of_setup(void)
-{
- return of_platform_default_populate(NULL, NULL, NULL);
-}
-
-arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 805b3a6ab2d6..4b9fbb6744ad 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -130,10 +130,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
unsigned long flags;
ch->desc = 0;
- ch->desc_base = dma_alloc_coherent(NULL,
+ ch->desc_base = dma_zalloc_coherent(NULL,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
- memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
spin_lock_irqsave(&ltq_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 17e15b50a551..37b8fc5b9ac9 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
#include <asm/cpu.h>
+#include <asm/setup.h>
#include "at93c.h"
#include <asm/lasat/eeprom.h>
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 1cc306520a55..3a6f34ef5ffc 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -195,6 +195,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
+ move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
STORE_BYTE(1)
@@ -231,16 +232,25 @@
#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
- PTR_SUBU a2, $0, t0
+ /*
+ * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
+ * a2 = a2 - t0 + 1
+ */
+ PTR_SUBU a2, t0
jr ra
PTR_ADDIU a2, 1
#endif /* CONFIG_CPU_MIPSR6 */
.Lfirst_fixup\@:
+ /* unset_bytes already in a2 */
jr ra
nop
.Lfwd_fixup\@:
+ /*
+ * unset_bytes = partial_start_addr + #bytes - fault_addr
+ * a2 = t1 + (a2 & 3f) - $28->task->BUADDR
+ */
PTR_L t0, TI_TASK($28)
andi a2, 0x3f
LONG_L t0, THREAD_BUADDR(t0)
@@ -249,6 +259,10 @@
LONG_SUBU a2, t0
.Lpartial_fixup\@:
+ /*
+ * unset_bytes = partial_end_addr + #bytes - fault_addr
+ * a2 = a0 + (a2 & STORMASK) - $28->task->BUADDR
+ */
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
@@ -257,10 +271,15 @@
LONG_SUBU a2, t0
.Llast_fixup\@:
+ /* unset_bytes already in a2 */
jr ra
nop
.Lsmall_fixup\@:
+ /*
+ * unset_bytes = end_addr - current_addr + 1
+ * a2 = t1 - a0 + 1
+ */
PTR_SUBU a2, t1, a0
jr ra
PTR_ADDIU a2, 1
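Each fixup label leaves the count of bytes left unset in a2 for __copy_user-style
callers. The .Lpartial_fixup arithmetic, written out in C (a pseudocode rendering
of the assembly; STORMASK and the names are illustrative):

    /* a2 = a0 + (a2 & STORMASK) - thread->bad_uaddr */
    static unsigned long partial_unset_bytes(unsigned long dest,
                                             unsigned long len,
                                             unsigned long fault_addr)
    {
            return dest + (len & STORMASK) - fault_addr;
    }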
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
index ffe01c6d0037..a0dbb3b2f2de 100644
--- a/arch/mips/loongson32/Platform
+++ b/arch/mips/loongson32/Platform
@@ -1,8 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += \
- $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32r2 -Wa,--trap
-
+cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32 -Wa,--trap
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
-load-$(CONFIG_LOONGSON1_LS1B) += 0xffffffff80100000
-load-$(CONFIG_LOONGSON1_LS1C) += 0xffffffff80100000
+load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80100000
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index c79e6a565572..c865b4b9b775 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -91,7 +91,6 @@ config LOONGSON_MACH3X
select LOONGSON_MC146818
select ZONE_DMA32
select LEFI_FIRMWARE_INTERFACE
- select PHYS48_TO_HT40
help
Generic Loongson 3 family machines utilize the 3A/3B revision
of Loongson processor and RS780/SBX00 chipset.
@@ -130,10 +129,6 @@ config LOONGSON_UART_BASE
default y
depends on EARLY_PRINTK || SERIAL_8250
-config PHYS48_TO_HT40
- bool
- default y if CPU_LOONGSON3
-
config LOONGSON_MC146818
bool
default n
diff --git a/arch/mips/loongson64/common/Makefile b/arch/mips/loongson64/common/Makefile
index 8235ac7eac95..57ee03022941 100644
--- a/arch/mips/loongson64/common/Makefile
+++ b/arch/mips/loongson64/common/Makefile
@@ -6,6 +6,7 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
bonito-irq.o mem.o machtype.o platform.o serial.o
obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_CPU_LOONGSON2) += dma.o
#
# Serial port support
@@ -25,8 +26,3 @@ obj-$(CONFIG_CS5536) += cs5536/
#
obj-$(CONFIG_SUSPEND) += pm.o
-
-#
-# Big Memory (SWIOTLB) Support
-#
-obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
index f7c905e50dc4..92dc6bafc127 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
@@ -138,7 +138,7 @@ u32 pci_ohci_read_reg(int reg)
break;
case PCI_OHCI_INT_REG:
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
- if ((lo & 0x00000f00) == CS5536_USB_INTR)
+ if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
conf_data = 1;
break;
default:
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
deleted file mode 100644
index 6a739f8ae110..000000000000
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
-
-#include <asm/bootinfo.h>
-#include <boot_param.h>
-#include <dma-coherence.h>
-
-static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
- mb();
- return ret;
-}
-
-static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
- dir, attrs);
- mb();
- return daddr;
-}
-
-static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
- mb();
-
- return r;
-}
-
-static void loongson_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- swiotlb_sync_single_for_device(dev, dma_handle, size, dir);
- mb();
-}
-
-static void loongson_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- swiotlb_sync_sg_for_device(dev, sg, nents, dir);
- mb();
-}
-
-static int loongson_dma_supported(struct device *dev, u64 mask)
-{
- if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits))
- return 0;
- return swiotlb_dma_supported(dev, mask);
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
- /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
- * Loongson-3's 48bit address space and embed it into 40bit */
- nid = (paddr >> 44) & 0x3;
- paddr = ((nid << 44) ^ paddr) | (nid << 37);
-#endif
- return paddr;
-}
-
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
- /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
- * Loongson-3's 48bit address space and embed it into 40bit */
- nid = (daddr >> 37) & 0x3;
- daddr = ((nid << 37) ^ daddr) | (nid << 44);
-#endif
- return daddr;
-}
-
-static const struct dma_map_ops loongson_dma_map_ops = {
- .alloc = loongson_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = loongson_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = loongson_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = loongson_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = loongson_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = loongson_dma_supported,
-};
-
-void __init plat_swiotlb_setup(void)
-{
- swiotlb_init(1);
- mips_dma_map_ops = &loongson_dma_map_ops;
-}
diff --git a/arch/mips/loongson64/common/dma.c b/arch/mips/loongson64/common/dma.c
new file mode 100644
index 000000000000..48f04126bde2
--- /dev/null
+++ b/arch/mips/loongson64/common/dma.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr | 0x80000000;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
+ if (dma_addr > 0x8fffffff)
+ return dma_addr;
+ return dma_addr & 0x0fffffff;
+#else
+ return dma_addr & 0x7fffffff;
+#endif
+}
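On these Loongson-2 systems the device sees RAM through a window with bit 31 set,
so the conversion is a fixed relocation. A hypothetical round trip through the
helpers above (dev stands in for any device; values invented):

    phys_addr_t pa   = 0x00400000;
    dma_addr_t  da   = __phys_to_dma(dev, pa);      /* 0x80400000 */
    phys_addr_t back = __dma_to_phys(dev, da);      /* 0x00400000 again */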
diff --git a/arch/mips/loongson64/common/early_printk.c b/arch/mips/loongson64/common/early_printk.c
index 6ca632e529dc..a782e2b24747 100644
--- a/arch/mips/loongson64/common/early_printk.c
+++ b/arch/mips/loongson64/common/early_printk.c
@@ -10,6 +10,7 @@
* option) any later version.
*/
#include <linux/serial_reg.h>
+#include <asm/setup.h>
#include <loongson.h>
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 1e8a955ae5a8..8f68ee02a8c2 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -198,7 +198,8 @@ void __init prom_init_env(void)
break;
case PRID_REV_LOONGSON3A_R1:
case PRID_REV_LOONGSON3A_R2:
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
cpu_clock_freq = 900000000;
break;
case PRID_REV_LOONGSON3B_R1:
diff --git a/arch/mips/loongson64/loongson-3/Makefile b/arch/mips/loongson64/loongson-3/Makefile
index 44bc1482158b..b5a0c2fa5446 100644
--- a/arch/mips/loongson64/loongson-3/Makefile
+++ b/arch/mips/loongson64/loongson-3/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for Loongson-3 family machines
#
-obj-y += irq.o cop2-ex.o platform.o acpi_init.o
+obj-y += irq.o cop2-ex.o platform.o acpi_init.o dma.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/loongson64/loongson-3/dma.c b/arch/mips/loongson64/loongson-3/dma.c
new file mode 100644
index 000000000000..5e86635f71db
--- /dev/null
+++ b/arch/mips/loongson64/loongson-3/dma.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+#include <linux/init.h>
+#include <linux/swiotlb.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	/* Extract the 2-bit node id (bits 44~47; only 44~45 are used now) from
+	 * Loongson-3's 48-bit address space and embed it at bit 37 (40-bit DMA). */
+ long nid = (paddr >> 44) & 0x3;
+ return ((nid << 44) ^ paddr) | (nid << 37);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	/* Recover the 2-bit node id from bit 37 of the 40-bit DMA address and
+	 * move it back to bit 44 of Loongson-3's 48-bit address space. */
+ long nid = (daddr >> 37) & 0x3;
+ return ((nid << 37) ^ daddr) | (nid << 44);
+}
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
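A worked example (hypothetical addresses) of the node-id relocation: for a page
on node 1, the id moves from bit 44 of the physical address to bit 37 of the
40-bit DMA address, and back.

    phys_addr_t pa = (1ULL << 44) | 0x1000;     /* node 1, offset 0x1000   */
    dma_addr_t  da = __phys_to_dma(NULL, pa);   /* (1ULL << 37) | 0x1000   */
    phys_addr_t rt = __dma_to_phys(NULL, da);   /* (1ULL << 44) | 0x1000   */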
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 8501109bb0f0..fea95d003269 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -682,7 +682,8 @@ void play_dead(void)
(void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
break;
case PRID_REV_LOONGSON3A_R2:
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3a_r2r3_play_dead);
break;
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index c463bdad45c7..3e5bb203c95a 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux/MIPS-specific parts of the memory manager.
#
-obj-y += cache.o dma-default.o extable.o fault.o \
+obj-y += cache.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e12dfa48b478..a9ef057c79fe 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -830,12 +830,13 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
return __r4k_flush_icache_range(start, end, true);
}
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -904,7 +906,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
bc_inv(addr, size);
__sync();
}
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
struct flush_cache_sigtramp_args {
struct mm_struct *mm;
@@ -1505,6 +1507,14 @@ static void probe_pcache(void)
if (c->dcache.flags & MIPS_CACHE_PINDEX)
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
+ /*
+ * In systems with CM the icache fills from L2 or closer caches, and
+ * thus sees remote stores without needing to write them back any
+ * further than that.
+ */
+ if (mips_cm_present())
+ c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
+
switch (current_cpu_type()) {
case CPU_20KC:
/*
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 0d3c656feba0..70a523151ff3 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -65,7 +65,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
EXPORT_SYMBOL(_dma_cache_wback_inv);
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
/*
* We could optimize the case where the cache argument is not BCACHE but
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
deleted file mode 100644
index f9fef0028ca2..000000000000
--- a/arch/mips/mm/dma-default.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-#include <linux/dma-contiguous.h>
-
-#include <asm/cache.h>
-#include <asm/cpu-type.h>
-#include <asm/io.h>
-
-#include <dma-coherence.h>
-
-#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
-/* User defined DMA coherency from command line. */
-enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
-EXPORT_SYMBOL_GPL(coherentio);
-int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
-
-static int __init setcoherentio(char *str)
-{
- coherentio = IO_COHERENCE_ENABLED;
- pr_info("Hardware DMA cache coherency (command line)\n");
- return 0;
-}
-early_param("coherentio", setcoherentio);
-
-static int __init setnocoherentio(char *str)
-{
- coherentio = IO_COHERENCE_DISABLED;
- pr_info("Software DMA cache coherency (command line)\n");
- return 0;
-}
-early_param("nocoherentio", setnocoherentio);
-#endif
-
-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- *
- * Note that the R14000 and R16000 should also be checked for in this
- * condition. However this function is only called on non-I/O-coherent
- * systems and only the R10000 and R12000 are used in such systems, the
- * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
- */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
-{
- if (plat_device_is_coherent(dev))
- return false;
-
- switch (boot_cpu_type()) {
- case CPU_R10000:
- case CPU_R12000:
- case CPU_BMIPS5000:
- return true;
-
- default:
- /*
- * Presence of MAARs suggests that the CPU supports
- * speculatively prefetching data, and therefore requires
- * the post-DMA flush/invalidate.
- */
- return cpu_has_maar;
- }
-}
-
-static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
-{
- gfp_t dma_flag;
-
-#ifdef CONFIG_ISA
- if (dev == NULL)
- dma_flag = __GFP_DMA;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
- if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
- dma_flag = __GFP_DMA;
- else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
- if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev == NULL ||
- dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
- dma_flag = __GFP_DMA;
- else
-#endif
- dma_flag = 0;
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- return gfp | dma_flag;
-}
-
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret;
- struct page *page = NULL;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- gfp = massage_gfp_flags(dev, gfp);
-
- if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- gfp);
- if (!page)
- page = alloc_pages(gfp, get_order(size));
-
- if (!page)
- return NULL;
-
- ret = page_address(page);
- memset(ret, 0, size);
- *dma_handle = plat_map_dma_mem(dev, ret, size);
- if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
- !plat_device_is_coherent(dev)) {
- dma_cache_wback_inv((unsigned long) ret, size);
- ret = UNCAC_ADDR(ret);
- }
-
- return ret;
-}
-
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- unsigned long addr = (unsigned long) vaddr;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page = NULL;
-
- plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
-
- if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- page = virt_to_page((void *) addr);
-
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
-}
-
-static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- unsigned long user_count = vma_pages(vma);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long addr = (unsigned long)cpu_addr;
- unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
- int ret = -ENXIO;
-
- if (!plat_device_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- pfn = page_to_pfn(virt_to_page((void *)addr));
-
- if (attrs & DMA_ATTR_WRITE_COMBINE)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
-}
-
-static inline void __dma_sync_virtual(void *addr, size_t size,
- enum dma_data_direction direction)
-{
- switch (direction) {
- case DMA_TO_DEVICE:
- dma_cache_wback((unsigned long)addr, size);
- break;
-
- case DMA_FROM_DEVICE:
- dma_cache_inv((unsigned long)addr, size);
- break;
-
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv((unsigned long)addr, size);
- break;
-
- default:
- BUG();
- }
-}
-
-/*
- * A single sg entry may refer to multiple physically contiguous
- * pages. But we still need to process highmem pages individually.
- * If highmem is not configured then the bulk of this loop gets
- * optimized out.
- */
-static inline void __dma_sync(struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- size_t left = size;
-
- do {
- size_t len = left;
-
- if (PageHighMem(page)) {
- void *addr;
-
- if (offset + len > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
- }
- len = PAGE_SIZE - offset;
- }
-
- addr = kmap_atomic(page);
- __dma_sync_virtual(addr + offset, len, direction);
- kunmap_atomic(addr);
- } else
- __dma_sync_virtual(page_address(page) + offset,
- size, direction);
- offset = 0;
- page++;
- left -= len;
- } while (left);
-}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, unsigned long attrs)
-{
- if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- if (!plat_device_is_coherent(dev) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nhwentries, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nhwentries, i) {
- if (!plat_device_is_coherent(dev) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (cpu_needs_post_dma_flush(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (!plat_device_is_coherent(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
-}
-
-static int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
-
-static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- if (!plat_device_is_coherent(dev))
- __dma_sync_virtual(vaddr, size, direction);
-}
-
-static const struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .mmap = mips_dma_mmap,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .dma_supported = mips_dma_supported,
- .cache_sync = mips_dma_cache_sync,
-};
-
-const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
-EXPORT_SYMBOL(mips_dma_map_ops);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
new file mode 100644
index 000000000000..2aca1236af36
--- /dev/null
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+
+#include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm/dma-coherence.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+static inline int dev_is_coherent(struct device *dev)
+{
+ return dev->archdata.dma_coherent;
+}
+#else
+static inline int dev_is_coherent(struct device *dev)
+{
+ switch (coherentio) {
+ default:
+ case IO_COHERENCE_DEFAULT:
+ return hw_coherentio;
+ case IO_COHERENCE_ENABLED:
+ return 1;
+ case IO_COHERENCE_DISABLED:
+ return 0;
+ }
+}
+#endif /* CONFIG_DMA_PERDEV_COHERENT */
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
+ * fill random cachelines with stale data at any time, requiring an extra
+ * flush post-DMA.
+ *
+ * A note on terminology: Linux calls an uncached area coherent; MIPS
+ * terminology calls memory areas with hardware maintained coherency coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this condition.
+ * However this function is only called on non-I/O-coherent systems and only the
+ * R10000 and R12000 are used in such systems: the SGI IP28 Indigo² and the
+ * SGI IP32 aka O2, respectively.
+ */
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
+{
+ if (dev_is_coherent(dev))
+ return false;
+
+ switch (boot_cpu_type()) {
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_BMIPS5000:
+ return true;
+ default:
+ /*
+ * Presence of MAARs suggests that the CPU supports
+ * speculatively prefetching data, and therefore requires
+ * the post-DMA flush/invalidate.
+ */
+ return cpu_has_maar;
+ }
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ void *ret;
+
+ ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!ret)
+ return NULL;
+
+ if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ dma_cache_wback_inv((unsigned long) ret, size);
+ ret = (void *)UNCAC_ADDR(ret);
+ }
+
+ return ret;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
+ cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
+ dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long addr = (unsigned long)cpu_addr;
+ unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+ int ret = -ENXIO;
+
+ if (!dev_is_coherent(dev))
+ addr = CAC_ADDR(addr);
+
+ pfn = page_to_pfn(virt_to_page((void *)addr));
+
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static inline void dma_sync_virt(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback((unsigned long)addr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv((unsigned long)addr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv((unsigned long)addr, size);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/*
+ * A single sg entry may refer to multiple physically contiguous pages. But
+ * we still need to process highmem pages individually. If highmem is not
+ * configured then the bulk of this loop gets optimized out.
+ */
+static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+ unsigned long offset = paddr & ~PAGE_MASK;
+ size_t left = size;
+
+ do {
+ size_t len = left;
+
+ if (PageHighMem(page)) {
+ void *addr;
+
+ if (offset + len > PAGE_SIZE) {
+ if (offset >= PAGE_SIZE) {
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+ }
+ len = PAGE_SIZE - offset;
+ }
+
+ addr = kmap_atomic(page);
+ dma_sync_virt(addr + offset, len, dir);
+ kunmap_atomic(addr);
+ } else
+ dma_sync_virt(page_address(page) + offset, size, dir);
+ offset = 0;
+ page++;
+ left -= len;
+ } while (left);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (!dev_is_coherent(dev))
+ dma_sync_phys(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (cpu_needs_post_dma_flush(dev))
+ dma_sync_phys(paddr, size, dir);
+}
+
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!dev_is_coherent(dev))
+ dma_sync_virt(vaddr, size, direction);
+}
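For orientation, a minimal sketch of how the generic noncoherent DMA path is expected to drive the hooks defined above around a streaming mapping. This is an illustration of the contract, not the kernel/dma implementation; the sketch_* names are hypothetical:

/* Hedged sketch: simplified model of a streaming map/unmap cycle. */
static dma_addr_t sketch_map_single(struct device *dev, void *ptr,
				    size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys = virt_to_phys(ptr);

	/* Make CPU-side writes visible to the device before DMA starts. */
	arch_sync_dma_for_device(dev, phys, size, dir);
	return __phys_to_dma(dev, phys);
}

static void sketch_unmap_single(struct device *dev, dma_addr_t addr,
				size_t size, enum dma_data_direction dir)
{
	/* Discard lines the CPU may have speculatively refilled during DMA. */
	arch_sync_dma_for_cpu(dev, __dma_to_phys(dev, addr), size, dir);
}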
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index d5d02993aa21..56e4f8bffd4c 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -623,21 +623,6 @@ struct dmadscr {
u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
-void sb1_dma_init(void)
-{
- int i;
-
- for (i = 0; i < DM_NUM_CHANNELS; i++) {
- const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
- V_DM_DSCR_BASE_RINGSZ(1);
- void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
-
- __raw_writeq(base_val, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
- }
-}
-
void clear_page(void *page)
{
u64 to_phys = CPHYSADDR((unsigned long)page);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 79b9f2ad3ff5..49312a14cd17 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1509,7 +1509,7 @@ static void setup_pw(void)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
write_c0_pwctl(1 << 6 | psn);
#endif
- write_c0_kpgd(swapper_pg_dir);
+ write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 9bb6baa45da3..24e5b0d06899 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -19,7 +19,6 @@
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
-#define UASM_ISA _UASM_ISA_MICROMIPS
#include <asm/uasm.h>
#define RS_MASK 0x1f
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 9fea6c6bbf49..60ceb93c71a0 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -19,7 +19,6 @@
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
-#define UASM_ISA _UASM_ISA_CLASSIC
#include <asm/uasm.h>
#define RS_MASK 0x1f
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 63940bdce698..17c7fd471a27 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -13,11 +13,9 @@ obj-y += malta-init.o
obj-y += malta-int.o
obj-y += malta-memory.o
obj-y += malta-platform.o
-obj-y += malta-reset.o
obj-y += malta-setup.o
obj-y += malta-time.o
obj-$(CONFIG_MIPS_CMP) += malta-amon.o
-obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o
CFLAGS_malta-dtshim.o = -I$(src)/../../../scripts/dtc/libfdt
diff --git a/arch/mips/mti-malta/malta-pm.c b/arch/mips/mti-malta/malta-pm.c
deleted file mode 100644
index efbd659fb602..000000000000
--- a/arch/mips/mti-malta/malta-pm.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2014 Imagination Technologies
- * Author: Paul Burton <paul.burton@mips.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/pci.h>
-
-#include <asm/mach-malta/malta-pm.h>
-
-static struct pci_bus *pm_pci_bus;
-static resource_size_t pm_io_offset;
-
-int mips_pm_suspend(unsigned state)
-{
- int spec_devid;
- u16 sts;
-
- if (!pm_pci_bus || !pm_io_offset)
- return -ENODEV;
-
- /* Ensure the power button status is clear */
- while (1) {
- sts = inw(pm_io_offset + PIIX4_FUNC3IO_PMSTS);
- if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
- break;
- outw(sts, pm_io_offset + PIIX4_FUNC3IO_PMSTS);
- }
-
- /* Enable entry to suspend */
- outw(state | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
- pm_io_offset + PIIX4_FUNC3IO_PMCNTRL);
-
- /* If the special cycle occurs too soon this doesn't work... */
- mdelay(10);
-
- /*
- * The PIIX4 will enter the suspend state only after seeing a special
- * cycle with the correct magic data on the PCI bus. Generate that
- * cycle now.
- */
- spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
- pci_bus_write_config_dword(pm_pci_bus, spec_devid, 0,
- PIIX4_SUSPEND_MAGIC);
-
- /* Give the system some time to power down */
- mdelay(1000);
-
- return 0;
-}
-
-static int __init malta_pm_setup(void)
-{
- struct pci_dev *dev;
- int res, io_region = PCI_BRIDGE_RESOURCES;
-
- /* Find a reference to the PCI bus */
- pm_pci_bus = pci_find_next_bus(NULL);
- if (!pm_pci_bus) {
- pr_warn("malta-pm: failed to find reference to PCI bus\n");
- return -ENODEV;
- }
-
- /* Find the PIIX4 PM device */
- dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
- PCI_ANY_ID, NULL);
- if (!dev) {
- pr_warn("malta-pm: failed to find PIIX4 PM\n");
- return -ENODEV;
- }
-
- /* Request access to the PIIX4 PM IO registers */
- res = pci_request_region(dev, io_region, "PIIX4 PM IO registers");
- if (res) {
- pr_warn("malta-pm: failed to request PM IO registers (%d)\n",
- res);
- pci_dev_put(dev);
- return -ENODEV;
- }
-
- /* Find the offset to the PIIX4 PM IO registers */
- pm_io_offset = pci_resource_start(dev, io_region);
-
- pci_dev_put(dev);
- return 0;
-}
-
-late_initcall(malta_pm_setup);
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
deleted file mode 100644
index dd6f62ad4417..000000000000
--- a/arch/mips/mti-malta/malta-reset.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/io.h>
-#include <linux/pm.h>
-#include <linux/reboot.h>
-
-#include <asm/reboot.h>
-#include <asm/mach-malta/malta-pm.h>
-
-static void mips_machine_power_off(void)
-{
- mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);
-
- pr_info("Failed to power down, resetting\n");
- machine_restart(NULL);
-}
-
-static int __init mips_reboot_setup(void)
-{
- pm_power_off = mips_machine_power_off;
-
- return 0;
-}
-arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 7b63914d2e58..5d4c5e5fbd69 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -26,6 +26,7 @@
#include <linux/screen_info.h>
#include <linux/time.h>
+#include <asm/dma-coherence.h>
#include <asm/fw/fw.h>
#include <asm/mach-malta/malta-dtshim.h>
#include <asm/mips-cps.h>
@@ -144,12 +145,6 @@ static int __init plat_enable_iocoherency(void)
static void __init plat_setup_iocoherency(void)
{
-#ifdef CONFIG_DMA_NONCOHERENT
- /*
- * Kernel has been configured with software coherency
- * but we might choose to turn it off and use hardware
- * coherency instead.
- */
if (plat_enable_iocoherency()) {
if (coherentio == IO_COHERENCE_DISABLED)
pr_info("Hardware DMA cache coherency disabled\n");
@@ -161,10 +156,6 @@ static void __init plat_setup_iocoherency(void)
else
pr_info("Software DMA cache coherency enabled\n");
}
-#else
- if (!plat_enable_iocoherency())
- panic("Hardware DMA cache coherency not supported!");
-#endif
}
static void __init pci_clock_check(void)
@@ -226,29 +217,6 @@ static void __init bonito_quirks_setup(void)
pr_info("Enabled Bonito debug mode\n");
} else
BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
-
-#ifdef CONFIG_DMA_COHERENT
- if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
- BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
- pr_info("Enabled Bonito CPU coherency\n");
-
- argptr = fw_getcmdline();
- if (strstr(argptr, "iobcuncached")) {
- BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
- BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
- ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
- BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- pr_info("Disabled Bonito IOBC coherency\n");
- } else {
- BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
- BONITO_PCIMEMBASECFG |=
- (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
- BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- pr_info("Enabled Bonito IOBC coherency\n");
- }
- } else
- panic("Hardware DMA cache coherency not supported");
-#endif
}
void __init *plat_get_fdt(void)
@@ -279,11 +247,6 @@ void __init plat_mem_setup(void)
*/
enable_dma(4);
-#ifdef CONFIG_DMA_COHERENT
- if (mips_revision_sconid != MIPS_REVISION_SCON_BONITO)
- panic("Hardware DMA cache coherency not supported");
-#endif
-
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
bonito_quirks_setup();
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
index 769f93032c53..8f5bc1597550 100644
--- a/arch/mips/netlogic/common/earlycons.c
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -36,6 +36,7 @@
#include <linux/serial_reg.h>
#include <asm/mipsregs.h>
+#include <asm/setup.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index 856a6e6d296e..b5ba83f4c646 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -93,17 +93,3 @@ void __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
}
-
-static struct of_device_id __initdata xlp_ids[] = {
- { .compatible = "simple-bus", },
- {},
-};
-
-int __init xlp8xx_ds_publish_devices(void)
-{
- if (!of_have_populated_dt())
- return 0;
- return of_platform_bus_probe(NULL, xlp_ids, NULL);
-}
-
-device_initcall(xlp8xx_ds_publish_devices);
diff --git a/arch/mips/paravirt/serial.c b/arch/mips/paravirt/serial.c
index 02b665c02272..a37b6f9f0ede 100644
--- a/arch/mips/paravirt/serial.c
+++ b/arch/mips/paravirt/serial.c
@@ -9,16 +9,15 @@
#include <linux/kernel.h>
#include <linux/virtio_console.h>
#include <linux/kvm_para.h>
+#include <asm/setup.h>
/*
* Emit one character to the boot console.
*/
-int prom_putchar(char c)
+void prom_putchar(char c)
{
kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, 0 /* port 0 */,
(unsigned long)&c, 1 /* len == 1 */);
-
- return 1;
}
#ifdef CONFIG_VIRTIO_CONSOLE
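Several boards in this series convert prom_putchar() from returning int to void. A minimal sketch of the kind of early-console write loop that sits on top of the new prototype; the function name and the CRLF handling are illustrative, not taken from this patch:

static void sketch_early_console_write(const char *s, unsigned int n)
{
	while (n-- && *s) {
		if (*s == '\n')
			prom_putchar('\r');	/* raw UARTs want CRLF */
		prom_putchar(*s++);
	}
}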
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index 57e1463fcd02..a1d2c4ae0d1b 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -167,7 +167,7 @@ oh_my_gawd:
static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 * value)
{
- if (bus->number > 0)
+ if (!pci_is_root_bus(bus))
return pci_conf1_read_config(bus, devfn, where, size, value);
return pci_conf0_read_config(bus, devfn, where, size, value);
@@ -310,7 +310,7 @@ oh_my_gawd:
static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
- if (bus->number > 0)
+ if (!pci_is_root_bus(bus))
return pci_conf1_write_config(bus, devfn, where, size, value);
return pci_conf0_write_config(bus, devfn, where, size, value);
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index b4fa6413c4e5..c539d0d2b0cf 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -149,6 +149,13 @@
#define AR2315_PCI_HOST_SLOT 3
#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
+/*
+ * We need some arbitrary non-zero value to be programmed to the BAR1 register
+ * of the PCI host controller to enable DMA. The same value should be used as
+ * the offset to calculate the physical address of a DMA buffer for PCI devices.
+ */
+#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
+
/* ??? access BAR */
#define AR2315_PCI_HOST_MBAR0 0x10000000
/* RAM access BAR */
@@ -167,6 +174,23 @@ struct ar2315_pci_ctrl {
struct resource io_res;
};
+static inline dma_addr_t ar2315_dev_offset(struct device *dev)
+{
+ if (dev && dev_is_pci(dev))
+ return AR2315_PCI_HOST_SDRAM_BASEADDR;
+ return 0;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + ar2315_dev_offset(dev);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr - ar2315_dev_offset(dev);
+}
+
static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
{
struct pci_controller *hose = bus->sysdata;
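The comment above ties AR2315_PCI_HOST_SDRAM_BASEADDR to two users: the host controller's BAR1 and the DMA offset applied by ar2315_dev_offset(). A hedged sketch of that pairing; the config write below is illustrative only, not the driver's actual setup code:

static void sketch_ar2315_dma_window(struct pci_bus *bus, unsigned int devfn)
{
	/* BAR1 of the host bridge decodes SDRAM at this bus address... */
	pci_bus_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_1,
				   AR2315_PCI_HOST_SDRAM_BASEADDR);
	/* ...so __phys_to_dma() must add the identical offset to every
	 * physical address handed to a PCI device. */
}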
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 1e23c8d587bd..64b58cc48a91 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -12,14 +12,18 @@
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
+#define AR724X_PCI_REG_APP 0x00
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
+#define AR724X_PCI_APP_LTSSM_ENABLE BIT(0)
+
#define AR724X_PCI_RESET_LINK_UP BIT(0)
#define AR724X_PCI_INT_DEV0 BIT(14)
@@ -325,6 +329,37 @@ static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
apc);
}
+static void ar724x_pci_hw_init(struct ar724x_pci_controller *apc)
+{
+ u32 ppl, app;
+ int wait = 0;
+
+ /* deassert PCIe host controller and PCIe PHY reset */
+ ath79_device_reset_clear(AR724X_RESET_PCIE);
+ ath79_device_reset_clear(AR724X_RESET_PCIE_PHY);
+
+ /* remove the reset of the PCIE PLL */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* deassert bypass for the PCIE PLL */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* set PCIE Application Control to ready */
+ app = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_APP);
+ app |= AR724X_PCI_APP_LTSSM_ENABLE;
+ __raw_writel(app, apc->ctrl_base + AR724X_PCI_REG_APP);
+
+ /* wait up to 100ms for PHY link up */
+ do {
+ mdelay(10);
+ wait++;
+ } while (wait < 10 && !ar724x_pci_check_link(apc));
+}
+
static int ar724x_pci_probe(struct platform_device *pdev)
{
struct ar724x_pci_controller *apc;
@@ -383,6 +418,13 @@ static int ar724x_pci_probe(struct platform_device *pdev)
apc->pci_controller.io_resource = &apc->io_res;
apc->pci_controller.mem_resource = &apc->mem_res;
+ /*
+ * Do the full PCIE Root Complex Initialization Sequence if the PCIe
+ * host controller is in reset.
+ */
+ if (ath79_reset_rr(AR724X_RESET_REG_RESET_MODULE) & AR724X_RESET_PCIE)
+ ar724x_pci_hw_init(apc);
+
apc->link_up = ar724x_pci_check_link(apc);
if (!apc->link_up)
dev_warn(&pdev->dev, "PCIe link is down\n");
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 0f09eafa5e3a..c94a66070a60 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/smp.h>
+#include <linux/dma-direct.h>
#include <asm/sn/arch.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
@@ -182,6 +183,19 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
+
+ return bc->baddr + paddr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr & ~(0xffUL << 56);
+}
+
/*
* Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
* to find the slot number in sense of the bridge device register.
@@ -200,17 +214,6 @@ static inline void pci_disable_swapping(struct pci_dev *dev)
bridge->b_widget.w_tflush; /* Flush */
}
-static inline void pci_enable_swapping(struct pci_dev *dev)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- bridge_t *bridge = bc->base;
- int slot = PCI_SLOT(dev->devfn);
-
- /* Turn on byte swapping */
- bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
- bridge->b_widget.w_tflush; /* Flush */
-}
-
static void pci_fixup_ioc3(struct pci_dev *d)
{
pci_disable_swapping(d);
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 3e92a06fa772..5017d5843c5a 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -21,8 +21,6 @@
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/pci-octeon.h>
-#include <dma-coherence.h>
-
#define USE_OCTEON_INTERNAL_ARBITER
/*
@@ -166,8 +164,6 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
}
- dev->dev.dma_ops = octeon_pci_dma_map_ops;
-
return 0;
}
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 87ba86bd8696..d919a0d813a1 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -94,8 +94,6 @@ union cvmx_pcie_address {
static int cvmx_pcie_rc_initialize(int pcie_port);
-#include <dma-coherence.h>
-
/**
* Return the Core virtual base address for PCIe IO access. IOs are
* read/written as an offset from this address.
@@ -1239,14 +1237,14 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
/* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
if (pcie_port) {
- union cvmx_ciu_qlm1 ciu_qlm;
+ union cvmx_ciu_qlm ciu_qlm;
ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 5;
ciu_qlm.s.txmargin = 0x17;
cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
} else {
- union cvmx_ciu_qlm0 ciu_qlm;
+ union cvmx_ciu_qlm ciu_qlm;
ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 5;
diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c
index d7b783463fac..8ed4961b1271 100644
--- a/arch/mips/pic32/pic32mzda/early_console.c
+++ b/arch/mips/pic32/pic32mzda/early_console.c
@@ -13,6 +13,7 @@
*/
#include <asm/mach-pic32/pic32.h>
#include <asm/fw/fw.h>
+#include <asm/setup.h>
#include "pic32mzda.h"
#include "early_pin.h"
@@ -157,7 +158,7 @@ void __init fw_init_early_console(char port)
setup_early_console(port, baud);
}
-int prom_putchar(char c)
+void prom_putchar(char c)
{
if (console_port >= 0) {
while (__raw_readl(
@@ -166,6 +167,4 @@ int prom_putchar(char c)
__raw_writel(c, uart_base + U_TXR(console_port));
}
-
- return 1;
}
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index 3c59ffe5f5f5..ecd30ddfb3db 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -10,6 +10,7 @@
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
+#include <asm/setup.h>
#ifdef CONFIG_SOC_RT288X
#define EARLY_UART_BASE 0x300c00
@@ -68,7 +69,7 @@ static void find_uart_base(void)
}
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
if (!init_complete) {
find_uart_base();
@@ -76,13 +77,13 @@ void prom_putchar(unsigned char ch)
}
if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
- uart_w32(ch, UART_TX);
+ uart_w32((unsigned char)ch, UART_TX);
while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
;
} else {
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
- uart_w32(ch, UART_REG_TX);
+ uart_w32((unsigned char)ch, UART_REG_TX);
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
}
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
index 45fdfbcbd4c6..6bdb48d41276 100644
--- a/arch/mips/sgi-ip27/ip27-console.c
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -7,6 +7,7 @@
*/
#include <asm/page.h>
+#include <asm/setup.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn0/hub.h>
#include <asm/sn/klconfig.h>
diff --git a/arch/mips/sgi-ip32/Makefile b/arch/mips/sgi-ip32/Makefile
index 60f0227425e7..4745cd94df11 100644
--- a/arch/mips/sgi-ip32/Makefile
+++ b/arch/mips/sgi-ip32/Makefile
@@ -4,4 +4,4 @@
#
obj-y += ip32-berr.o ip32-irq.o ip32-platform.o ip32-setup.o ip32-reset.o \
- crime.o ip32-memory.o
+ crime.o ip32-memory.o ip32-dma.o
diff --git a/arch/mips/sgi-ip32/ip32-dma.c b/arch/mips/sgi-ip32/ip32-dma.c
new file mode 100644
index 000000000000..fa7b17cb5385
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-dma.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/dma-direct.h>
+#include <asm/ip32/crime.h>
+
+/*
+ * A few notes:
+ * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
+ * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
+ * native-endian)
+ * 3. All other devices see memory as one big chunk at 0x40000000
+ * 4. Non-PCI devices will pass NULL as struct device*
+ *
+ * Thus we translate differently, depending on device.
+ */
+
+#define RAM_OFFSET_MASK 0x3fffffffUL
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
+
+ if (!dev)
+ dma_addr += CRIME_HI_MEM_BASE;
+ return dma_addr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
+
+ if (dma_addr >= 256*1024*1024)
+ paddr += CRIME_HI_MEM_BASE;
+ return paddr;
+}
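A worked example of the translations above, under the stated layout (addresses illustrative): take a page in the high chunk at physical 0x50001000, i.e. 0x40000000 + 256M + 0x1000. The masked offset is 0x10001000, which is >= 256M, so __dma_to_phys() re-adds CRIME_HI_MEM_BASE on the way back for both device classes:

static void sketch_ip32_translation(struct device *pci_dev)
{
	phys_addr_t paddr = 0x50001000;	/* high-chunk page, illustrative */
	dma_addr_t pci = __phys_to_dma(pci_dev, paddr);	/* 0x10001000 */
	dma_addr_t other = __phys_to_dma(NULL, paddr);	/* 0x50001000 */

	/* Both views reverse-translate to the original physical address. */
	BUG_ON(__dma_to_phys(pci_dev, pci) != paddr);
	BUG_ON(__dma_to_phys(NULL, other) != paddr);
}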
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index f4dbce25bc6a..7ec278d72096 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -70,7 +70,6 @@ config SIBYTE_BCM1x55
config SIBYTE_SB1xxx_SOC
bool
- select DMA_COHERENT
select IRQ_MIPS_CPU
select SWAP_IO_SPACE
select SYS_SUPPORTS_32BIT_KERNEL
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 115399202eab..092fb2a6ec4a 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -27,6 +27,7 @@
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/sibyte/board.h>
#include <asm/smp-ops.h>
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 1791a44ee570..dde4dc859f79 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -32,6 +32,7 @@
#include <asm/reboot.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/pci.h>
#include <asm/txx9tmr.h>
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index ce196046ac3e..34605ca21498 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -7,7 +7,13 @@ ccflags-vdso := \
$(filter -I%,$(KBUILD_CFLAGS)) \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
- $(filter -march=%,$(KBUILD_CFLAGS))
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ -D__VDSO__
+
+ifeq ($(cc-name),clang)
+ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+endif
+
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
@@ -38,6 +44,7 @@ endif
# VDSO linker flags.
VDSO_LDFLAGS := \
-Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
+ $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \
-nostdlib -shared \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
$(call cc-ldoption, -Wl$(comma)--build-id)
diff --git a/arch/mips/vdso/genvdso.h b/arch/mips/vdso/genvdso.h
index 94334727059a..611b06f01a3c 100644
--- a/arch/mips/vdso/genvdso.h
+++ b/arch/mips/vdso/genvdso.h
@@ -15,8 +15,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
ELF(Shdr) *shdr;
char *shstrtab, *name;
uint16_t sh_count, sh_entsize, i;
- unsigned int local_gotno, symtabno, gotsym;
- ELF(Dyn) *dyn = NULL;
shdrs = vdso + FUNC(swap_uint)(ehdr->e_shoff);
sh_count = swap_uint16(ehdr->e_shnum);
@@ -41,9 +39,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
"%s: '%s' contains relocation sections\n",
program_name, path);
return false;
- case SHT_DYNAMIC:
- dyn = vdso + FUNC(swap_uint)(shdr->sh_offset);
- break;
}
/* Check for existing sections. */
@@ -61,52 +56,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
}
}
- /*
- * Ensure the GOT has no entries other than the standard 2, for the same
- * reason we check that there's no relocation sections above.
- * The standard two entries are:
- * - Lazy resolver
- * - Module pointer
- */
- if (dyn) {
- local_gotno = symtabno = gotsym = 0;
-
- while (FUNC(swap_uint)(dyn->d_tag) != DT_NULL) {
- switch (FUNC(swap_uint)(dyn->d_tag)) {
- /*
- * This member holds the number of local GOT entries.
- */
- case DT_MIPS_LOCAL_GOTNO:
- local_gotno = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- /*
- * This member holds the number of entries in the
- * .dynsym section.
- */
- case DT_MIPS_SYMTABNO:
- symtabno = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- /*
- * This member holds the index of the first dynamic
- * symbol table entry that corresponds to an entry in
- * the GOT.
- */
- case DT_MIPS_GOTSYM:
- gotsym = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- }
-
- dyn++;
- }
-
- if (local_gotno > 2 || symtabno - gotsym) {
- fprintf(stderr,
- "%s: '%s' contains unexpected GOT entries\n",
- program_name, path);
- return false;
- }
- }
-
return true;
}
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 39a0db3e2b34..16e684b59875 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -17,6 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -46,7 +47,7 @@ static void __iomem *pmu_base;
#define pmu_read(offset) readw(pmu_base + (offset))
#define pmu_write(offset, value) writew((value), pmu_base + (offset))
-static void vr41xx_cpu_wait(void)
+static void __cpuidle vr41xx_cpu_wait(void)
{
local_irq_disable();
if (!need_resched())
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 9ecad05bfc73..dfb6a79ba7ff 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -27,7 +27,6 @@ config OPENRISC
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
select MODULES_USE_ELF_RELA
- select MULTI_IRQ_HANDLER
select HAVE_DEBUG_STACKOVERFLOW
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
@@ -36,6 +35,7 @@ config OPENRISC
select ARCH_USE_QUEUED_RWLOCKS
select OMPIC if SMP
select ARCH_WANT_FRAME_POINTERS
+ select GENERIC_IRQ_MULTI_HANDLER
config CPU_BIG_ENDIAN
def_bool y
@@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT
config LOCKDEP_SUPPORT
def_bool y
-config MULTI_IRQ_HANDLER
- def_bool y
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index 146e1660f00e..b589fac39b92 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -100,7 +100,7 @@ ATOMIC_OP(xor)
*
* This is often used through atomic_inc_not_zero()
*/
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
@@ -119,7 +119,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return old;
}
-#define __atomic_add_unless __atomic_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#include <asm-generic/atomic.h>
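As the comment above notes, this primitive mostly backs atomic_inc_not_zero(). A minimal sketch of that relationship as the generic layer derives it; the sketch_ name is illustrative:

static inline bool sketch_atomic_inc_not_zero(atomic_t *v)
{
	/* Add 1 unless the counter is 0; the returned old value says
	 * whether the increment actually took place. */
	return atomic_fetch_add_unless(v, 1, 0) != 0;
}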
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index d29f7db53906..f9cd43a39d72 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -16,8 +16,9 @@
#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H
+#include <linux/bits.h>
+#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/bitops.h>
#define __HAVE_ARCH_CMPXCHG 1
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
index d9eee0a2b7b4..eb612b1865d2 100644
--- a/arch/openrisc/include/asm/irq.h
+++ b/arch/openrisc/include/asm/irq.h
@@ -24,6 +24,4 @@
#define NO_IRQ (-1)
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
index 35e478a93116..5f9445effaf8 100644
--- a/arch/openrisc/kernel/irq.c
+++ b/arch/openrisc/kernel/irq.c
@@ -41,13 +41,6 @@ void __init init_IRQ(void)
irqchip_init();
}
-static void (*handle_arch_irq)(struct pt_regs *);
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- handle_arch_irq = handle_irq;
-}
-
void __irq_entry do_IRQ(struct pt_regs *regs)
{
handle_arch_irq(regs);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 17526bebcbd2..e21751fb24aa 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,7 +11,6 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_SUPPORTS_MEMORY_FAILURE
select RTC_CLASS
select RTC_DRV_GENERIC
@@ -46,6 +45,7 @@ config PARISC
select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_REGS_AND_STACK_ACCESS_API
select GENERIC_SCHED_CLOCK
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
select GENERIC_CLOCKEVENTS
@@ -188,6 +188,10 @@ config PA20
config PA11
def_bool y
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select DMA_NONCOHERENT_OPS
+ select DMA_NONCOHERENT_CACHE_SYNC
config PREFETCH
def_bool y
@@ -195,7 +199,7 @@ config PREFETCH
config MLONGCALLS
bool "Enable the -mlong-calls compiler option for big kernels"
- def_bool y if (!MODULES)
+ default y
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 60e6f07b7e32..e9c6385ef0d1 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -36,6 +36,7 @@
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
+#define REG_SZ 8
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
@@ -50,6 +51,7 @@
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
+#define REG_SZ 4
#define ASM_ULONG_INSN .word
#endif
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 88bae6676c9b..118953d41763 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -77,30 +77,6 @@ static __inline__ int atomic_read(const atomic_t *v)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define ATOMIC_OP(op, c_op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \
{ \
@@ -160,28 +136,6 @@ ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_inc(v) (atomic_add( 1,(v)))
-#define atomic_dec(v) (atomic_add( -1,(v)))
-
-#define atomic_inc_return(v) (atomic_add_return( 1,(v)))
-#define atomic_dec_return(v) (atomic_add_return( -1,(v)))
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
#define ATOMIC_INIT(i) { (i) }
#ifdef CONFIG_64BIT
@@ -264,72 +218,11 @@ atomic64_read(const atomic64_t *v)
return READ_ONCE((v)->counter);
}
-#define atomic64_inc(v) (atomic64_add( 1,(v)))
-#define atomic64_dec(v) (atomic64_add( -1,(v)))
-
-#define atomic64_inc_return(v) (atomic64_add_return( 1,(v)))
-#define atomic64_dec_return(v) (atomic64_add_return( -1,(v)))
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
-
/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
#endif /* !CONFIG_64BIT */
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 000000000000..dbaaca84f27f
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+ which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb() do { synchronize_caches(); } while (0)
+#define rmb() mb()
+#define wmb() mb()
+#define dma_rmb() mb()
+#define dma_wmb() mb()
+#else
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
+#endif
+
+#define __smp_mb() mb()
+#define __smp_rmb() mb()
+#define __smp_wmb() mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
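A hedged usage sketch (not part of this patch) of the ordering these definitions provide on SMP, via the smp_wmb()/smp_rmb() wrappers that asm-generic/barrier.h derives from __smp_mb() and friends:

static int sketch_data, sketch_ready;

static void sketch_producer(void)
{
	sketch_data = 42;
	smp_wmb();			/* order the data store before the flag */
	WRITE_ONCE(sketch_ready, 1);
}

static int sketch_consumer(void)
{
	while (!READ_ONCE(sketch_ready))
		cpu_relax();
	smp_rmb();			/* order the flag read before the data read */
	return sketch_data;
}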
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 01e1fc057c83..44a9f97194aa 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -21,11 +21,6 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/
-#ifdef CONFIG_PA11
-extern const struct dma_map_ops pcxl_dma_ops;
-extern const struct dma_map_ops pcx_dma_ops;
-#endif
-
extern const struct dma_map_ops *hppa_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
index 9a69bf6fc4b6..49f6f3d772cc 100644
--- a/arch/parisc/include/asm/linkage.h
+++ b/arch/parisc/include/asm/linkage.h
@@ -18,9 +18,9 @@
#ifdef __ASSEMBLY__
#define ENTRY(name) \
- .export name !\
- ALIGN !\
-name:
+ ALIGN !\
+name: ASM_NL\
+ .export name
#ifdef CONFIG_64BIT
#define ENDPROC(name) \
@@ -31,13 +31,18 @@ name:
END(name)
#endif
-#define ENTRY_CFI(name) \
+#define ENTRY_CFI(name, ...) \
ENTRY(name) ASM_NL\
+ .proc ASM_NL\
+ .callinfo __VA_ARGS__ ASM_NL\
+ .entry ASM_NL\
CFI_STARTPROC
#define ENDPROC_CFI(name) \
- ENDPROC(name) ASM_NL\
- CFI_ENDPROC
+ CFI_ENDPROC ASM_NL\
+ .exit ASM_NL\
+ .procend ASM_NL\
+ ENDPROC(name)
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 46da07670c2b..2a27b275ab09 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -25,4 +25,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->gr[20];
}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->iaoq[0] = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
+
#endif
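A hedged sketch of how the new accessors compose for register fetch by name, in the style of the generic regs_get_register(); the helper below is illustrative, not part of this patch:

static unsigned long sketch_fetch_reg_by_name(struct pt_regs *regs,
					      const char *name)
{
	int off = regs_query_register_offset(name);

	if (off < 0 || off > MAX_REG_OFFSET)
		return 0;
	return *(unsigned long *)((char *)regs + off);
}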
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 6f84b6acc86e..8a63515f03bf 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
{
volatile unsigned int *a;
- mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0)
@@ -30,17 +29,16 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
local_irq_disable();
} else
cpu_relax();
- mb();
}
#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
- mb();
+
a = __ldcw_align(x);
- *a = 1;
mb();
+ *a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
volatile unsigned int *a;
int ret;
- mb();
a = __ldcw_align(x);
ret = __ldcw(a) != 0;
- mb();
return ret;
}
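A hedged reading of the unlock change above: the barrier now sits before the store, so the critical section's accesses are ordered before the lock word is published, i.e. a release store. A sketch of the resulting pattern (illustrative, not the header's code):

static inline void sketch_release_store(volatile unsigned int *lock_word)
{
	/* all prior critical-section accesses... */
	mb();			/* ...must complete first... */
	*lock_word = 1;		/* ...then the lock is seen as free */
}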
diff --git a/arch/parisc/include/asm/unwind.h b/arch/parisc/include/asm/unwind.h
index c73a3ee20226..f133b7efbebb 100644
--- a/arch/parisc/include/asm/unwind.h
+++ b/arch/parisc/include/asm/unwind.h
@@ -4,6 +4,9 @@
#include <linux/list.h>
+/* Max number of levels to backtrace */
+#define MAX_UNWIND_ENTRIES 30
+
/* From ABI specifications */
struct unwind_table_entry {
unsigned int region_start;
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index fc0df353ff0d..87245c584784 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -113,7 +113,6 @@
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
-#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index e95207c0565e..c7508f5717fb 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -482,6 +482,8 @@
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
+ sync
+ or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif
.endm
@@ -764,7 +766,6 @@ END(fault_vector_11)
#endif
/* Fault vector is separately protected and *must* be on its own page */
.align PAGE_SIZE
-ENTRY(end_fault_vector)
.import handle_interruption,code
.import do_cpu_irq_mask,code
@@ -776,7 +777,6 @@ ENTRY(end_fault_vector)
*/
ENTRY_CFI(ret_from_kernel_thread)
-
/* Call schedule_tail first though */
BL schedule_tail, %r2
nop
@@ -815,8 +815,9 @@ ENTRY_CFI(_switch_to)
LDREG TASK_THREAD_INFO(%r25), %r25
bv %r0(%r2)
mtctl %r25,%cr30
+ENDPROC_CFI(_switch_to)
-_switch_to_ret:
+ENTRY_CFI(_switch_to_ret)
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
callee_rest_float
@@ -824,7 +825,7 @@ _switch_to_ret:
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
-ENDPROC_CFI(_switch_to)
+ENDPROC_CFI(_switch_to_ret)
/*
* Common rfi return path for interruptions, kernel execve, and
@@ -885,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi)
STREG %r19,PT_SR5(%r16)
STREG %r19,PT_SR6(%r16)
STREG %r19,PT_SR7(%r16)
+ENDPROC_CFI(syscall_exit_rfi)
-intr_return:
+ENTRY_CFI(intr_return)
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+ENDPROC_CFI(intr_return)
.import do_notify_resume,code
intr_check_sig:
@@ -1046,7 +1049,6 @@ intr_extint:
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
-ENDPROC_CFI(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1997,12 +1999,9 @@ ENDPROC_CFI(syscall_exit)
.align L1_CACHE_BYTES
.globl mcount
.type mcount, @function
-ENTRY(mcount)
+ENTRY_CFI(mcount, caller)
_mcount:
.export _mcount,data
- .proc
- .callinfo caller,frame=0
- .entry
/*
* The 64bit mcount() function pointer needs 4 dwords, of which the
* first two are free. We optimize it here and put 2 instructions for
@@ -2024,18 +2023,13 @@ ftrace_stub:
.dword mcount
.dword 0 /* code in head.S puts value of global gp here */
#endif
- .exit
- .procend
-ENDPROC(mcount)
+ENDPROC_CFI(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.align 8
.globl return_to_handler
.type return_to_handler, @function
-ENTRY_CFI(return_to_handler)
- .proc
- .callinfo caller,frame=FRAME_SIZE
- .entry
+ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
.export parisc_return_to_handler,data
parisc_return_to_handler:
copy %r3,%r1
@@ -2074,8 +2068,6 @@ parisc_return_to_handler:
bv %r0(%rp)
#endif
LDREGM -FRAME_SIZE(%sp),%r3
- .exit
- .procend
ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2085,31 +2077,30 @@ ENDPROC_CFI(return_to_handler)
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
-ENTRY_CFI(call_on_stack)
+ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call. For
CONFIG_64BIT, the argument pointer is left to point at the
argument region allocated for the call to call_on_stack. */
+
+ /* Switch to new stack. We allocate two frames. */
+ ldo 2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
- /* Switch to new stack. We allocate two 128 byte frames. */
- ldo 256(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
- STREG %rp, -144(%sp)
+ STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls always use function descriptor */
LDREG 16(%arg1), %arg1
bve,l (%arg1), %rp
- STREG %r1, -136(%sp)
- LDREG -144(%sp), %rp
+ STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
+ LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bve (%rp)
- LDREG -136(%sp), %sp
+ LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# else
- /* Switch to new stack. We allocate two 64 byte frames. */
- ldo 128(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
- STREG %r1, -68(%sp)
- STREG %rp, -84(%sp)
+ STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
+ STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls use function descriptor if PLABEL bit is set */
bb,>=,n %arg1, 30, 1f
depwi 0,31,2, %arg1
@@ -2117,9 +2108,9 @@ ENTRY_CFI(call_on_stack)
1:
be,l 0(%sr4,%arg1), %sr0, %r31
copy %r31, %rp
- LDREG -84(%sp), %rp
+ LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bv (%rp)
- LDREG -68(%sp), %sp
+ LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 22e6374ece44..f33bf2d306d6 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -44,10 +44,6 @@
.align 16
ENTRY_CFI(flush_tlb_all_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
/*
* The pitlbe and pdtlbe instructions should only be used to
* flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:
2: bv %r0(%r2)
nop
-
- .exit
- .procend
ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
load32 cache_info, %r1
/* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP. */
@@ -353,6 +332,7 @@ ENDPROC_CFI(flush_data_cache_local)
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
ldi 1,\tmp
+ sync
stw \tmp,0(\la)
mtsm \flags
#endif
@@ -361,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
#ifdef CONFIG_64BIT
/* Unroll the loop. */
@@ -423,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
#endif
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
ENTRY_CFI(copy_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
@@ -541,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
#endif
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(copy_page_asm)
/*
@@ -597,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
*/
ENTRY_CFI(copy_user_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to a non-shadowed register. */
ldil L%(__PAGE_OFFSET), %r1
@@ -749,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
@@ -835,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -902,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -976,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1019,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1061,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1082,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_user_dcache_range_asm)
ENTRY_CFI(flush_kernel_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1104,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1126,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1147,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
@@ -1190,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1211,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
- .procend
ENDPROC_CFI(flush_kernel_icache_range_asm)
__INIT
@@ -1222,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
*/
.align 256
ENTRY_CFI(disable_sr_hashing_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
/*
* Switch to real mode
*/
@@ -1307,9 +1186,6 @@ srdis_done:
2: bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(disable_sr_hashing_asm)
.end
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 6df07ce4f3c2..04c48f1ef3fb 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -21,13 +21,12 @@
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
-#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
@@ -395,7 +394,7 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
-static void *pa11_dma_alloc(struct device *dev, size_t size,
+static void *pcxl_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
unsigned long vaddr;
@@ -422,190 +421,60 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
return (void *)vaddr;
}
-static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
- int order;
-
- order = get_order(size);
- size = 1 << (order + PAGE_SHIFT);
- unmap_uncached_pages((unsigned long)vaddr, size);
- pcxl_free_range((unsigned long)vaddr, size);
- free_pages((unsigned long)__va(dma_handle), order);
-}
+ void *addr;
-static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- void *addr = page_address(page) + offset;
- BUG_ON(direction == DMA_NONE);
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+ return NULL;
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- flush_kernel_dcache_range((unsigned long) addr, size);
+ addr = (void *)__get_free_pages(flag, get_order(size));
+ if (addr)
+ *dma_handle = (dma_addr_t)virt_to_phys(addr);
- return virt_to_phys(addr);
+ return addr;
}
-static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- BUG_ON(direction == DMA_NONE);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
- if (direction == DMA_TO_DEVICE)
- return;
-
- /*
- * For PCI_DMA_FROMDEVICE this flush is not necessary for the
- * simple map/unmap case. However, it IS necessary if if
- * pci_dma_sync_single_* has been called and the buffer reused.
- */
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
+ return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
+ else
+ return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
}
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
- int i;
- struct scatterlist *sg;
-
- BUG_ON(direction == DMA_NONE);
+ int order = get_order(size);
- for_each_sg(sglist, sg, nents, i) {
- unsigned long vaddr = (unsigned long)sg_virt(sg);
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
+ size = 1 << (order + PAGE_SHIFT);
+ unmap_uncached_pages((unsigned long)vaddr, size);
+ pcxl_free_range((unsigned long)vaddr, size);
- sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
- sg_dma_len(sg) = sg->length;
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- flush_kernel_dcache_range(vaddr, sg->length);
+ vaddr = __va(dma_handle);
}
- return nents;
-}
-
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- BUG_ON(direction == DMA_NONE);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
-
- if (direction == DMA_TO_DEVICE)
- return;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
- size);
-}
-
-static void pa11_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
- size);
+ free_pages((unsigned long)vaddr, get_order(size));
}
-static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- int i;
- struct scatterlist *sg;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
+ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- int i;
- struct scatterlist *sg;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
+ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
-
-const struct dma_map_ops pcxl_dma_ops = {
- .alloc = pa11_dma_alloc,
- .free = pa11_dma_free,
- .map_page = pa11_dma_map_page,
- .unmap_page = pa11_dma_unmap_page,
- .map_sg = pa11_dma_map_sg,
- .unmap_sg = pa11_dma_unmap_sg,
- .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .sync_single_for_device = pa11_dma_sync_single_for_device,
- .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .sync_sg_for_device = pa11_dma_sync_sg_for_device,
- .cache_sync = pa11_dma_cache_sync,
-};
-
-static void *pcx_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
- void *addr;
-
- if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
- return NULL;
-
- addr = (void *)__get_free_pages(flag, get_order(size));
- if (addr)
- *dma_handle = (dma_addr_t)virt_to_phys(addr);
-
- return addr;
-}
-
-static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t iova, unsigned long attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
- return;
-}
-
-const struct dma_map_ops pcx_dma_ops = {
- .alloc = pcx_dma_alloc,
- .free = pcx_dma_free,
- .map_page = pa11_dma_map_page,
- .unmap_page = pa11_dma_unmap_page,
- .map_sg = pa11_dma_map_sg,
- .unmap_sg = pa11_dma_unmap_sg,
- .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .sync_single_for_device = pa11_dma_sync_single_for_device,
- .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .sync_sg_for_device = pa11_dma_sync_sg_for_device,
- .cache_sync = pa11_dma_cache_sync,
-};
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b931745815e0..eb39e7e380d7 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p)
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
- } while (count++ < 16);
+ } while (count++ < MAX_UNWIND_ENTRIES);
return 0;
}
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 7aa1d4d0d444..2582df1c529b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -676,3 +676,103 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
return &user_parisc_native_view;
}
+
+
+/* HAVE_REGS_AND_STACK_ACCESS_API feature */
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_INDEX(gr,0),
+ REG_OFFSET_INDEX(gr,1),
+ REG_OFFSET_INDEX(gr,2),
+ REG_OFFSET_INDEX(gr,3),
+ REG_OFFSET_INDEX(gr,4),
+ REG_OFFSET_INDEX(gr,5),
+ REG_OFFSET_INDEX(gr,6),
+ REG_OFFSET_INDEX(gr,7),
+ REG_OFFSET_INDEX(gr,8),
+ REG_OFFSET_INDEX(gr,9),
+ REG_OFFSET_INDEX(gr,10),
+ REG_OFFSET_INDEX(gr,11),
+ REG_OFFSET_INDEX(gr,12),
+ REG_OFFSET_INDEX(gr,13),
+ REG_OFFSET_INDEX(gr,14),
+ REG_OFFSET_INDEX(gr,15),
+ REG_OFFSET_INDEX(gr,16),
+ REG_OFFSET_INDEX(gr,17),
+ REG_OFFSET_INDEX(gr,18),
+ REG_OFFSET_INDEX(gr,19),
+ REG_OFFSET_INDEX(gr,20),
+ REG_OFFSET_INDEX(gr,21),
+ REG_OFFSET_INDEX(gr,22),
+ REG_OFFSET_INDEX(gr,23),
+ REG_OFFSET_INDEX(gr,24),
+ REG_OFFSET_INDEX(gr,25),
+ REG_OFFSET_INDEX(gr,26),
+ REG_OFFSET_INDEX(gr,27),
+ REG_OFFSET_INDEX(gr,28),
+ REG_OFFSET_INDEX(gr,29),
+ REG_OFFSET_INDEX(gr,30),
+ REG_OFFSET_INDEX(gr,31),
+ REG_OFFSET_INDEX(sr,0),
+ REG_OFFSET_INDEX(sr,1),
+ REG_OFFSET_INDEX(sr,2),
+ REG_OFFSET_INDEX(sr,3),
+ REG_OFFSET_INDEX(sr,4),
+ REG_OFFSET_INDEX(sr,5),
+ REG_OFFSET_INDEX(sr,6),
+ REG_OFFSET_INDEX(sr,7),
+ REG_OFFSET_INDEX(iasq,0),
+ REG_OFFSET_INDEX(iasq,1),
+ REG_OFFSET_INDEX(iaoq,0),
+ REG_OFFSET_INDEX(iaoq,1),
+ REG_OFFSET_NAME(cr27),
+ REG_OFFSET_NAME(ksp),
+ REG_OFFSET_NAME(kpc),
+ REG_OFFSET_NAME(sar),
+ REG_OFFSET_NAME(iir),
+ REG_OFFSET_NAME(isr),
+ REG_OFFSET_NAME(ior),
+ REG_OFFSET_NAME(ipsw),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index cc9963421a19..2b16d8d6598f 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -35,12 +35,6 @@ real32_stack:
real64_stack:
.block 8192
-#ifdef CONFIG_64BIT
-# define REG_SZ 8
-#else
-# define REG_SZ 4
-#endif
-
#define N_SAVED_REGS 9
save_cr_space:
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 8d3a7b80ac42..4e87c35c22b7 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -97,14 +97,12 @@ void __init dma_ops_init(void)
panic( "PA-RISC Linux currently only supports machines that conform to\n"
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
- case pcxs:
- case pcxt:
- hppa_dma_ops = &pcx_dma_ops;
- break;
case pcxl2:
pa7300lc_init();
case pcxl: /* falls through */
- hppa_dma_ops = &pcxl_dma_ops;
+ case pcxs:
+ case pcxt:
+ hppa_dma_ops = &dma_noncoherent_ops;
break;
default:
break;
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index e775f80ae28c..5f7e57fcaeef 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -629,11 +629,12 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%r26), %r28
+1: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%r26)
+2: stw %r24, 0(%r26)
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -647,6 +648,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
@@ -796,30 +798,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%r26), %r29
+13: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%r26)
+14: stb %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%r26), %r29
+15: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%r26)
+16: sth %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%r26), %r29
+17: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%r26)
+18: stw %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -827,10 +829,10 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%r26), %r29
+19: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%r26)
+20: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
@@ -848,7 +850,8 @@ cas2_action:
cas2_end:
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
@@ -858,6 +861,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4309ad31a874..318815212518 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info)
int i = 1;
printk(KERN_CRIT "Backtrace:\n");
- while (i <= 16) {
+ while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 2ef83d78eec4..5cdf13069dd9 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name,
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
- printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+ pr_warn("Out of order unwind entry! %px and %px\n",
+ start, start+1);
}
start->region_start += base_addr;
@@ -203,25 +203,60 @@ int __init unwind_init(void)
return 0;
}
-#ifdef CONFIG_64BIT
-#define get_func_addr(fptr) fptr[2]
-#else
-#define get_func_addr(fptr) fptr[0]
-#endif
-
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
- extern void handle_interruption(int, struct pt_regs *);
- static unsigned long *hi = (unsigned long *)&handle_interruption;
-
- if (pc == get_func_addr(hi)) {
+ /*
+	 * We have to use void * instead of a function pointer, because
+	 * on 64-bit a function pointer doesn't point at the function itself.
+	 * Make them const so the compiler knows they live in .text.
+ */
+ extern void * const handle_interruption;
+ extern void * const ret_from_kernel_thread;
+ extern void * const syscall_exit;
+ extern void * const intr_return;
+ extern void * const _switch_to_ret;
+#ifdef CONFIG_IRQSTACKS
+ extern void * const call_on_stack;
+#endif /* CONFIG_IRQSTACKS */
+
+ if (pc == (unsigned long) &handle_interruption) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
+ return 1;
+ }
+
+ if (pc == (unsigned long) &ret_from_kernel_thread ||
+ pc == (unsigned long) &syscall_exit) {
+ info->prev_sp = info->prev_ip = 0;
+ return 1;
+ }
+
+ if (pc == (unsigned long) &intr_return) {
+ struct pt_regs *regs;
+
+ dbg("Found intr_return()\n");
+ regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
+ info->prev_sp = regs->gr[30];
+ info->prev_ip = regs->iaoq[0];
+ info->rp = regs->gr[2];
+ return 1;
+ }
+
+ if (pc == (unsigned long) &_switch_to_ret) {
+ info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ return 1;
+ }
+#ifdef CONFIG_IRQSTACKS
+ if (pc == (unsigned long) &call_on_stack) {
+ info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
+ info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
}
+#endif
return 0;
}
@@ -238,34 +273,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
if (e == NULL) {
unsigned long sp;
- dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
-
-#ifdef CONFIG_KALLSYMS
- /* Handle some frequent special cases.... */
- {
- char symname[KSYM_NAME_LEN];
- char *modname;
-
- kallsyms_lookup(info->ip, NULL, NULL, &modname,
- symname);
-
- dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
-
- if (strcmp(symname, "_switch_to_ret") == 0) {
- info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
- info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
- dbg("_switch_to_ret @ %lx - setting "
- "prev_sp=%lx prev_ip=%lx\n",
- info->ip, info->prev_sp,
- info->prev_ip);
- return;
- } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
- strcmp(symname, "syscall_exit") == 0) {
- info->prev_ip = info->prev_sp = 0;
- return;
- }
- }
-#endif
+ dbg("Cannot find unwind entry for %pS; forced unwinding\n",
+ (void *) info->ip);
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
@@ -439,8 +448,8 @@ unsigned long return_address(unsigned int level)
/* initialize unwind info */
asm volatile ("copy %%r30, %0" : "=r"(sp));
memset(&r, 0, sizeof(struct pt_regs));
- r.iaoq[0] = (unsigned long) current_text_addr();
- r.gr[2] = (unsigned long) __builtin_return_address(0);
+ r.iaoq[0] = _THIS_IP_;
+ r.gr[2] = _RET_IP_;
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index d4fe19806d57..b53fb6fedf06 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -64,9 +64,6 @@
*/
ENTRY_CFI(lclear_user)
- .proc
- .callinfo NO_CALLS
- .entry
comib,=,n 0,%r25,$lclu_done
get_sr
$lclu_loop:
@@ -81,13 +78,9 @@ $lclu_done:
ldo 1(%r25),%r25
ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
-
- .exit
ENDPROC_CFI(lclear_user)
- .procend
-
/*
* long lstrnlen_user(char *s, long n)
*
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user)
*/
ENTRY_CFI(lstrnlen_user)
- .proc
- .callinfo NO_CALLS
- .entry
comib,= 0,%r25,$lslen_nzero
copy %r26,%r24
get_sr
@@ -111,7 +101,6 @@ $lslen_loop:
$lslen_done:
bv %r0(%r2)
sub %r26,%r24,%r28
- .exit
$lslen_nzero:
b $lslen_done
@@ -125,9 +114,6 @@ $lslen_nzero:
ENDPROC_CFI(lstrnlen_user)
- .procend
-
-
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user)
save_len = r31
ENTRY_CFI(pa_memcpy)
- .proc
- .callinfo NO_CALLS
- .entry
-
/* Last destination address */
add dst,len,end
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy)
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
-
- .exit
ENDPROC_CFI(pa_memcpy)
- .procend
.end
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 2607d2d33405..74842d28a7a1 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -19,7 +19,6 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
@@ -616,17 +615,13 @@ void __init mem_init(void)
free_all_bootmem();
#ifdef CONFIG_PA11
- if (hppa_dma_ops == &pcxl_dma_ops) {
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ PCXL_DMA_MAP_SIZE);
- } else {
- pcxl_dma_start = 0;
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
- }
-#else
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+ } else
#endif
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
mem_init_print_info(NULL);
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 682b3e6a1e21..963abf8bf1c0 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -18,18 +18,11 @@
* a "bne-" instruction at the end, so an isync is enough as a acquire barrier
* on the platform without lwsync.
*/
-#define __atomic_op_acquire(op, args...) \
-({ \
- typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
- __ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
- __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
- op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
static __inline__ int atomic_read(const atomic_t *v)
{
@@ -129,8 +122,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
static __inline__ void atomic_inc(atomic_t *v)
{
int t;
@@ -145,6 +136,7 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_inc atomic_inc
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
@@ -163,16 +155,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
static __inline__ void atomic_dec(atomic_t *v)
{
int t;
@@ -187,6 +169,7 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_dec atomic_dec
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
@@ -218,7 +201,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -226,13 +209,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # __atomic_add_unless\n\
+"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
cmpw 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -248,6 +231,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return t;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -280,9 +264,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
-#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
@@ -412,8 +393,6 @@ ATOMIC64_OPS(xor, xor)
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
static __inline__ void atomic64_inc(atomic64_t *v)
{
long t;
@@ -427,6 +406,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_inc atomic64_inc
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
@@ -444,16 +424,6 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
static __inline__ void atomic64_dec(atomic64_t *v)
{
long t;
@@ -467,6 +437,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_dec atomic64_dec
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
@@ -487,9 +458,6 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
@@ -513,6 +481,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
return t;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
@@ -524,7 +493,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -532,13 +501,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
+"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
cmpd 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -551,8 +520,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
- return t != u;
+ return t;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/**
 * atomic64_inc_not_zero - increment unless the number is zero
@@ -582,6 +552,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 8e7b09703ca4..27d6e3c8fde9 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -52,6 +52,7 @@ struct arch_hw_breakpoint {
#include <asm/reg.h>
#include <asm/debug.h>
+struct perf_event_attr;
struct perf_event;
struct pmu;
struct perf_sample_data;
@@ -60,8 +61,10 @@ struct perf_sample_data;
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 9f3be5c8a4a3..785c464b6588 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -88,7 +88,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_msr;
- struct pt_regs jprobe_saved_regs;
struct prev_kprobe prev_kprobe;
};
@@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- return 0;
-}
-#endif
#else
static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 80547dad37da..fec8a6773119 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
/*
* Check for virtual address in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- return is_kernel_addr(info->address);
+ return is_kernel_addr(hw->address);
}
int arch_bp_generic_fields(int type, int *gen_bp_type)
@@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
int ret = -EINVAL, length_max;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (!bp)
return ret;
- info->type = HW_BRK_TYPE_TRANSLATE;
- if (bp->attr.bp_type & HW_BREAKPOINT_R)
- info->type |= HW_BRK_TYPE_READ;
- if (bp->attr.bp_type & HW_BREAKPOINT_W)
- info->type |= HW_BRK_TYPE_WRITE;
- if (info->type == HW_BRK_TYPE_TRANSLATE)
+ hw->type = HW_BRK_TYPE_TRANSLATE;
+ if (attr->bp_type & HW_BREAKPOINT_R)
+ hw->type |= HW_BRK_TYPE_READ;
+ if (attr->bp_type & HW_BREAKPOINT_W)
+ hw->type |= HW_BRK_TYPE_WRITE;
+ if (hw->type == HW_BRK_TYPE_TRANSLATE)
	/* must set at least read or write */
return ret;
- if (!(bp->attr.exclude_user))
- info->type |= HW_BRK_TYPE_USER;
- if (!(bp->attr.exclude_kernel))
- info->type |= HW_BRK_TYPE_KERNEL;
- if (!(bp->attr.exclude_hv))
- info->type |= HW_BRK_TYPE_HYP;
- info->address = bp->attr.bp_addr;
- info->len = bp->attr.bp_len;
+ if (!attr->exclude_user)
+ hw->type |= HW_BRK_TYPE_USER;
+ if (!attr->exclude_kernel)
+ hw->type |= HW_BRK_TYPE_KERNEL;
+ if (!attr->exclude_hv)
+ hw->type |= HW_BRK_TYPE_HYP;
+ hw->address = attr->bp_addr;
+ hw->len = attr->bp_len;
/*
* Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (cpu_has_feature(CPU_FTR_DAWR)) {
		length_max = 512; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
- if ((bp->attr.bp_addr >> 9) !=
- ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+ if ((attr->bp_addr >> 9) !=
+ ((attr->bp_addr + attr->bp_len - 1) >> 9))
return -EINVAL;
}
- if (info->len >
- (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
+ if (hw->len >
+ (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
return -EINVAL;
return 0;
}
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 7a1f99f1b47f..e4a49c051325 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -25,50 +25,6 @@
#include <linux/preempt.h>
#include <linux/ftrace.h>
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
- if (!preemptible()) {
- struct kprobe *p = raw_cpu_read(current_kprobe);
- return (p && (unsigned long)p->addr == addr) ? 1 : 0;
- }
-
- return 0;
-}
-
-static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb, unsigned long orig_nip)
-{
- /*
- * Emulate singlestep (and also recover regs->nip)
- * as if there is a nop
- */
- regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- __this_cpu_write(current_kprobe, NULL);
- if (orig_nip)
- regs->nip = orig_nip;
- return 1;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- if (kprobe_ftrace(p))
- return __skip_singlestep(p, regs, kcb, 0);
- else
- return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct ftrace_ops *ops, struct pt_regs *regs)
@@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct kprobe *p;
struct kprobe_ctlblk *kcb;
- preempt_disable();
-
p = get_kprobe((kprobe_opcode_t *)nip);
if (unlikely(!p) || kprobe_disabled(p))
- goto end;
+ return;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
- unsigned long orig_nip = regs->nip;
-
/*
* On powerpc, NIP is *before* this instruction for the
* pre handler
@@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs))
- __skip_singlestep(p, regs, kcb, orig_nip);
- else {
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
- * If pre_handler returns !0, it sets regs->nip and
- * resets current kprobe. In this case, we should not
- * re-enable preemption.
+ * Emulate singlestep (and also recover regs->nip)
+ * as if there is a nop
*/
- return;
+ regs->nip += MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
}
+ /*
+ * If pre_handler returns !0, it changes regs->nip. We have to
+ * skip emulating post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
}
-end:
- preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e4c5bf33970b..5c60bb0f927f 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs)
}
prepare_singlestep(p, regs);
return 1;
- } else {
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /* If trap variant, then it belongs not to us */
- kprobe_opcode_t cur_insn = *addr;
- if (is_trap(cur_insn))
- goto no_kprobe;
- /* The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+ /* If trap variant, then it belongs not to us */
+ kprobe_opcode_t cur_insn = *addr;
+
+ if (is_trap(cur_insn))
goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- if (!skip_singlestep(p, regs, kcb))
- goto ss_probe;
- ret = 1;
- }
+	/* The breakpoint instruction was removed by
+	 * another cpu right after we hit it; no further
+	 * handling of this interrupt is appropriate.
+	 */
+ ret = 1;
}
goto no_kprobe;
}
@@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs)
*/
kprobe_opcode_t cur_insn = *addr;
if (is_trap(cur_insn))
- goto no_kprobe;
+ goto no_kprobe;
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
@@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
- if (p->pre_handler && p->pre_handler(p, regs))
- /* handler has already set things up, so skip ss setup */
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler changed execution path, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
@@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry)
}
NOKPROBE_SYMBOL(arch_deref_entry_point);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
- /* setup return addr to the jprobe handler routine */
- regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
- regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
- regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
-
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void __used jprobe_return(void)
-{
- asm volatile("jprobe_return_trap:\n"
- "trap\n"
- ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
- pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
- regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
- return 0;
- }
-
- memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
- preempt_enable_no_resched();
- return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 9a5b5a513604..32476a6e4e9c 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -104,39 +104,13 @@ ftrace_regs_call:
bl ftrace_stub
nop
- /* Load the possibly modified NIP */
- ld r15, _NIP(r1)
-
+ /* Load ctr with the possibly modified NIP */
+ ld r3, _NIP(r1)
+ mtctr r3
#ifdef CONFIG_LIVEPATCH
- cmpd r14, r15 /* has NIP been altered? */
+ cmpd r14, r3 /* has NIP been altered? */
#endif
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
- /* NIP has not been altered, skip over further checks */
- beq 1f
-
- /* Check if there is an active jprobe on us */
- subi r3, r14, 4
- bl __is_active_jprobe
- nop
-
- /*
- * If r3 == 1, then this is a kprobe/jprobe.
- * else, this is livepatched function.
- *
- * The conditional branch for livepatch_handler below will use the
- * result of this comparison. For kprobe/jprobe, we just need to branch to
- * the new NIP, not call livepatch_handler. The branch below is bne, so we
- * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
- * CR0[EQ] = (r3 == 1).
- */
- cmpdi r3, 1
-1:
-#endif
-
- /* Load CTR with the possibly modified NIP */
- mtctr r15
-
/* Restore gprs */
REST_GPR(0,r1)
REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
addi r1, r1, SWITCH_FRAME_SIZE
#ifdef CONFIG_LIVEPATCH
- /*
- * Based on the cmpd or cmpdi above, if the NIP was altered and we're
- * not on a kprobe/jprobe, then handle livepatch.
- */
+	/* Based on the cmpd above, if the NIP was altered, handle livepatch */
bne- livepatch_handler
#endif
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de686b340f4a..ee4a8854985e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
}
@@ -3188,7 +3188,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
}
}
- prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
if (kvmppc_vcore_check_block(vc)) {
finish_swait(&vc->wq, &wait);
@@ -3311,7 +3311,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
- swake_up(&vc->wq);
+ swake_up_one(&vc->wq);
}
}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 3f66fcf8ad99..19d8ab49d1bd 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count,
}
/*
- * Add a event to the PMU.
+ * Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
@@ -1548,7 +1548,7 @@ nocheck:
}
/*
- * Remove a event from the PMU.
+ * Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
@@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
- * A event can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 855115ace98c..c452359c9cb8 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,18 +25,11 @@
#define ATOMIC_INIT(i) { (i) }
-#define __atomic_op_acquire(op, args...) \
-({ \
- typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory"); \
- __ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
- __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); \
- op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
+
+#define __atomic_release_fence() \
+	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
static __always_inline int atomic_read(const atomic_t *v)
{
@@ -209,130 +202,8 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-/*
- * The extra atomic operations that are constructed from one of the core
- * AMO-based operations above (aside from sub, which is easier to fit above).
- * These are required to perform a full barrier, but they're OK this way
- * because atomic_*_return is also required to perform a full barrier.
- *
- */
-#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
-static __always_inline \
-bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, func_op, comp_op, I) \
- ATOMIC_OP(op, func_op, comp_op, I, int, )
-#else
-#define ATOMIC_OPS(op, func_op, comp_op, I) \
- ATOMIC_OP(op, func_op, comp_op, I, int, ) \
- ATOMIC_OP(op, func_op, comp_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(add_and_test, add, ==, 0)
-ATOMIC_OPS(sub_and_test, sub, ==, 0)
-ATOMIC_OPS(add_negative, add, <, 0)
-
-#undef ATOMIC_OP
-#undef ATOMIC_OPS
-
-#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
-static __always_inline \
-void atomic##prefix##_##op(atomic##prefix##_t *v) \
-{ \
- atomic##prefix##_##func_op(I, v); \
-}
-
-#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
-static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##func_op##_relaxed(I, v); \
-} \
-static __always_inline \
-c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##func_op(I, v); \
-}
-
-#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
-static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##op##_relaxed(v) c_op I; \
-} \
-static __always_inline \
-c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##op(v) c_op I; \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_OP( op, asm_op, I, int, ) \
- ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
-#else
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_OP( op, asm_op, I, int, ) \
- ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
- ATOMIC_OP( op, asm_op, I, long, 64) \
- ATOMIC_FETCH_OP( op, asm_op, I, long, 64) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(inc, add, +, 1)
-ATOMIC_OPS(dec, add, +, -1)
-
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#define atomic_inc_return atomic_inc_return
-#define atomic_dec_return atomic_dec_return
-
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#define atomic_fetch_inc atomic_fetch_inc
-#define atomic_fetch_dec atomic_fetch_dec
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_inc_return atomic64_inc_return
-#define atomic64_dec_return atomic64_dec_return
-
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#define atomic64_fetch_inc atomic64_fetch_inc
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-
-#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
-static __always_inline \
-bool atomic##prefix##_##op(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_##func_op##_return(v) comp_op I; \
-}
-
-ATOMIC_OP(inc_and_test, inc, ==, 0, )
-ATOMIC_OP(dec_and_test, dec, ==, 0, )
-#ifndef CONFIG_GENERIC_ATOMIC64
-ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
-ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
-#endif
-
-#undef ATOMIC_OP
-
/* This is required to provide a full barrier on success. */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -349,9 +220,10 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
: "memory");
return prev;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
+static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long prev, rc;
@@ -368,27 +240,7 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
: "memory");
return prev;
}
-
-static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- return __atomic64_add_unless(v, a, u) != u;
-}
-#endif
-
-/*
- * The extra atomic operations that are constructed from one of the core
- * LR/SC-based operations above.
- */
-static __always_inline int atomic_inc_not_zero(atomic_t *v)
-{
- return __atomic_add_unless(v, 1, 0);
-}
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
/*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8a1863d9ed53..515240576930 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -106,7 +106,6 @@ config S390
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
- select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
@@ -146,6 +145,7 @@ config S390
select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_UNCOMPRESSED
select HAVE_KERNEL_XZ
select HAVE_KPROBES
select HAVE_KRETPROBES
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 68a690442be0..eee6703093c3 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -14,8 +14,18 @@ LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
-KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
+KBUILD_CFLAGS += -m64
+aflags_dwarf := -Wa,-gdwarf-2
+KBUILD_AFLAGS_DECOMPRESSOR := -m64 -D__ASSEMBLY__
+KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
+KBUILD_CFLAGS_DECOMPRESSOR := -m64 -O2
+KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
STACK_SIZE := 16384
CHECKFLAGS += -D__s390__ -D__s390x__
@@ -52,18 +62,14 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
#
cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
-# old style option for packed stacks
-ifeq ($(call cc-option-yn,-mkernel-backchain),y)
-cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
-aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-endif
-
-# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
endif
+KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
+KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
+
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
ifneq ($(call cc-option-yn,-mstack-size=8192),y)
@@ -71,8 +77,11 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
endif
endif
-ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
-cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
+ifdef CONFIG_WARN_DYNAMIC_STACK
+ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
+ KBUILD_CFLAGS += -mwarn-dynamicstack
+ KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
+ endif
endif
ifdef CONFIG_EXPOLINE
@@ -82,6 +91,7 @@ ifdef CONFIG_EXPOLINE
CC_FLAGS_EXPOLINE += -mindirect-branch-table
export CC_FLAGS_EXPOLINE
cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ aflags-y += -DCC_USING_EXPOLINE
endif
endif
@@ -102,11 +112,12 @@ KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
KBUILD_AFLAGS += $(aflags-y) $(cfi)
+export KBUILD_AFLAGS_DECOMPRESSOR
+export KBUILD_CFLAGS_DECOMPRESSOR
OBJCOPYFLAGS := -O binary
-head-y := arch/s390/kernel/head.o
-head-y += arch/s390/kernel/head64.o
+head-y := arch/s390/kernel/head64.o
# See arch/s390/Kbuild for content of core part of the kernel
core-y += arch/s390/
@@ -121,7 +132,7 @@ boot := arch/s390/boot
syscalls := arch/s390/kernel/syscalls
tools := arch/s390/tools
-all: image bzImage
+all: bzImage
#KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
KBUILD_IMAGE := $(boot)/bzImage
@@ -129,7 +140,7 @@ KBUILD_IMAGE := $(boot)/bzImage
install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
-image bzImage: vmlinux
+bzImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zfcpdump:
@@ -152,8 +163,7 @@ archprepare:
# Don't use tabs in echo arguments
define archhelp
- echo '* image - Kernel image for IPL ($(boot)/image)'
- echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)'
+ echo '* bzImage - Kernel image for IPL ($(boot)/bzImage)'
echo ' install - Install kernel using'
echo ' (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ee6a9c387c87..9bf8489df6e6 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -206,35 +206,28 @@ static int
appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
- char buf[2];
+ int timer_active = appldata_timer_active;
+ int zero = 0;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &timer_active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
- ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
spin_lock(&appldata_timer_lock);
- if (buf[0] == '1')
+ if (timer_active)
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
- else if (buf[0] == '0')
+ else
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
-out:
- *lenp = len;
- *ppos += len;
return 0;
}
@@ -248,37 +241,24 @@ static int
appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
- int interval;
- char buf[16];
+ int interval = appldata_interval;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &interval,
+ .maxlen = sizeof(int),
+ .extra1 = &one,
+ };
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- len = sprintf(buf, "%i\n", appldata_interval);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- interval = 0;
- sscanf(buf, "%i", &interval);
- if (interval <= 0)
- return -EINVAL;
+ rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
spin_lock(&appldata_timer_lock);
appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
-out:
- *lenp = len;
- *ppos += len;
return 0;
}
@@ -293,10 +273,17 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
- unsigned int len;
- int rc, found;
- char buf[2];
struct list_head *lh;
+ int rc, found;
+ int active;
+ int zero = 0;
+ int one = 1;
+ struct ctl_table ctl_entry = {
+ .data = &active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
found = 0;
mutex_lock(&appldata_ops_mutex);
@@ -317,31 +304,15 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
}
mutex_unlock(&appldata_ops_mutex);
- if (!*lenp || *ppos) {
- *lenp = 0;
+ active = ops->active;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write) {
module_put(ops->owner);
- return 0;
- }
- if (!write) {
- strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len)) {
- module_put(ops->owner);
- return -EFAULT;
- }
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len)) {
- module_put(ops->owner);
- return -EFAULT;
+ return rc;
}
mutex_lock(&appldata_ops_mutex);
- if ((buf[0] == '1') && (ops->active == 0)) {
+ if (active && (ops->active == 0)) {
// protect work queue callback
if (!try_module_get(ops->owner)) {
mutex_unlock(&appldata_ops_mutex);
@@ -359,7 +330,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
module_put(ops->owner);
} else
ops->active = 1;
- } else if ((buf[0] == '0') && (ops->active == 1)) {
+ } else if (!active && (ops->active == 1)) {
ops->active = 0;
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
(unsigned long) ops->data, ops->size,
@@ -370,9 +341,6 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
module_put(ops->owner);
}
mutex_unlock(&appldata_ops_mutex);
-out:
- *lenp = len;
- *ppos += len;
module_put(ops->owner);
return 0;
}
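
All three appldata handlers are converted to the same idiom: parse into a local int through a temporary ctl_table whose extra1/extra2 members give the valid range, then apply the value under the existing lock. That removes the hand-rolled copy_to_user()/copy_from_user() string handling entirely. A condensed sketch of the idiom (read_current_state()/apply_new_state() are hypothetical stand-ins):

    static int sketch_bool_handler(struct ctl_table *ctl, int write,
                                   void __user *buffer, size_t *lenp,
                                   loff_t *ppos)
    {
        int val = read_current_state();   /* hypothetical getter */
        int zero = 0, one = 1;
        struct ctl_table tmp = {
            .procname = ctl->procname,
            .data     = &val,             /* parse into a local copy */
            .maxlen   = sizeof(int),
            .extra1   = &zero,            /* lower bound */
            .extra2   = &one,             /* upper bound */
        };
        int rc = proc_douintvec_minmax(&tmp, write, buffer, lenp, ppos);

        if (rc < 0 || !write)
            return rc;                    /* read path, or bad input */
        apply_new_state(val);             /* hypothetical setter, under lock */
        return 0;
    }
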
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index d1fa37fcce83..9e6668ee93de 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -3,19 +3,52 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-targets := image
-targets += bzImage
-subdir- := compressed
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
-$(obj)/image: vmlinux FORCE
- $(call if_changed,objcopy)
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+
+#
+# Use -march=z900 for als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
+AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
+AFLAGS_mem.o += -march=z900
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp_early_core.o += -march=z900
+endif
+
+CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
+
+obj-y := head.o als.o ebcdic.o sclp_early_core.o mem.o
+targets := bzImage startup.a $(obj-y)
+subdir- := compressed
+
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
-$(obj)/compressed/vmlinux: FORCE
+$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
+quiet_cmd_ar = AR $@
+ cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^)
+
+$(obj)/startup.a: $(OBJECTS) FORCE
+ $(call if_changed,ar)
+
install: $(CONFIGURE) $(obj)/bzImage
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
+
+chkbss := $(OBJECTS)
+chkbss-target := $(obj)/startup.a
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/als.c b/arch/s390/boot/als.c
index d1892bf36cab..d592e0d90d9f 100644
--- a/arch/s390/kernel/als.c
+++ b/arch/s390/boot/als.c
@@ -3,12 +3,10 @@
* Copyright IBM Corp. 2016
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/facility.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
-#include "entry.h"
/*
* The code within this file will be called very early. It may _not_
@@ -18,9 +16,9 @@
* For temporary objects the stack (16k) should be used.
*/
-static unsigned long als[] __initdata = { FACILITIES_ALS };
+static unsigned long als[] = { FACILITIES_ALS };
-static void __init u16_to_hex(char *str, u16 val)
+static void u16_to_hex(char *str, u16 val)
{
int i, num;
@@ -33,9 +31,9 @@ static void __init u16_to_hex(char *str, u16 val)
*str = '\0';
}
-static void __init print_machine_type(void)
+static void print_machine_type(void)
{
- static char mach_str[80] __initdata = "Detected machine-type number: ";
+ static char mach_str[80] = "Detected machine-type number: ";
char type_str[5];
struct cpuid id;
@@ -46,7 +44,7 @@ static void __init print_machine_type(void)
sclp_early_printk(mach_str);
}
-static void __init u16_to_decimal(char *str, u16 val)
+static void u16_to_decimal(char *str, u16 val)
{
int div = 1;
@@ -60,9 +58,9 @@ static void __init u16_to_decimal(char *str, u16 val)
*str = '\0';
}
-static void __init print_missing_facilities(void)
+static void print_missing_facilities(void)
{
- static char als_str[80] __initdata = "Missing facilities: ";
+ static char als_str[80] = "Missing facilities: ";
unsigned long val;
char val_str[6];
int i, j, first;
@@ -95,7 +93,7 @@ static void __init print_missing_facilities(void)
sclp_early_printk("See Principles of Operations for facility bits\n");
}
-static void __init facility_mismatch(void)
+static void facility_mismatch(void)
{
sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
@@ -103,7 +101,7 @@ static void __init facility_mismatch(void)
disabled_wait(0x8badcccc);
}
-void __init verify_facilities(void)
+void verify_facilities(void)
{
int i;
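
The move from arch/s390/kernel to arch/s390/boot strips the __init/__initdata annotations, since the boot objects live outside the kernel's init sections. For orientation, a conversion like u16_to_hex() amounts to a nibble loop that avoids printf entirely, because only sclp_early_printk() works this early. A sketch, not the file's exact body:

    static void sketch_u16_to_hex(char *str, unsigned short val)
    {
        int i, num;

        for (i = 12; i >= 0; i -= 4) {
            num = (val >> i) & 0xf;       /* next nibble, MSB first */
            *str++ = num < 10 ? '0' + num : 'a' + num - 10;
        }
        *str = '\0';
    }
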
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index 2088cc140629..45aeb4f08752 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
sizes.h
vmlinux
vmlinux.lds
+vmlinux.scr.lds
vmlinux.bin.full
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 5766f7b9b271..04609478d18b 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,39 +6,29 @@
#
KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += misc.o piggy.o sizes.h head.o
-
-KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
-KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
-KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
-OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o ebcdic.o als.o)
-OBJECTS += $(objtree)/drivers/s390/char/sclp_early_core.o
-OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
$(call if_changed,ld)
-TRIM_HEAD_SIZE := 0x11000
-
-sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - $(TRIM_HEAD_SIZE))/p'
+# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
+sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
quiet_cmd_sizes = GEN $@
cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
-quiet_cmd_trim_head = TRIM $@
- cmd_trim_head = tail -c +$$(($(TRIM_HEAD_SIZE) + 1)) $< > $@
-
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
@@ -48,21 +38,18 @@ $(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(objtree)/$(obj)
$(obj)/misc.o: $(obj)/sizes.h
-OBJCOPYFLAGS_vmlinux.bin.full := -R .comment -S
-$(obj)/vmlinux.bin.full: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+$(obj)/vmlinux.bin: vmlinux
$(call if_changed,objcopy)
-$(obj)/vmlinux.bin: $(obj)/vmlinux.bin.full
- $(call if_changed,trim_head)
-
vmlinux.bin.all-y := $(obj)/vmlinux.bin
-suffix-$(CONFIG_KERNEL_GZIP) := gz
-suffix-$(CONFIG_KERNEL_BZIP2) := bz2
-suffix-$(CONFIG_KERNEL_LZ4) := lz4
-suffix-$(CONFIG_KERNEL_LZMA) := lzma
-suffix-$(CONFIG_KERNEL_LZO) := lzo
-suffix-$(CONFIG_KERNEL_XZ) := xz
+suffix-$(CONFIG_KERNEL_GZIP) := .gz
+suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
+suffix-$(CONFIG_KERNEL_LZ4) := .lz4
+suffix-$(CONFIG_KERNEL_LZMA) := .lzma
+suffix-$(CONFIG_KERNEL_LZO) := .lzo
+suffix-$(CONFIG_KERNEL_XZ) := .xz
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
@@ -78,5 +65,9 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
$(call if_changed,xzkern)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
+$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
$(call if_changed,ld)
+
+chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
+chkbss-target := $(obj)/vmlinux.bin
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 9f94eca0f467..df8dbbc17bcc 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -15,7 +15,7 @@
#include "sizes.h"
__HEAD
-ENTRY(startup_continue)
+ENTRY(startup_decompressor)
basr %r13,0 # get base
.LPG1:
# setup stack
@@ -23,7 +23,7 @@ ENTRY(startup_continue)
aghi %r15,-160
brasl %r14,decompress_kernel
# Set up registers for memory mover. We move the decompressed image to
- # 0x11000, where startup_continue of the decompressed image is supposed
+ # 0x100000, where startup_continue of the decompressed image is supposed
# to be.
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
@@ -33,7 +33,7 @@ ENTRY(startup_continue)
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
# When the memory mover is done we pass control to
- # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+ # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
# the decompressed image.
lgr %r6,%r2
br %r1
@@ -47,6 +47,6 @@ mover_end:
.Lstack:
.quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
- .quad 0x11000
+ .quad 0x100000
.Lmvsize:
.quad SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 511b2cc9b91a..f66ad73c205b 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -71,43 +71,6 @@ static int puts(const char *s)
return 0;
}
-void *memset(void *s, int c, size_t n)
-{
- char *xs;
-
- xs = s;
- while (n--)
- *xs++ = c;
- return s;
-}
-
-void *memcpy(void *dest, const void *src, size_t n)
-{
- const char *s = src;
- char *d = dest;
-
- while (n--)
- *d++ = *s++;
- return dest;
-}
-
-void *memmove(void *dest, const void *src, size_t n)
-{
- const char *s = src;
- char *d = dest;
-
- if (d <= s) {
- while (n--)
- *d++ = *s++;
- } else {
- d += n;
- s += n;
- while (n--)
- *--d = *--s;
- }
- return dest;
-}
-
static void error(char *x)
{
unsigned long long psw = 0x000a0000deadbeefULL;
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index d43c2db12d30..b16ac8b3c439 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -23,13 +23,10 @@ SECTIONS
*(.text.*)
_etext = . ;
}
- .rodata.compressed : {
- *(.rodata.compressed)
- }
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
- *(.rodata.*)
+ *(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
_erodata = . ;
}
.data : {
@@ -38,6 +35,15 @@ SECTIONS
*(.data.*)
_edata = . ;
}
+ startup_continue = 0x100000;
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+ . = 0x100000;
+#else
+ . = ALIGN(8);
+#endif
+ .rodata.compressed : {
+ *(.rodata.compressed)
+ }
. = ALIGN(256);
.bss : {
_bss = . ;
@@ -54,5 +60,6 @@ SECTIONS
*(.eh_frame)
*(__ex_table)
*(*__ksymtab*)
+ *(___kcrctab*)
}
}
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr.lds.S
index 42a242597f34..ff01d18c9222 100644
--- a/arch/s390/boot/compressed/vmlinux.scr
+++ b/arch/s390/boot/compressed/vmlinux.scr.lds.S
@@ -2,10 +2,14 @@
SECTIONS
{
.rodata.compressed : {
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
input_len = .;
LONG(input_data_end - input_data) input_data = .;
+#endif
*(.data)
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
output_len = . - 4;
input_data_end = .;
+#endif
}
}
diff --git a/arch/s390/boot/ebcdic.c b/arch/s390/boot/ebcdic.c
new file mode 100644
index 000000000000..7391e7d36086
--- /dev/null
+++ b/arch/s390/boot/ebcdic.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/ebcdic.c"
diff --git a/arch/s390/kernel/head.S b/arch/s390/boot/head.S
index 5c42f16a54c4..f721913b73f1 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/boot/head.S
@@ -272,14 +272,14 @@ iplstart:
.org 0x10000
ENTRY(startup)
j .Lep_startup_normal
- .org 0x10008
+ .org EP_OFFSET
#
# This is a list of s390 kernel entry points. At address 0x1000f the number of
# valid entry points is stored.
#
# IMPORTANT: Do not change this table, it is s390 kernel ABI!
#
- .ascii "S390EP"
+ .ascii EP_STRING
.byte 0x00,0x01
#
# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
@@ -310,10 +310,11 @@ ENTRY(startup_kdump)
l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
-# For uncompressed images, continue in
-# arch/s390/kernel/head64.S. For compressed images, continue in
-# arch/s390/boot/compressed/head.S.
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
jg startup_continue
+#else
+ jg startup_decompressor
+#endif
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/boot/head_kdump.S
index 174d6959bf5b..174d6959bf5b 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/boot/head_kdump.S
diff --git a/arch/s390/boot/mem.S b/arch/s390/boot/mem.S
new file mode 100644
index 000000000000..b33463633f03
--- /dev/null
+++ b/arch/s390/boot/mem.S
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../lib/mem.S"
diff --git a/arch/s390/boot/sclp_early_core.c b/arch/s390/boot/sclp_early_core.c
new file mode 100644
index 000000000000..5a19fd7020b5
--- /dev/null
+++ b/arch/s390/boot/sclp_early_core.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../drivers/s390/char/sclp_early_core.c"
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index a2945b289a29..3452e18bb1ca 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -497,7 +497,7 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
}
diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_RET(rc);
+ return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
@@ -544,7 +544,7 @@ static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
return PTR_ERR(rc);
diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_RET(rc);
+ return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
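
PTR_RET() is the deprecated old name for PTR_ERR_OR_ZERO(); both collapse an ERR_PTR-style pointer into an int return code. Conceptually:

    /* What PTR_ERR_OR_ZERO() evaluates to. */
    static inline int sketch_ptr_err_or_zero(const void *ptr)
    {
        if (IS_ERR(ptr))
            return PTR_ERR(ptr);   /* encoded errno */
        return 0;                  /* valid pointer */
    }
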
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 06b513d192b9..c681329fdeec 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -36,7 +36,7 @@ struct hypfs_sb_info {
kuid_t uid; /* uid used for files and dirs */
kgid_t gid; /* gid used for files and dirs */
struct dentry *update_file; /* file to trigger update */
- time_t last_update; /* last update time in secs since 1970 */
+ time64_t last_update; /* last update, CLOCK_MONOTONIC time */
struct mutex lock; /* lock to protect update process */
};
@@ -52,7 +52,7 @@ static void hypfs_update_update(struct super_block *sb)
struct hypfs_sb_info *sb_info = sb->s_fs_info;
struct inode *inode = d_inode(sb_info->update_file);
- sb_info->last_update = get_seconds();
+ sb_info->last_update = ktime_get_seconds();
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
}
@@ -179,7 +179,7 @@ static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
* to restart data collection in this case.
*/
mutex_lock(&fs_info->lock);
- if (fs_info->last_update == get_seconds()) {
+ if (fs_info->last_update == ktime_get_seconds()) {
rc = -EBUSY;
goto out;
}
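
The hypfs conversion swaps get_seconds() (wall clock, 32-bit time_t on some ABIs, jumps when the clock is set) for ktime_get_seconds() (monotonic, time64_t). The update throttle only asks "same second as the last update?", so a monotonic clock is strictly better. The pattern in isolation (sketch_ name hypothetical):

    static bool sketch_update_allowed(time64_t *last_update)
    {
        time64_t now = ktime_get_seconds();   /* monotonic seconds */

        if (*last_update == now)
            return false;                     /* throttled: same second */
        *last_update = now;
        return true;
    }
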
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c1bedb4c8de0..046e044a48d0 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -47,6 +47,50 @@ struct ap_queue_status {
};
/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed, otherwise -ENODEV.
+ */
+static inline int ap_instructions_available(void)
+{
+ register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
+ register unsigned long reg1 asm ("1") = -ENODEV;
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg1), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_tapq(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @info: Pointer to queue descriptor
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "=d" (reg1), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ if (info)
+ *info = reg2;
+ return reg1;
+}
+
+/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @tbit: Test facilities bit
@@ -54,10 +98,57 @@ struct ap_queue_status {
*
* Returns AP queue status structure.
*/
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info);
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
+ int tbit,
+ unsigned long *info)
+{
+ if (tbit)
+ qid |= 1UL << 23; /* set T bit */
+ return ap_tapq(qid, info);
+}
+/**
+ * ap_rapq(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | (1UL << 24);
+ register struct ap_queue_status reg1 asm ("1");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(RAPQ) */
+ : "=d" (reg1)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_zapq(): Reset and zeroize adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | (2UL << 24);
+ register struct ap_queue_status reg1 asm ("1");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(ZAPQ) */
+ : "=d" (reg1)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * struct ap_config_info - convenience struct for AP crypto
+ * config info as returned by the ap_qci() function.
+ */
struct ap_config_info {
unsigned int apsc : 1; /* S bit */
unsigned int apxa : 1; /* N bit */
@@ -74,50 +165,189 @@ struct ap_config_info {
unsigned char _reserved4[16];
} __aligned(8);
-/*
- * ap_query_configuration(): Fetch cryptographic config info
+/**
+ * ap_qci(): Get AP configuration data
*
- * Returns the ap configuration info fetched via PQAP(QCI).
- * On success 0 is returned, on failure a negative errno
- * is returned, e.g. if the PQAP(QCI) instruction is not
- * available, the return value will be -EOPNOTSUPP.
+ * Returns 0 on success, or -EOPNOTSUPP.
*/
-int ap_query_configuration(struct ap_config_info *info);
+static inline int ap_qci(struct ap_config_info *config)
+{
+ register unsigned long reg0 asm ("0") = 4UL << 24;
+ register unsigned long reg1 asm ("1") = -EOPNOTSUPP;
+ register struct ap_config_info *reg2 asm ("2") = config;
+
+ asm volatile(
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg1)
+ : "d" (reg0), "d" (reg2)
+ : "cc", "memory");
+
+ return reg1;
+}
/*
* struct ap_qirq_ctrl - convenient struct for easy invocation
- * of the ap_queue_irq_ctrl() function. This struct is passed
- * as GR1 parameter to the PQAP(AQIC) instruction. For details
- * please see the AR documentation.
+ * of the ap_aqic() function. This struct is passed as GR1
+ * parameter to the PQAP(AQIC) instruction. For details please
+ * see the AR documentation.
*/
struct ap_qirq_ctrl {
unsigned int _res1 : 8;
- unsigned int zone : 8; /* zone info */
- unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
+ unsigned int zone : 8; /* zone info */
+ unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
unsigned int _res2 : 4;
- unsigned int gisc : 3; /* guest isc field */
+ unsigned int gisc : 3; /* guest isc field */
unsigned int _res3 : 6;
- unsigned int gf : 2; /* gisa format */
+ unsigned int gf : 2; /* gisa format */
unsigned int _res4 : 1;
- unsigned int gisa : 27; /* gisa origin */
+ unsigned int gisa : 27; /* gisa origin */
unsigned int _res5 : 1;
- unsigned int isc : 3; /* irq sub class */
+ unsigned int isc : 3; /* irq sub class */
};
/**
- * ap_queue_irq_ctrl(): Control interruption on a AP queue.
+ * ap_aqic(): Control interruption for a specific AP.
* @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl, see above
+ * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
* @ind: The notification indicator byte
*
* Returns AP queue status.
+ */
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ struct ap_qirq_ctrl qirqctrl,
+ void *ind)
+{
+ register unsigned long reg0 asm ("0") = qid | (3UL << 24);
+ register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
+ register struct ap_queue_status reg1_out asm ("1");
+ register void *reg2 asm ("2") = ind;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(AQIC) */
+ : "=d" (reg1_out)
+ : "d" (reg0), "d" (reg1_in), "d" (reg2)
+ : "cc");
+ return reg1_out;
+}
+
+/*
+ * union ap_qact_ap_info - used together with the
+ * ap_qact() function to provide a convenient way
+ * to handle the ap info needed by the qact function.
+ */
+union ap_qact_ap_info {
+ unsigned long val;
+ struct {
+ unsigned int : 3;
+ unsigned int mode : 3;
+ unsigned int : 26;
+ unsigned int cat : 8;
+ unsigned int : 8;
+ unsigned char ver[2];
+ };
+};
+
+/**
+ * ap_qact(): Query AP compatibility type.
+ * @qid: The AP queue number
+ * @apinfo: On input the info about the AP queue. On output the
+ * alternate AP queue info provided by the qact function
+ * in GR2 is stored in it.
*
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
+ * Returns AP queue status. Check response_code field for failures.
*/
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind);
+static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ union ap_qact_ap_info *apinfo)
+{
+ register unsigned long reg0 asm ("0") = qid | (5UL << 24)
+ | ((ifbit & 0x01) << 22);
+ register unsigned long reg1_in asm ("1") = apinfo->val;
+ register struct ap_queue_status reg1_out asm ("1");
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(QACT) */
+ : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ apinfo->val = reg2;
+ return reg1_out;
+}
+
+/**
+ * ap_nqap(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
+ unsigned long long psmid,
+ void *msg, size_t length)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* NQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5)
+ : "cc", "memory");
+ return reg1;
+}
+
+/**
+ * ap_dqap(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially. The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
+ unsigned long long *psmid,
+ void *msg, size_t length)
+{
+ register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm("2") = 0UL;
+ register unsigned long reg4 asm("4") = (unsigned long) msg;
+ register unsigned long reg5 asm("5") = (unsigned long) length;
+ register unsigned long reg6 asm("6") = 0UL;
+ register unsigned long reg7 asm("7") = 0UL;
+
+
+ "0: .long 0xb2ae0064\n" /* DQAP */
+ " brc 6,0b\n"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2),
+ "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
+ : : "cc", "memory");
+ *psmid = (((unsigned long long) reg6) << 32) + reg7;
+ return reg1;
+}
#endif /* _ASM_S390_AP_H_ */
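
With the wrappers inlined into ap.h, builtin code can issue PQAP directly instead of linking against the zcrypt module. A hedged usage sketch, assuming struct ap_queue_status carries the architecture's response_code field (error handling reduced to the essentials):

    static int sketch_probe_ap(void)
    {
        struct ap_queue_status status;
        unsigned long info;

        if (ap_instructions_available())
            return -ENODEV;               /* PQAP not installed */

        status = ap_tapq(AP_MKQID(0, 0), &info);
        return status.response_code ? -EIO : 0;
    }
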
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 4b55532f15c4..fd20ab5d4cf7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -55,17 +55,9 @@ static inline void atomic_add(int i, atomic_t *v)
__atomic_add(i, &v->counter);
}
-#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v) atomic_add(1, _v)
-#define atomic_inc_return(_v) atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v)
-#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v) atomic_sub(1, _v)
-#define atomic_dec_return(_v) atomic_sub_return(1, _v)
-#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
@@ -90,21 +82,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return __atomic_cmpxchg(&v->counter, old, new);
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == u))
- break;
- old = atomic_cmpxchg(v, c, c + a);
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define ATOMIC64_INIT(i) { (i) }
static inline long atomic64_read(const atomic64_t *v)
@@ -168,50 +145,8 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
-{
- long c, old;
-
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == u))
- break;
- old = atomic64_cmpxchg(v, c, c + i);
- if (likely(old == c))
- break;
- c = old;
- }
- return c != u;
-}
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
-
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
-#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add(1, _v)
-#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
-#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub(1, _v)
-#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#endif /* __ARCH_S390_ATOMIC__ */
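
As on the other architectures in this series, nothing is lost by the deletions: the generic <linux/atomic.h> layer now supplies every derived operation an architecture does not define itself, essentially as one-liners over the core ops. A sketch of that fallback shape (sketch_ names hypothetical):

    #define sketch_atomic_inc(v)             atomic_add(1, (v))
    #define sketch_atomic_dec(v)             atomic_sub(1, (v))
    #define sketch_atomic_inc_return(v)      atomic_add_return(1, (v))
    #define sketch_atomic_dec_and_test(v)    (atomic_sub_return(1, (v)) == 0)
    #define sketch_atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
    #define sketch_atomic_inc_not_zero(v)    atomic_add_unless((v), 1, 0)
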
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index de023a9a88ca..bf2cbff926ef 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -2,7 +2,7 @@
/*
* CPU-measurement facilities
*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2018
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@@ -139,8 +139,14 @@ struct hws_trailer_entry {
unsigned char timestamp[16]; /* 16 - 31 timestamp */
unsigned long long reserved1; /* 32 -Reserved */
unsigned long long reserved2; /* */
- unsigned long long progusage1; /* 48 - reserved for programming use */
- unsigned long long progusage2; /* */
+ union { /* 48 - reserved for programming use */
+ struct {
+ unsigned int clock_base:1; /* in progusage2 */
+ unsigned long long progusage1:63;
+ unsigned long long progusage2;
+ };
+ unsigned long long progusage[2];
+ };
} __packed;
/* Load program parameter */
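
The trailer-entry change overlays named bitfields on the former progusage words, so consumers that treat the area as two raw 64-bit values keep working while new code can test clock_base directly. The technique in isolation (a sketch; field widths follow the hunk):

    union sketch_progusage {
        struct {
            unsigned long long clock_base : 1;   /* new named flag */
            unsigned long long progusage1 : 63;
            unsigned long long progusage2;
        };
        unsigned long long progusage[2];         /* legacy raw view */
    };
    _Static_assert(sizeof(union sketch_progusage) == 16, "layout kept");
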
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index e07cce88dfb0..fcbd638fb9f4 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -9,6 +9,14 @@
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H
+/* Generic bits for GMAP notification on DAT table entry changes. */
+#define GMAP_NOTIFY_SHADOW 0x2
+#define GMAP_NOTIFY_MPROT 0x1
+
+/* Status bits only for huge segment entries */
+#define _SEGMENT_ENTRY_GMAP_IN 0x8000 /* invalidation notify bit */
+#define _SEGMENT_ENTRY_GMAP_UC 0x4000 /* dirty (migration) */
+
/**
* struct gmap_struct - guest address space
* @list: list head for the mm->context gmap list
@@ -132,4 +140,6 @@ void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
int gmap_mprotect_notify(struct gmap *, unsigned long start,
unsigned long len, int prot);
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+ unsigned long gaddr, unsigned long vmaddr);
#endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 9c5fc50204dd..2d1afa58a4b6 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -37,7 +37,10 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-#define arch_clear_hugepage_flags(page) do { } while (0)
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_arch_1, &page->flags);
+}
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 13de80cf741c..b106aa29bf55 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -68,8 +68,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_saved_imask;
unsigned long kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *p);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 5bc888841eaf..406d940173ab 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -185,7 +185,7 @@ struct lowcore {
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
-} __packed;
+} __packed __aligned(8192);
#define S390_lowcore (*((struct lowcore *) 0))
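
Annotating the lowcore as __aligned(8192) turns an implicit placement rule into one the compiler checks: every struct lowcore now lands on the 8 KiB boundary that installing it through the prefix register presumes (motivation as read from the hunk, not stated in it). The contract can be pinned with a static assertion:

    _Static_assert(__alignof__(struct lowcore) == 8192,
                   "lowcore must stay on an 8 KiB boundary");
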
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f5ff9dbad8ac..f31a15044c24 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -24,6 +24,8 @@ typedef struct {
unsigned int uses_skeys:1;
/* The mmu context uses CMM. */
unsigned int uses_cmm:1;
+ /* The gmaps associated with this context are allowed to use huge pages. */
+ unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d16bc79c30bb..0717ee76885d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -32,6 +32,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.uses_skeys = 0;
mm->context.uses_cmm = 0;
+ mm->context.allow_gmap_hpage_1m = 0;
#endif
switch (mm->context.asce_limit) {
case _REGION2_SIZE:
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index a01f81186e86..123dac3717b3 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -8,7 +8,7 @@
#ifdef __ASSEMBLY__
-#ifdef CONFIG_EXPOLINE
+#ifdef CC_USING_EXPOLINE
_LC_BR_R1 = __LC_BR_R1
@@ -189,7 +189,7 @@ _LC_BR_R1 = __LC_BR_R1
.macro BASR_EX rsave,rtarget,ruse=%r1
basr \rsave,\rtarget
.endm
-#endif
+#endif /* CC_USING_EXPOLINE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 94f8db468c9b..10fe982f2b4b 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -51,6 +51,10 @@ struct zpci_fmb_fmt2 {
u64 max_work_units;
};
+struct zpci_fmb_fmt3 {
+ u64 tx_bytes;
+};
+
struct zpci_fmb {
u32 format : 8;
u32 fmt_ind : 24;
@@ -66,6 +70,7 @@ struct zpci_fmb {
struct zpci_fmb_fmt0 fmt0;
struct zpci_fmb_fmt1 fmt1;
struct zpci_fmb_fmt2 fmt2;
+ struct zpci_fmb_fmt3 fmt3;
};
} __packed __aligned(128);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5ab636089c60..0e7cb0dc9c33 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -268,8 +268,10 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
@@ -1101,7 +1103,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
+ pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
@@ -1116,6 +1119,10 @@ int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
unsigned long *oldpte, unsigned long *oldpgste);
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
/*
* Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h
index 6090670df51f..e297bcfc476f 100644
--- a/arch/s390/include/asm/purgatory.h
+++ b/arch/s390/include/asm/purgatory.h
@@ -13,11 +13,5 @@
int verify_sha256_digest(void);
-extern u64 kernel_entry;
-extern u64 kernel_type;
-
-extern u64 crash_start;
-extern u64 crash_size;
-
#endif /* __ASSEMBLY__ */
#endif /* _S390_PURGATORY_H_ */
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index de11ecc99c7c..9c9970a5dfb1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -262,7 +262,6 @@ struct qdio_outbuf_state {
void *user;
};
-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 54f81f8ed662..724faede8ac5 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,4 @@
#include <asm-generic/sections.h>
-extern char _ehead[];
-
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 9c30ebe046f3..1d66016f4170 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -9,8 +9,10 @@
#include <linux/const.h>
#include <uapi/asm/setup.h>
-
+#define EP_OFFSET 0x10008
+#define EP_STRING "S390EP"
#define PARMAREA 0x10400
+#define PARMAREA_END 0x11000
/*
* Machine features detected in early.c
diff --git a/arch/s390/include/uapi/asm/chsc.h b/arch/s390/include/uapi/asm/chsc.h
index dc329aa03f76..83a574e95b3a 100644
--- a/arch/s390/include/uapi/asm/chsc.h
+++ b/arch/s390/include/uapi/asm/chsc.h
@@ -23,29 +23,29 @@ struct chsc_async_header {
__u32 key : 4;
__u32 : 28;
struct subchannel_id sid;
-} __attribute__ ((packed));
+};
struct chsc_async_area {
struct chsc_async_header header;
__u8 data[CHSC_SIZE - sizeof(struct chsc_async_header)];
-} __attribute__ ((packed));
+};
struct chsc_header {
__u16 length;
__u16 code;
-} __attribute__ ((packed));
+};
struct chsc_sync_area {
struct chsc_header header;
__u8 data[CHSC_SIZE - sizeof(struct chsc_header)];
-} __attribute__ ((packed));
+};
struct chsc_response_struct {
__u16 length;
__u16 code;
__u32 parms;
__u8 data[CHSC_SIZE - 2 * sizeof(__u16) - sizeof(__u32)];
-} __attribute__ ((packed));
+};
struct chsc_chp_cd {
struct chp_id chpid;
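
Dropping __attribute__((packed)) here is safe only because every member of these structures is naturally aligned already, so the layout stays identical while the compiler regains the freedom to emit ordinary aligned accesses. One way to lock that in when making such a change (a sketch, not part of the patch):

    _Static_assert(sizeof(struct chsc_header) == 4, "u16 length + u16 code");
    _Static_assert(offsetof(struct chsc_response_struct, data) == 8,
                   "u16 + u16 + u32 precede data");
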
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2fed39b26b42..dbfd1730e631 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -9,39 +9,21 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace early setup code
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early_nobss.o = $(CC_FLAGS_FTRACE)
endif
-GCOV_PROFILE_als.o := n
GCOV_PROFILE_early.o := n
GCOV_PROFILE_early_nobss.o := n
-KCOV_INSTRUMENT_als.o := n
KCOV_INSTRUMENT_early.o := n
KCOV_INSTRUMENT_early_nobss.o := n
-UBSAN_SANITIZE_als.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_early_nobss.o := n
#
-# Use -march=z900 for als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-
-CFLAGS_als.o += -D__NO_FORTIFY
-
-#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
CFLAGS_smp.o := -Wno-nonnull
@@ -61,13 +43,13 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o early_nobss.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
obj-y += nospec-branch.o
-extra-y += head.o head64.o vmlinux.lds
+extra-y += head64.o vmlinux.lds
obj-$(CONFIG_SYSFS) += nospec-sysfs.o
CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
@@ -99,5 +81,5 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
-chkbss := head.o head64.o als.o early_nobss.o
+chkbss := head64.o early_nobss.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 9f5ea9d87069..c3620bafc374 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -306,6 +306,15 @@ static void *kzalloc_panic(int len)
return rc;
}
+static const char *nt_name(Elf64_Word type)
+{
+ const char *name = "LINUX";
+
+ if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
+ name = KEXEC_CORE_NOTE_NAME;
+ return name;
+}
+
/*
* Initialize ELF note
*/
@@ -332,11 +341,26 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
- const char *note_name = "LINUX";
+ return nt_init_name(buf, type, desc, d_len, nt_name(type));
+}
+
+/*
+ * Calculate the size of ELF note
+ */
+static size_t nt_size_name(int d_len, const char *name)
+{
+ size_t size;
- if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
- note_name = KEXEC_CORE_NOTE_NAME;
- return nt_init_name(buf, type, desc, d_len, note_name);
+ size = sizeof(Elf64_Nhdr);
+ size += roundup(strlen(name) + 1, 4);
+ size += roundup(d_len, 4);
+
+ return size;
+}
+
+static inline size_t nt_size(Elf64_Word type, int d_len)
+{
+ return nt_size_name(d_len, nt_name(type));
}
/*
@@ -375,6 +399,29 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
}
/*
+ * Calculate size of ELF notes per cpu
+ */
+static size_t get_cpu_elf_notes_size(void)
+{
+ struct save_area *sa = NULL;
+ size_t size;
+
+ size = nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
+ size += nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
+ size += nt_size(NT_S390_TIMER, sizeof(sa->timer));
+ size += nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
+ size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
+ size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
+ size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
+ if (MACHINE_HAS_VX) {
+ size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
+ size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
+ }
+
+ return size;
+}
+
+/*
* Initialize prpsinfo note (new kernel)
*/
static void *nt_prpsinfo(void *ptr)
@@ -429,6 +476,30 @@ static void *nt_vmcoreinfo(void *ptr)
return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}
+static size_t nt_vmcoreinfo_size(void)
+{
+ const char *name = "VMCOREINFO";
+ char nt_name[11];
+ Elf64_Nhdr note;
+ void *addr;
+
+ if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
+ return 0;
+
+ if (copy_oldmem_kernel(&note, addr, sizeof(note)))
+ return 0;
+
+ memset(nt_name, 0, sizeof(nt_name));
+ if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
+ sizeof(nt_name) - 1))
+ return 0;
+
+ if (strcmp(nt_name, name) != 0)
+ return 0;
+
+ return nt_size_name(note.n_descsz, name);
+}
+
/*
* Initialize final note (needed for /proc/vmcore code)
*/
@@ -539,6 +610,27 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
return ptr;
}
+static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+{
+ size_t size;
+
+ size = sizeof(Elf64_Ehdr);
+ /* PT_NOTES */
+ size += sizeof(Elf64_Phdr);
+ /* nt_prpsinfo */
+ size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
+ /* regsets */
+ size += get_cpu_cnt() * get_cpu_elf_notes_size();
+ /* nt_vmcoreinfo */
+ size += nt_vmcoreinfo_size();
+ /* nt_final */
+ size += sizeof(Elf64_Nhdr);
+ /* PT_LOADS */
+ size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+
+ return size;
+}
+
/*
* Create ELF core header (new kernel)
*/
@@ -566,8 +658,8 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
mem_chunk_cnt = get_mem_chunk_cnt();
- alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
- mem_chunk_cnt * sizeof(Elf64_Phdr);
+ alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+
hdr = kzalloc_panic(alloc_size);
/* Init elf header */
ptr = ehdr_init(hdr, mem_chunk_cnt);
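
An ELF note is an Elf64_Nhdr followed by the note name and descriptor, each padded to a 4-byte boundary, which is exactly what nt_size_name() computes; the header-size estimate is then just a sum of such terms plus the program headers. For instance, a NT_PRSTATUS note named "CORE" costs:

    /* Mirrors nt_size_name(): 12-byte Elf64_Nhdr, "CORE\0" padded
     * from 5 to 8 bytes, descriptor padded to a multiple of 4.
     */
    size_t note_size = sizeof(Elf64_Nhdr)               /* 12 */
                     + roundup(strlen("CORE") + 1, 4)   /*  8 */
                     + roundup(sizeof(struct elf_prstatus), 4);

Replacing the old 0x1000 + 0x4a0-per-cpu guess with this exact accounting is what lets elfcorehdr_alloc() size the buffer correctly.
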
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 827699eb48fa..5b28b434f8a1 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -331,8 +331,20 @@ static void __init setup_boot_command_line(void)
append_to_cmdline(append_ipl_scpdata);
}
+static void __init check_image_bootable(void)
+{
+ if (!memcmp(EP_STRING, (void *)EP_OFFSET, strlen(EP_STRING)))
+ return;
+
+ sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
+ sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
+ sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
+ disabled_wait(0xbadb007);
+}
+
void __init startup_init(void)
{
+ check_image_bootable();
time_early_init();
init_kernel_storage_key();
lockdep_off();
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 961abfac2c5f..472fa2f1a4a5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -83,7 +83,6 @@ long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user
DECLARE_PER_CPU(u64, mt_cycles[8]);
-void verify_facilities(void);
void gs_load_bc_cb(struct pt_regs *regs);
void set_fs_fixup(void);
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 791cb9000e86..6d14ad42ba88 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -48,11 +48,23 @@ ENTRY(startup_continue)
# Early machine initialization and detection functions.
#
brasl %r14,startup_init
- lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
+
+# check control registers
+ stctg %c0,%c15,0(%r15)
+ oi 6(%r15),0x60 # enable sigp emergency & external call
+ oi 4(%r15),0x10 # switch on low address protection
+ lctlg %c0,%c15,0(%r15)
+
+ lam 0,15,.Laregs-.LPG1(%r13) # load acrs needed by uaccess
+ brasl %r14,start_kernel # go to C code
+#
+# We returned from start_kernel ?!? PANIK
+#
+ basr %r13,0
+ lpswe .Ldw-.(%r13) # load disabled wait psw
+
.align 16
.LPG1:
-.Lentry:.quad 0x0000000180000000,_stext
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
@@ -85,30 +97,5 @@ ENTRY(startup_continue)
.endr
.Llinkage_stack:
.long 0,0,0x89000000,0,0,0,0x8a000000,0
-
-ENTRY(_ehead)
-
- .org 0x100000 - 0x11000 # head.o ends at 0x11000
-#
-# startup-code, running in absolute addressing mode
-#
-ENTRY(_stext)
- basr %r13,0 # get base
-.LPG3:
-# check control registers
- stctg %c0,%c15,0(%r15)
- oi 6(%r15),0x60 # enable sigp emergency & external call
- oi 4(%r15),0x10 # switch on low address proctection
- lctlg %c0,%c15,0(%r15)
-
- lam 0,15,.Laregs-.LPG3(%r13) # load acrs needed by uaccess
- brasl %r14,start_kernel # go to C code
-#
-# We returned from start_kernel ?!? PANIK
-#
- basr %r13,0
- lpswe .Ldw-.(%r13) # load disabled wait psw
-
- .align 8
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 60f60afa645c..7c0a095e9c5f 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -321,38 +321,20 @@ static int kprobe_handler(struct pt_regs *regs)
* If we have no pre-handler or it returned 0, we
* continue with single stepping. If we have a
* pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry
- * for jprobe processing, so get out doing nothing
- * more here.
+ * for changing execution path, so get out doing
+ * nothing more here.
*/
push_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ pop_kprobe(kcb);
+ preempt_enable_no_resched();
return 1;
+ }
kcb->kprobe_status = KPROBE_HIT_SS;
}
enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- /*
- * Continuation after the jprobe completed and
- * caused the jprobe_return trap. The jprobe
- * break_handler "returns" to the original
- * function that still has the kprobe breakpoint
- * installed. We continue with single stepping.
- */
- kcb->kprobe_status = KPROBE_HIT_SS;
- enable_singlestep(kcb, regs,
- (unsigned long) p->ainsn.insn);
- return 1;
- } /* else:
- * No kprobe at this address and the current kprobe
- * has no break handler (no jprobe!). The kernel just
- * exploded, let the standard trap handler pick up the
- * pieces.
- */
} /* else:
* No kprobe at this address and no active kprobe. The trap has
* not been caused by a kprobe breakpoint. The race of breakpoint
@@ -452,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->psw.addr = orig_ret_address;
- pop_kprobe(get_kprobe_ctlblk());
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
@@ -661,60 +641,6 @@ int kprobe_exceptions_notify(struct notifier_block *self,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack;
-
- memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
- /* setup return addr to the jprobe handler routine */
- regs->psw.addr = (unsigned long) jp->entry;
- regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-
- /* r15 is the stack pointer */
- stack = (unsigned long) regs->gprs[15];
-
- memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer to get messed up.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
- asm volatile(".word 0x0002");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack;
-
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
-
- stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
-
- /* Put the regs back */
- memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
- /* put the stack back */
- memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
- preempt_enable_no_resched();
- return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
static struct kprobe trampoline = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
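
With jprobes gone, a pre_handler returning non-zero now means the handler itself redirected execution (as kretprobes do), so the hunk above has to unwind the per-CPU kprobe state and re-enable preemption on the spot instead of deferring that cleanup to a break_handler re-entry. A minimal userspace model of the new control flow follows; every name in it is a hypothetical stand-in, not kernel API:

#include <stdio.h>

struct probe {
	int (*pre_handler)(struct probe *p);
};

static int probe_depth;	/* models the kcb kprobe stack */
static int preempt_off;	/* models preempt_disable() at breakpoint entry */

static int dispatch(struct probe *p)
{
	preempt_off = 1;
	probe_depth++;				/* push_kprobe() */
	if (p->pre_handler && p->pre_handler(p)) {
		probe_depth--;			/* pop_kprobe(): the new cleanup */
		preempt_off = 0;		/* preempt_enable_no_resched() */
		return 1;			/* handler rerouted execution */
	}
	/* otherwise state stays pushed until the single-step completes */
	printf("single-step the probed instruction\n");
	return 1;
}

static int reroute(struct probe *p)
{
	(void)p;
	return 1;	/* e.g. a handler that changed the PC itself */
}

int main(void)
{
	struct probe p = { .pre_handler = reroute };
	dispatch(&p);
	printf("depth=%d preempt_off=%d\n", probe_depth, preempt_off);
	return 0;
}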
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 18ae7b9c71d6..bdddaae96559 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -35,6 +35,8 @@ early_param("nospec", nospec_setup_early);
static int __init nospec_report(void)
{
+ if (test_facility(156))
+ pr_info("Spectre V2 mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
@@ -56,7 +58,15 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
void __init nospec_auto_detect(void)
{
- if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ if (test_facility(156)) {
+ /*
+ * The machine supports etokens.
+ * Disable expolines and disable nobp.
+ */
+ if (IS_ENABLED(CC_USING_EXPOLINE))
+ nospec_disable = 1;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ } else if (IS_ENABLED(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index 8affad5f18cb..e30e580ae362 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -13,6 +13,8 @@ ssize_t cpu_show_spectre_v1(struct device *dev,
ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ if (test_facility(156))
+ return sprintf(buf, "Mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
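
The two hunks above give the etoken facility (156) first say in both the boot-time message and the sysfs mitigation report, ahead of expolines and the nobp fallback. The precedence is easiest to see flattened into one function; in this hedged sketch the booleans stand in for test_facility() and the expoline/nobp state, and the last label is a placeholder since the nobp string lies outside this diff:

#include <stdio.h>
#include <stdbool.h>

static const char *spectre_v2_mitigation(bool etokens, bool expolines,
					 bool nobp)
{
	if (etokens)
		return "etokens";
	if (expolines)
		return "execute trampolines";
	if (nobp)
		return "limited branch prediction";	/* placeholder label */
	return "none";
}

int main(void)
{
	/* On an etokens machine nospec_auto_detect() also switches the
	 * other two off, so only one line is ever reported. */
	printf("Mitigation: %s\n", spectre_v2_mitigation(true, true, true));
	return 0;
}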
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 0292d68e7dde..cb198d4a6dca 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -2,7 +2,7 @@
/*
* Performance event support for the System z CPU-measurement Sampling Facility
*
- * Copyright IBM Corp. 2013
+ * Copyright IBM Corp. 2013, 2018
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#define KMSG_COMPONENT "cpum_sf"
@@ -1587,6 +1587,17 @@ static void aux_buffer_free(void *data)
"%lu SDBTs\n", num_sdbt);
}
+static void aux_sdb_init(unsigned long sdb)
+{
+ struct hws_trailer_entry *te;
+
+ te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+
+ /* Save clock base */
+ te->clock_base = 1;
+ memcpy(&te->progusage2, &tod_clock_base[1], 8);
+}
+
/*
* aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
* @cpu: On which to allocate, -1 means current
@@ -1666,6 +1677,7 @@ static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
/* Tail is the entry in a SDBT */
*tail = (unsigned long)pages[i];
aux->sdb_index[i] = (unsigned long)pages[i];
+ aux_sdb_init((unsigned long)pages[i]);
}
sfb->num_sdb = nr_pages;
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index 54e2d634b849..4352a504f235 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -12,9 +12,6 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
{
freg_t fp;
- if (WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX))
- return 0;
-
if (idx >= PERF_REG_S390_R0 && idx <= PERF_REG_S390_R15)
return regs->gprs[idx];
@@ -33,7 +30,8 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
if (idx == PERF_REG_S390_PC)
return regs->psw.addr;
- return regs->gprs[idx];
+ WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX);
+ return 0;
}
#define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1))
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d82a9ec64ea9..c637c12f9e37 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -674,12 +674,12 @@ static void __init reserve_kernel(void)
#ifdef CONFIG_DMA_API_DEBUG
/*
* DMA_API_DEBUG code stumbles over addresses from the
- * range [_ehead, _stext]. Mark the memory as reserved
+ * range [PARMAREA_END, _stext]. Mark the memory as reserved
* so it is not used for CONFIG_DMA_API_DEBUG=y.
*/
memblock_reserve(0, PFN_PHYS(start_pfn));
#else
- memblock_reserve(0, (unsigned long)_ehead);
+ memblock_reserve(0, PARMAREA_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
#endif
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 54f5496913fa..12f80d1f0415 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -59,6 +59,8 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
+#ifdef CONFIG_PROC_FS
+
static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
{
switch (encoding) {
@@ -301,6 +303,8 @@ static int __init sysinfo_create_proc(void)
}
device_initcall(sysinfo_create_proc);
+#endif /* CONFIG_PROC_FS */
+
/*
* Service levels interface.
*/
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index cf561160ea88..e8766beee5ad 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -221,17 +221,22 @@ void read_persistent_clock64(struct timespec64 *ts)
ext_to_timespec64(clk, ts);
}
-void read_boot_clock64(struct timespec64 *ts)
+void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+ struct timespec64 *boot_offset)
{
unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ struct timespec64 boot_time;
__u64 delta;
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
- memcpy(clk, tod_clock_base, 16);
- *(__u64 *) &clk[1] -= delta;
- if (*(__u64 *) &clk[1] > delta)
+ memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
+ *(__u64 *)&clk[1] -= delta;
+ if (*(__u64 *)&clk[1] > delta)
clk[0]--;
- ext_to_timespec64(clk, ts);
+ ext_to_timespec64(clk, &boot_time);
+
+ read_persistent_clock64(wall_time);
+ *boot_offset = timespec64_sub(*wall_time, boot_time);
}
static u64 read_tod_clock(struct clocksource *cs)
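
read_boot_clock64() is replaced by read_persistent_wall_and_boot_offset(), which hands the timekeeping core the persistent wall time plus the wall-minus-boot offset in a single call. The offset itself is ordinary timespec subtraction with nanosecond borrow; a small standalone sketch with made-up values:

#include <stdio.h>
#include <time.h>

static struct timespec timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (r.tv_nsec < 0) {
		r.tv_sec--;
		r.tv_nsec += 1000000000L;
	}
	return r;
}

int main(void)
{
	struct timespec wall = { 10000, 500000000 };	/* made-up values */
	struct timespec boot = {  9000, 750000000 };
	struct timespec off  = timespec_sub(wall, boot);

	/* 999.75s elapsed between boot and "now" */
	printf("boot offset: %ld.%09lds\n", (long)off.tv_sec, off.tv_nsec);
	return 0;
}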
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 4b6e0397f66d..e8184a15578a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -579,41 +579,33 @@ early_param("topology", topology_setup);
static int topology_ctl_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
+ int enabled = topology_is_enabled();
int new_mode;
- char buf[2];
+ int zero = 0;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &enabled,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
- ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- if (buf[0] != '0' && buf[0] != '1')
- return -EINVAL;
mutex_lock(&smp_cpu_state_mutex);
- new_mode = topology_get_mode(buf[0] == '1');
+ new_mode = topology_get_mode(enabled);
if (topology_mode != new_mode) {
topology_mode = new_mode;
topology_schedule_update();
}
mutex_unlock(&smp_cpu_state_mutex);
topology_flush_work();
-out:
- *lenp = len;
- *ppos += len;
- return 0;
+
+ return rc;
}
static struct ctl_table topology_ctl_table[] = {
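
The rewritten handler swaps roughly thirty lines of manual copy_{to,from}_user() parsing for a stack-local ctl_table that aliases a plain int and delegates range checking to proc_douintvec_minmax(). A hedged userspace model of that shape; the fake_* names below are stand-ins for the procfs machinery, not kernel functions:

#include <stdio.h>

struct fake_ctl_table {
	const char *procname;
	int *data;
	int min, max;
};

/* stand-in for proc_douintvec_minmax(): print on read, range-check on write */
static int fake_douintvec_minmax(struct fake_ctl_table *t, int write, int val)
{
	if (!write) {
		printf("%s = %d\n", t->procname, *t->data);
		return 0;
	}
	if (val < t->min || val > t->max)
		return -1;			/* models -EINVAL */
	*t->data = val;
	return 0;
}

static int topology_enabled;

static int topology_handler(int write, int val)
{
	int enabled = topology_enabled;		/* snapshot, like the patch */
	struct fake_ctl_table entry = {
		.procname = "topology", .data = &enabled, .min = 0, .max = 1,
	};
	int rc = fake_douintvec_minmax(&entry, write, val);

	if (rc < 0 || !write)
		return rc;
	topology_enabled = enabled;		/* committed under lock in-kernel */
	return 0;
}

int main(void)
{
	printf("write 1 -> rc=%d, enabled=%d\n",
	       topology_handler(1, 1), topology_enabled);
	printf("write 7 -> rc=%d (rejected)\n", topology_handler(1, 7));
	return 0;
}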
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 09abae40f917..3031cc6dd0ab 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -47,7 +47,7 @@ static struct page **vdso64_pagelist;
*/
unsigned int __read_mostly vdso_enabled = 1;
-static int vdso_fault(const struct vm_special_mapping *sm,
+static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page **vdso_pagelist;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index f0414f52817b..b43f8d33a369 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -19,7 +19,7 @@
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
-ENTRY(startup)
+ENTRY(startup_continue)
jiffies = jiffies_64;
PHDRS {
@@ -30,16 +30,12 @@ PHDRS {
SECTIONS
{
- . = 0x00000000;
+ . = 0x100000;
+ _stext = .; /* Start of text section */
.text : {
/* Text and read-only data */
+ _text = .;
HEAD_TEXT
- /*
- * E.g. perf doesn't like symbols starting at address zero,
- * therefore skip the initial PSW and channel program located
- * at address zero and let _text start at 0x200.
- */
- _text = 0x200;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
@@ -47,6 +43,7 @@ SECTIONS
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
+ *(.text.*_indirect_*)
*(.fixup)
*(.gnu.warning)
} :text = 0x0700
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index daa09f89ca2d..fcb55b02990e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1145,7 +1145,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
* yield-candidate.
*/
vcpu->preempted = true;
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
vcpu->stat.halt_wakeup++;
}
/*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3b7a5151b6a5..f9d90337e64a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -172,6 +172,10 @@ static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
+/* allow 1m huge page guest backing, if !nested */
+static int hpage;
+module_param(hpage, int, 0444);
+MODULE_PARM_DESC(hpage, "1m huge page backing support");
/*
* For now we handle at most 16 double words as this is what the s390 base
@@ -475,6 +479,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_AIS_MIGRATION:
r = 1;
break;
+ case KVM_CAP_S390_HPAGE_1M:
+ r = 0;
+ if (hpage)
+ r = 1;
+ break;
case KVM_CAP_S390_MEM_OP:
r = MEM_OP_MAX_SIZE;
break;
@@ -511,19 +520,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
}
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+ struct kvm_memory_slot *memslot)
{
+ int i;
gfn_t cur_gfn, last_gfn;
- unsigned long address;
+ unsigned long gaddr, vmaddr;
struct gmap *gmap = kvm->arch.gmap;
+ DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
- /* Loop over all guest pages */
+ /* Loop over all guest segments */
+ cur_gfn = memslot->base_gfn;
last_gfn = memslot->base_gfn + memslot->npages;
- for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
- address = gfn_to_hva_memslot(memslot, cur_gfn);
+ for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
+ gaddr = gfn_to_gpa(cur_gfn);
+ vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
+ if (kvm_is_error_hva(vmaddr))
+ continue;
+
+ bitmap_zero(bitmap, _PAGE_ENTRIES);
+ gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
+ for (i = 0; i < _PAGE_ENTRIES; i++) {
+ if (test_bit(i, bitmap))
+ mark_page_dirty(kvm, cur_gfn + i);
+ }
- if (test_and_clear_guest_dirty(gmap->mm, address))
- mark_page_dirty(kvm, cur_gfn);
if (fatal_signal_pending(current))
return;
cond_resched();
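
The dirty-log walk above now advances one segment (256 page-table entries) per iteration and harvests a per-segment bitmap, so a dirty 1M pmd can dirty all 256 pages in one step instead of answering 256 separate per-page queries. A self-contained model of the loop with a simulated dirty source:

#include <stdio.h>
#include <string.h>

#define PAGE_ENTRIES 256	/* _PAGE_ENTRIES on s390 */

/* simulated gmap_sync_dirty_log_pmd(): fills the per-segment bitmap */
static void sync_segment(unsigned char bitmap[PAGE_ENTRIES / 8],
			 unsigned long gfn)
{
	memset(bitmap, 0, PAGE_ENTRIES / 8);
	if (gfn == 0)
		bitmap[0] = 0x05;	/* pages 0 and 2 of segment 0 dirty */
}

int main(void)
{
	unsigned long base_gfn = 0, npages = 512, cur;
	unsigned char bitmap[PAGE_ENTRIES / 8];
	int i;

	for (cur = base_gfn; cur < base_gfn + npages; cur += PAGE_ENTRIES) {
		sync_segment(bitmap, cur);
		for (i = 0; i < PAGE_ENTRIES; i++)
			if (bitmap[i / 8] & (1u << (i % 8)))
				printf("mark_page_dirty(gfn %lu)\n", cur + i);
	}
	return 0;
}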
@@ -667,6 +687,27 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
r ? "(not available)" : "(success)");
break;
+ case KVM_CAP_S390_HPAGE_1M:
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus)
+ r = -EBUSY;
+ else if (!hpage || kvm->arch.use_cmma)
+ r = -EINVAL;
+ else {
+ r = 0;
+ kvm->mm->context.allow_gmap_hpage_1m = 1;
+ /*
+ * We might have to create fake 4k page
+ * tables. To avoid that the hardware works on
+ * stale PGSTEs, we emulate these instructions.
+ */
+ kvm->arch.use_skf = 0;
+ kvm->arch.use_pfmfi = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
+ r ? "(not available)" : "(success)");
+ break;
case KVM_CAP_S390_USER_STSI:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
@@ -714,10 +755,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
if (!sclp.has_cmma)
break;
- ret = -EBUSY;
VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
mutex_lock(&kvm->lock);
- if (!kvm->created_vcpus) {
+ if (kvm->created_vcpus)
+ ret = -EBUSY;
+ else if (kvm->mm->context.allow_gmap_hpage_1m)
+ ret = -EINVAL;
+ else {
kvm->arch.use_cmma = 1;
/* Not compatible with cmma. */
kvm->arch.use_pfmfi = 0;
@@ -1540,6 +1584,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
uint8_t *keys;
uint64_t hva;
int srcu_idx, i, r = 0;
+ bool unlocked;
if (args->flags != 0)
return -EINVAL;
@@ -1564,9 +1609,11 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (r)
goto out;
+ i = 0;
down_read(&current->mm->mmap_sem);
srcu_idx = srcu_read_lock(&kvm->srcu);
- for (i = 0; i < args->count; i++) {
+ while (i < args->count) {
+ unlocked = false;
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
@@ -1580,8 +1627,14 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
}
r = set_guest_storage_key(current->mm, hva, keys[i], 0);
- if (r)
- break;
+ if (r) {
+ r = fixup_user_fault(current, current->mm, hva,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (r)
+ break;
+ }
+ if (!r)
+ i++;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&current->mm->mmap_sem);
@@ -4082,6 +4135,11 @@ static int __init kvm_s390_init(void)
return -ENODEV;
}
+ if (nested && hpage) {
+		pr_info("nested (vSIE) and hpage (huge page backing) cannot currently be activated concurrently\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < 16; i++)
kvm_s390_fac_base[i] |=
S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index eb0eb60c7be6..cfc5a62329f6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -246,9 +246,10 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
static int handle_iske(struct kvm_vcpu *vcpu)
{
- unsigned long addr;
+ unsigned long gaddr, vmaddr;
unsigned char key;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_iske++;
@@ -262,18 +263,28 @@ static int handle_iske(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- addr = kvm_s390_logical_to_effective(vcpu, addr);
- addr = kvm_s390_real_to_abs(vcpu, addr);
- addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
- if (kvm_is_error_hva(addr))
+ gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+ gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+ unlocked = false;
down_read(&current->mm->mmap_sem);
- rc = get_guest_storage_key(current->mm, addr, &key);
- up_read(&current->mm->mmap_sem);
+ rc = get_guest_storage_key(current->mm, vmaddr, &key);
+
+ if (rc) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (!rc) {
+ up_read(&current->mm->mmap_sem);
+ goto retry;
+ }
+ }
if (rc)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ up_read(&current->mm->mmap_sem);
vcpu->run->s.regs.gprs[reg1] &= ~0xff;
vcpu->run->s.regs.gprs[reg1] |= key;
return 0;
@@ -281,8 +292,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
- unsigned long addr;
+ unsigned long vmaddr, gaddr;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_rrbe++;
@@ -296,19 +308,27 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- addr = kvm_s390_logical_to_effective(vcpu, addr);
- addr = kvm_s390_real_to_abs(vcpu, addr);
- addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
- if (kvm_is_error_hva(addr))
+ gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+ gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+ unlocked = false;
down_read(&current->mm->mmap_sem);
- rc = reset_guest_reference_bit(current->mm, addr);
- up_read(&current->mm->mmap_sem);
+ rc = reset_guest_reference_bit(current->mm, vmaddr);
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (!rc) {
+ up_read(&current->mm->mmap_sem);
+ goto retry;
+ }
+ }
if (rc < 0)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+ up_read(&current->mm->mmap_sem);
kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
@@ -323,6 +343,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
unsigned long start, end;
unsigned char key, oldkey;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_sske++;
@@ -355,19 +376,28 @@ static int handle_sske(struct kvm_vcpu *vcpu)
}
while (start != end) {
- unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ unlocked = false;
- if (kvm_is_error_hva(addr))
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
down_read(&current->mm->mmap_sem);
- rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
+ rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
m3 & SSKE_NQ, m3 & SSKE_MR,
m3 & SSKE_MC);
- up_read(&current->mm->mmap_sem);
- if (rc < 0)
+
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ rc = !rc ? -EAGAIN : rc;
+ }
+ if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- start += PAGE_SIZE;
+
+ up_read(&current->mm->mmap_sem);
+ if (rc >= 0)
+ start += PAGE_SIZE;
}
if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -948,15 +978,16 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
while (start != end) {
- unsigned long useraddr;
+ unsigned long vmaddr;
+ bool unlocked = false;
/* Translate guest address to host address */
- useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
- if (kvm_is_error_hva(useraddr))
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (clear_user((void __user *)useraddr, PAGE_SIZE))
+ if (clear_user((void __user *)vmaddr, PAGE_SIZE))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
@@ -966,14 +997,20 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (rc)
return rc;
down_read(&current->mm->mmap_sem);
- rc = cond_set_guest_storage_key(current->mm, useraddr,
+ rc = cond_set_guest_storage_key(current->mm, vmaddr,
key, NULL, nq, mr, mc);
- up_read(&current->mm->mmap_sem);
- if (rc < 0)
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ rc = !rc ? -EAGAIN : rc;
+ }
+ if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
- start += PAGE_SIZE;
+ up_read(&current->mm->mmap_sem);
+ if (rc >= 0)
+ start += PAGE_SIZE;
+ }
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
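
The emulation paths touched above (iske, rrbe, sske, pfmf) all follow one shape: attempt the storage-key operation, and when it fails because the backing page is not writably mapped, fault the page in with fixup_user_fault() and retry, advancing only once the operation succeeds. A runnable model of that loop; key_op() and fault_in() are stand-ins for the kernel helpers, and SIM_EAGAIN is a local stand-in for -EAGAIN:

#include <stdio.h>

#define SIM_EAGAIN 11

static int key_op(unsigned long addr)	/* fails once per address here */
{
	static unsigned long faulted;

	if (!(faulted & (1UL << addr))) {
		faulted |= 1UL << addr;
		return -1;		/* "page not mapped" */
	}
	return 0;
}

static int fault_in(unsigned long addr)	/* stand-in for fixup_user_fault */
{
	printf("fault in page %lu\n", addr);
	return 0;			/* nonzero would abort the loop */
}

int main(void)
{
	unsigned long start = 0, end = 4;
	int rc;

	while (start != end) {
		rc = key_op(start);
		if (rc < 0)		/* map fixup success to "try again" */
			rc = fault_in(start) ? -1 : -SIM_EAGAIN;
		if (rc == -1)
			break;		/* real fault error: bail out */
		if (rc >= 0)		/* only advance once the op worked */
			start += 1;
	}
	return 0;
}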
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 2311f15be9cf..40c4d59c926e 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -17,7 +17,7 @@
ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
- bzr %r14
+ jz .Lmemmove_exit
aghi %r4,-1
clgr %r2,%r3
jnh .Lmemmove_forward
@@ -36,6 +36,7 @@ ENTRY(memmove)
.Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
+.Lmemmove_exit:
BR_EX %r14
.Lmemmove_reverse:
ic %r0,0(%r4,%r3)
@@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
*/
ENTRY(memset)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemset_exit
ltgr %r3,%r3
jnz .Lmemset_fill
aghi %r4,-1
@@ -80,6 +81,7 @@ ENTRY(memset)
.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
+.Lmemset_exit:
BR_EX %r14
.Lmemset_fill:
cghi %r4,1
@@ -115,7 +117,7 @@ EXPORT_SYMBOL(memset)
*/
ENTRY(memcpy)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemcpy_exit
aghi %r4,-1
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -124,6 +126,7 @@ ENTRY(memcpy)
.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
+.Lmemcpy_exit:
BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
@@ -145,9 +148,9 @@ EXPORT_SYMBOL(memcpy)
.macro __MEMSET bits,bytes,insn
ENTRY(__memset\bits)
ltgr %r4,%r4
- bzr %r14
+ jz .L__memset_exit\bits
cghi %r4,\bytes
- je .L__memset_exit\bits
+ je .L__memset_store\bits
aghi %r4,-(\bytes+1)
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -163,8 +166,9 @@ ENTRY(__memset\bits)
larl %r5,.L__memset_mvc\bits
ex %r4,0(%r5)
BR_EX %r14
-.L__memset_exit\bits:
+.L__memset_store\bits:
\insn %r3,0(%r2)
+.L__memset_exit\bits:
BR_EX %r14
.L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 6cf024eb2085..510a18299196 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -191,12 +191,7 @@ static void cmm_set_timer(void)
del_timer(&cmm_timer);
return;
}
- if (timer_pending(&cmm_timer)) {
- if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
- return;
- }
- cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
- add_timer(&cmm_timer);
+ mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds * HZ);
}
static void cmm_timer_fn(struct timer_list *unused)
@@ -251,45 +246,42 @@ static int cmm_skip_blanks(char *cp, char **endp)
return str != cp;
}
-static struct ctl_table cmm_table[];
-
static int cmm_pages_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- char buf[16], *p;
- unsigned int len;
- long nr;
+ long nr = cmm_get_pages();
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &nr,
+ .maxlen = sizeof(long),
+ };
+ int rc;
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
+ rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- cmm_skip_blanks(buf, &p);
- nr = simple_strtoul(p, &p, 0);
- if (ctl == &cmm_table[0])
- cmm_set_pages(nr);
- else
- cmm_add_timed_pages(nr);
- } else {
- if (ctl == &cmm_table[0])
- nr = cmm_get_pages();
- else
- nr = cmm_get_timed_pages();
- len = sprintf(buf, "%ld\n", nr);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- }
- *lenp = len;
- *ppos += len;
+ cmm_set_pages(nr);
+ return 0;
+}
+
+static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ long nr = cmm_get_timed_pages();
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &nr,
+ .maxlen = sizeof(long),
+ };
+ int rc;
+
+ rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+
+ cmm_add_timed_pages(nr);
return 0;
}
@@ -338,7 +330,7 @@ static struct ctl_table cmm_table[] = {
{
.procname = "cmm_timed_pages",
.mode = 0644,
- .proc_handler = cmm_pages_handler,
+ .proc_handler = cmm_timed_pages_handler,
},
{
.procname = "cmm_timeout",
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 6ad15d3fab81..84111a43ea29 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -80,7 +80,7 @@ struct qin64 {
struct dcss_segment {
struct list_head list;
char dcss_name[8];
- char res_name[15];
+ char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
@@ -433,7 +433,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
- strncat(seg->res_name, " (DCSS)", 7);
+ strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
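
The size arithmetic behind the two extmem fixes: the resource name is the 8-character DCSS name plus the 7-character " (DCSS)" suffix plus a terminating NUL, i.e. 16 bytes, so char res_name[15] was one byte short, and strncat()'s length argument does not count the terminator either. strlcat() sized to the whole buffer both appends and guarantees termination; the standalone check below uses snprintf() as a portable stand-in since strlcat() is not in glibc:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char res_name[16];
	const char name[9] = "MONDCSS ";	/* 8 chars, as after EBCASC() */

	memcpy(res_name, name, 8);
	res_name[8] = '\0';
	/* append; total is 8 + 7 + 1 = sizeof(res_name) bytes exactly */
	snprintf(res_name + strlen(res_name),
		 sizeof(res_name) - strlen(res_name), " (DCSS)");
	printf("\"%s\" uses %zu of %zu bytes\n",
	       res_name, strlen(res_name) + 1, sizeof(res_name));
	return 0;
}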
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e074480d3598..4cc3f06b0ab3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -502,6 +502,8 @@ retry:
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
+ if (flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out_up;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bc56ec8abcf7..bb44990c8212 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2,8 +2,10 @@
/*
* KVM guest address space mapping code
*
- * Copyright IBM Corp. 2007, 2016
+ * Copyright IBM Corp. 2007, 2016, 2018
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ * Janosch Frank <frankja@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
@@ -521,6 +523,9 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
rcu_read_unlock();
}
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
+ unsigned long gaddr);
+
/**
* gmap_link - set up shadow page tables to connect a host to a guest address
* @gmap: pointer to guest mapping meta data structure
@@ -541,6 +546,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
+ u64 unprot;
int rc;
BUG_ON(gmap_is_shadow(gmap));
@@ -584,8 +590,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
return -EFAULT;
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
- /* large pmds cannot yet be handled */
- if (pmd_large(*pmd))
+ /* Are we allowed to use huge pages? */
+ if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT;
/* Link gmap segment table entry location to page table. */
rc = radix_tree_preload(GFP_KERNEL);
@@ -596,10 +602,22 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
if (*table == _SEGMENT_ENTRY_EMPTY) {
rc = radix_tree_insert(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT, table);
- if (!rc)
- *table = pmd_val(*pmd);
- } else
- rc = 0;
+ if (!rc) {
+ if (pmd_large(*pmd)) {
+ *table = (pmd_val(*pmd) &
+ _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
+ | _SEGMENT_ENTRY_GMAP_UC;
+ } else
+ *table = pmd_val(*pmd) &
+ _SEGMENT_ENTRY_HARDWARE_BITS;
+ }
+ } else if (*table & _SEGMENT_ENTRY_PROTECT &&
+ !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
+ unprot = (u64)*table;
+ unprot &= ~_SEGMENT_ENTRY_PROTECT;
+ unprot |= _SEGMENT_ENTRY_GMAP_UC;
+ gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
+ }
spin_unlock(&gmap->guest_table_lock);
spin_unlock(ptl);
radix_tree_preload_end();
@@ -690,6 +708,12 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
+ /*
+ * We do not discard pages that are backed by
+ * hugetlbfs, so we don't have to refault them.
+ */
+ if (vma && is_vm_hugetlb_page(vma))
+ continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
}
@@ -864,7 +888,128 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
*/
static void gmap_pte_op_end(spinlock_t *ptl)
{
- spin_unlock(ptl);
+ if (ptl)
+ spin_unlock(ptl);
+}
+
+/**
+ * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
+ * and return the pmd pointer
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ *
+ * Returns a pointer to the pmd for a guest address, or NULL
+ */
+static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
+{
+ pmd_t *pmdp;
+
+ BUG_ON(gmap_is_shadow(gmap));
+ spin_lock(&gmap->guest_table_lock);
+ pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+
+ if (!pmdp || pmd_none(*pmdp)) {
+ spin_unlock(&gmap->guest_table_lock);
+ return NULL;
+ }
+
+ /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
+ if (!pmd_large(*pmdp))
+ spin_unlock(&gmap->guest_table_lock);
+ return pmdp;
+}
+
+/**
+ * gmap_pmd_op_end - release the guest_table_lock if needed
+ * @gmap: pointer to the guest mapping meta data structure
+ * @pmdp: pointer to the pmd
+ */
+static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
+{
+ if (pmd_large(*pmdp))
+ spin_unlock(&gmap->guest_table_lock);
+}
+
+/*
+ * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @pmdp: pointer to the pmd to be protected
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns:
+ * 0 if successfully protected
+ * -EAGAIN if a fixup is needed
+ * -EINVAL if unsupported notifier bits have been specified
+ *
+ * Expected to be called with gmap->mm->mmap_sem in read and
+ * guest_table_lock held.
+ */
+static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
+ pmd_t *pmdp, int prot, unsigned long bits)
+{
+ int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
+ int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
+ pmd_t new = *pmdp;
+
+ /* Fixup needed */
+ if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
+ return -EAGAIN;
+
+ if (prot == PROT_NONE && !pmd_i) {
+ pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
+ gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+ }
+
+ if (prot == PROT_READ && !pmd_p) {
+ pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
+ pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
+ gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+ }
+
+ if (bits & GMAP_NOTIFY_MPROT)
+ pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
+
+ /* Shadow GMAP protection needs split PMDs */
+ if (bits & GMAP_NOTIFY_SHADOW)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * gmap_protect_pte - remove access rights to memory and set pgste bits
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @pmdp: pointer to the pmd associated with the pte
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns 0 if successfully protected, -ENOMEM if out of memory and
+ * -EAGAIN if a fixup is needed.
+ *
+ * Expected to be called with gmap->mm->mmap_sem in read
+ */
+static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
+ pmd_t *pmdp, int prot, unsigned long bits)
+{
+ int rc;
+ pte_t *ptep;
+ spinlock_t *ptl = NULL;
+ unsigned long pbits = 0;
+
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return -EAGAIN;
+
+ ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
+ if (!ptep)
+ return -ENOMEM;
+
+ pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
+ pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
+ /* Protect and unlock. */
+ rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
+ gmap_pte_op_end(ptl);
+ return rc;
}
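
gmap_pmd_op_walk()/gmap_pmd_op_end() above encode an asymmetric lock protocol: the guest table lock stays held across the caller's work only for 1M (large) pmds, while 4k entries drop it immediately because the pte lock taken later protects them instead. A toy model of the caller contract, with the lock reduced to a counter and all names hypothetical:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int table_lock;	/* models gmap->guest_table_lock */

static bool pmd_walk(bool large)
{
	table_lock++;			/* always taken for the lookup */
	if (!large)
		table_lock--;		/* 4k: the pte lock covers the entry */
	return true;			/* "pmd found" */
}

static void pmd_op_end(bool large)
{
	if (large)
		table_lock--;		/* only large pmds still hold it */
}

static void protect(bool large)
{
	if (!pmd_walk(large))
		return;
	printf("work on %s pmd, table_lock=%d\n",
	       large ? "1M" : "4k", table_lock);
	pmd_op_end(large);
	assert(table_lock == 0);	/* balanced on either path */
}

int main(void)
{
	protect(true);
	protect(false);
	return 0;
}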
/*
@@ -883,30 +1028,45 @@ static void gmap_pte_op_end(spinlock_t *ptl)
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
{
- unsigned long vmaddr;
- spinlock_t *ptl;
- pte_t *ptep;
+ unsigned long vmaddr, dist;
+ pmd_t *pmdp;
int rc;
BUG_ON(gmap_is_shadow(gmap));
while (len) {
rc = -EAGAIN;
- ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
- if (ptep) {
- rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
- gmap_pte_op_end(ptl);
+ pmdp = gmap_pmd_op_walk(gmap, gaddr);
+ if (pmdp) {
+ if (!pmd_large(*pmdp)) {
+ rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
+ bits);
+ if (!rc) {
+ len -= PAGE_SIZE;
+ gaddr += PAGE_SIZE;
+ }
+ } else {
+ rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
+ bits);
+ if (!rc) {
+ dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
+ len = len < dist ? 0 : len - dist;
+ gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
+ }
+ }
+ gmap_pmd_op_end(gmap, pmdp);
}
if (rc) {
+ if (rc == -EINVAL)
+ return rc;
+
+ /* -EAGAIN, fixup of userspace mm and gmap */
vmaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(vmaddr))
return vmaddr;
rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
if (rc)
return rc;
- continue;
}
- gaddr += PAGE_SIZE;
- len -= PAGE_SIZE;
}
return 0;
}
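
A worked example of the huge-page stride above, assuming HPAGE_SIZE is 1M: dist is the distance from gaddr to the end of its segment, so a range starting mid-segment is clipped to the segment boundary before gaddr is rounded up:

#include <stdio.h>

#define HPAGE_SIZE (1UL << 20)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long gaddr = 0x00180000;	/* 1.5M: middle of a segment */
	unsigned long len   = 0x00300000;	/* 3M left to protect */
	unsigned long dist  = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);

	len   = len < dist ? 0 : len - dist;
	gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;

	/* dist = 0x80000 (512k), next gaddr = 2M, remaining len = 2.5M */
	printf("dist=%#lx gaddr=%#lx len=%#lx\n", dist, gaddr, len);
	return 0;
}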
@@ -935,7 +1095,7 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
down_read(&gmap->mm->mmap_sem);
- rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
+ rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
up_read(&gmap->mm->mmap_sem);
return rc;
}
@@ -1474,6 +1634,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
unsigned long limit;
int rc;
+ BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
BUG_ON(gmap_is_shadow(parent));
spin_lock(&parent->shadow_lock);
sg = gmap_find_shadow(parent, asce, edat_level);
@@ -1526,7 +1687,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
down_read(&parent->mm->mmap_sem);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
- PROT_READ, PGSTE_VSIE_BIT);
+ PROT_READ, GMAP_NOTIFY_SHADOW);
up_read(&parent->mm->mmap_sem);
spin_lock(&parent->shadow_lock);
new->initialized = true;
@@ -2092,6 +2253,225 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
}
EXPORT_SYMBOL_GPL(ptep_notify);
+static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
+{
+ pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
+ gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
+}
+
+/**
+ * gmap_pmdp_xchg - exchange a gmap pmd with another
+ * @gmap: pointer to the guest address space structure
+ * @pmdp: pointer to the pmd entry
+ * @new: replacement entry
+ * @gaddr: the affected guest address
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
+ unsigned long gaddr)
+{
+ gaddr &= HPAGE_MASK;
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
+ IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
+ else
+ __pmdp_csp(pmdp);
+ *pmdp = new;
+}
+
+static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
+ int purge)
+{
+ pmd_t *pmdp;
+ struct gmap *gmap;
+ unsigned long gaddr;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (pmdp) {
+ gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (purge)
+ __pmdp_csp(pmdp);
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
+ * flushing
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
+{
+ gmap_pmdp_clear(mm, vmaddr, 0);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
+
+/**
+ * gmap_pmdp_csp - csp all affected guest pmd entries
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
+{
+ gmap_pmdp_clear(mm, vmaddr, 1);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
+
+/**
+ * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *entry, gaddr;
+ struct gmap *gmap;
+ pmd_t *pmdp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ entry = radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (entry) {
+ pmdp = (pmd_t *)entry;
+ gaddr = __gmap_segment_gaddr(entry);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+ gmap->asce, IDTE_LOCAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
+ *entry = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
+
+/**
+ * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *entry, gaddr;
+ struct gmap *gmap;
+ pmd_t *pmdp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ entry = radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (entry) {
+ pmdp = (pmd_t *)entry;
+ gaddr = __gmap_segment_gaddr(entry);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+ gmap->asce, IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
+ else
+ __pmdp_csp(pmdp);
+ *entry = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
+
+/**
+ * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
+ * @gmap: pointer to guest address space
+ * @pmdp: pointer to the pmd to be tested
+ * @gaddr: virtual address in the guest address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
+{
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return false;
+
+	/* Already protected memory, which did not change, is clean */
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
+ !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
+ return false;
+
+ /* Clear UC indication and reset protection */
+ pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
+ gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
+ return true;
+}
+
+/**
+ * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
+ * @gmap: pointer to guest address space
+ * @bitmap: dirty bitmap for this pmd
+ * @gaddr: virtual address in the guest address space
+ * @vmaddr: virtual address in the host address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
+ unsigned long gaddr, unsigned long vmaddr)
+{
+ int i;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ spinlock_t *ptl;
+
+ pmdp = gmap_pmd_op_walk(gmap, gaddr);
+ if (!pmdp)
+ return;
+
+ if (pmd_large(*pmdp)) {
+ if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
+ bitmap_fill(bitmap, _PAGE_ENTRIES);
+ } else {
+ for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
+ ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
+ if (!ptep)
+ continue;
+ if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
+ set_bit(i, bitmap);
+ spin_unlock(ptl);
+ }
+ }
+ gmap_pmd_op_end(gmap, pmdp);
+}
+EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
+
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -2168,17 +2548,45 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
*/
-static int __s390_enable_skey(pte_t *pte, unsigned long addr,
- unsigned long next, struct mm_walk *walk)
+static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
/* Clear storage key */
ptep_zap_key(walk->mm, addr, pte);
return 0;
}
+static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
+ unsigned long hmask, unsigned long next,
+ struct mm_walk *walk)
+{
+ pmd_t *pmd = (pmd_t *)pte;
+ unsigned long start, end;
+ struct page *page = pmd_page(*pmd);
+
+ /*
+ * The write check makes sure we do not set a key on shared
+ * memory. This is needed as the walker does not differentiate
+ * between actual guest memory and the process executable or
+ * shared libraries.
+ */
+ if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
+ !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
+ return 0;
+
+ start = pmd_val(*pmd) & HPAGE_MASK;
+ end = start + HPAGE_SIZE - 1;
+ __storage_key_init_range(start, end);
+ set_bit(PG_arch_1, &page->flags);
+ return 0;
+}
+
int s390_enable_skey(void)
{
- struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+ struct mm_walk walk = {
+ .hugetlb_entry = __s390_enable_skey_hugetlb,
+ .pte_entry = __s390_enable_skey_pte,
+ };
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc = 0;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e804090f4470..b0246c705a19 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -123,6 +123,29 @@ static inline pte_t __rste_to_pte(unsigned long rste)
return pte;
}
+static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+{
+ struct page *page;
+ unsigned long size, paddr;
+
+ if (!mm_uses_skeys(mm) ||
+ rste & _SEGMENT_ENTRY_INVALID)
+ return;
+
+ if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+ page = pud_page(__pud(rste));
+ size = PUD_SIZE;
+ paddr = rste & PUD_MASK;
+ } else {
+ page = pmd_page(__pmd(rste));
+ size = PMD_SIZE;
+ paddr = rste & PMD_MASK;
+ }
+
+ if (!test_and_set_bit(PG_arch_1, &page->flags))
+ __storage_key_init_range(paddr, paddr + size - 1);
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -137,6 +160,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
else
rste |= _SEGMENT_ENTRY_LARGE;
+ clear_huge_pte_skeys(mm, rste);
pte_val(*ptep) = rste;
}
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 382153ff17e3..dc3cede7f2ec 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
- set_page_stable_dat(page, 0);
+ set_page_stable_dat(page, order);
else
set_page_unused(page, order);
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index c44171588d08..f8c6faab41f4 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -14,7 +14,7 @@
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
- asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
: [addr] "+a" (addr) : [skey] "d" (skey));
return addr;
}
@@ -23,8 +23,6 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, size;
- if (!PAGE_DEFAULT_KEY)
- return;
while (start < end) {
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
@@ -37,7 +35,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
continue;
}
}
- page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
start += PAGE_SIZE;
}
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e3bd5627afef..76d89ee8b428 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -28,7 +28,7 @@ static struct ctl_table page_table_sysctl[] = {
.data = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &page_table_allocate_pgste_min,
.extra2 = &page_table_allocate_pgste_max,
},
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 301e466e4263..f2cc7da473e4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -347,18 +347,27 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
mm->context.asce, IDTE_LOCAL);
else
__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_local(mm, addr);
}
static inline void pmdp_idte_global(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- if (MACHINE_HAS_TLB_GUEST)
+ if (MACHINE_HAS_TLB_GUEST) {
__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_GLOBAL);
- else if (MACHINE_HAS_IDTE)
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_global(mm, addr);
+ } else if (MACHINE_HAS_IDTE) {
__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
- else
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_global(mm, addr);
+ } else {
__pmdp_csp(pmdp);
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_csp(mm, addr);
+ }
}
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
@@ -392,6 +401,8 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
cpumask_of(smp_processor_id()))) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
+ if (mm_has_pgste(mm))
+ gmap_pmdp_invalidate(mm, addr);
} else {
pmdp_idte_global(mm, addr, pmdp);
}
@@ -399,6 +410,24 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
return old;
}
+static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
+ pud = pud_alloc(mm, p4d, addr);
+ if (!pud)
+ return NULL;
+ pmd = pmd_alloc(mm, pud, addr);
+ return pmd;
+}
+
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
{
@@ -693,40 +722,14 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
/*
* Test and reset if a guest page is dirty
*/
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
- spinlock_t *ptl;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
pgste_t pgste;
- pte_t *ptep;
pte_t pte;
bool dirty;
int nodat;
- pgd = pgd_offset(mm, addr);
- p4d = p4d_alloc(mm, pgd, addr);
- if (!p4d)
- return false;
- pud = pud_alloc(mm, p4d, addr);
- if (!pud)
- return false;
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- return false;
- /* We can't run guests backed by huge pages, but userspace can
- * still set them up and then try to migrate them without any
- * migration support.
- */
- if (pmd_large(*pmd))
- return true;
-
- ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (unlikely(!ptep))
- return false;
-
pgste = pgste_get_lock(ptep);
dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
pgste_val(pgste) &= ~PGSTE_UC_BIT;
@@ -742,21 +745,43 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
*ptep = pte;
}
pgste_set_unlock(ptep, pgste);
-
- spin_unlock(ptl);
return dirty;
}
-EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq)
{
- unsigned long keyul;
+ unsigned long keyul, paddr;
spinlock_t *ptl;
pgste_t old, new;
+ pmd_t *pmdp;
pte_t *ptep;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ spin_unlock(ptl);
+ return -EFAULT;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ /*
+	 * Huge pmds need quiescing operations; they are
+ * always mapped.
+ */
+ page_set_storage_key(paddr, key, 1);
+ spin_unlock(ptl);
+ return 0;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
@@ -767,14 +792,14 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- unsigned long address, bits, skey;
+ unsigned long bits, skey;
- address = pte_val(*ptep) & PAGE_MASK;
- skey = (unsigned long) page_get_storage_key(address);
+ paddr = pte_val(*ptep) & PAGE_MASK;
+ skey = (unsigned long) page_get_storage_key(paddr);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set storage key ACC and FP */
- page_set_storage_key(address, skey, !nq);
+ page_set_storage_key(paddr, skey, !nq);
/* Merge host changed & referenced into pgste */
pgste_val(new) |= bits << 52;
}
@@ -830,11 +855,32 @@ EXPORT_SYMBOL(cond_set_guest_storage_key);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
spinlock_t *ptl;
+ unsigned long paddr;
pgste_t old, new;
+ pmd_t *pmdp;
pte_t *ptep;
int cc = 0;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ spin_unlock(ptl);
+ return -EFAULT;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ cc = page_reset_referenced(paddr);
+ spin_unlock(ptl);
+ return cc;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
@@ -843,7 +889,8 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
pgste_val(new) &= ~PGSTE_GR_BIT;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+ paddr = pte_val(*ptep) & PAGE_MASK;
+ cc = page_reset_referenced(paddr);
/* Merge real referenced bit into host-set */
pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
}
@@ -862,18 +909,42 @@ EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char *key)
{
+ unsigned long paddr;
spinlock_t *ptl;
pgste_t pgste;
+ pmd_t *pmdp;
pte_t *ptep;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ /* Not yet mapped memory has a zero key */
+ spin_unlock(ptl);
+ *key = 0;
+ return 0;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ *key = page_get_storage_key(paddr);
+ spin_unlock(ptl);
+ return 0;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
pgste = pgste_get_lock(ptep);
*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+ paddr = pte_val(*ptep) & PAGE_MASK;
if (!(pte_val(*ptep) & _PAGE_INVALID))
- *key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+ *key = page_get_storage_key(paddr);
/* Reflect guest's logical view, not physical */
*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
pgste_set_unlock(ptep, pgste);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 5f0234ec8038..d7052cbe984f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -485,8 +485,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
/* br %r1 */
_EMIT2(0x07f1);
} else {
- /* larl %r1,.+14 */
- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_tampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline));
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 06a80434cfe6..5bd374491f94 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -134,6 +134,8 @@ void __init numa_setup(void)
{
pr_info("NUMA mode: %s\n", mode->name);
nodes_clear(node_possible_map);
+ /* Initially attach all possible CPUs to node 0. */
+ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
if (mode->setup)
mode->setup();
numa_setup_memory();
@@ -141,20 +143,6 @@ void __init numa_setup(void)
}
/*
- * numa_init_early() - Initialization initcall
- *
- * This runs when only one CPU is online and before the first
- * topology update is called for by the scheduler.
- */
-static int __init numa_init_early(void)
-{
- /* Attach all possible CPUs to node 0 for now. */
- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
- return 0;
-}
-early_initcall(numa_init_early);
-
-/*
* numa_init_late() - Initialization initcall
*
* Register NUMA nodes.
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index b482e95b6249..57f7cdac70a3 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -48,6 +48,10 @@ static char *pci_fmt2_names[] = {
"Maximum work units",
};
+static char *pci_fmt3_names[] = {
+ "Transmitted bytes",
+};
+
static char *pci_sw_names[] = {
"Allocated pages",
"Mapped pages",
@@ -112,6 +116,10 @@ static int pci_perf_show(struct seq_file *m, void *v)
pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
&zdev->fmb->fmt2.consumed_work_units);
break;
+ case 3:
+ pci_fmb_show(m, pci_fmt3_names, ARRAY_SIZE(pci_fmt3_names),
+ &zdev->fmb->fmt3.tx_bytes);
+ break;
default:
seq_puts(m, "Unknown format\n");
}
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index 1ace023cbdce..8d61218a71aa 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -7,13 +7,13 @@ purgatory-y := head.o purgatory.o string.o sha256.o mem.o
targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
-$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S
+$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
-$(obj)/string.o: $(srctree)/arch/s390/lib/string.c
+$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
@@ -21,8 +21,9 @@ LDFLAGS_purgatory.ro += -z nodefaultlib
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
-KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
+KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 660c96a05a9b..2e3707b12edd 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -243,33 +243,26 @@ gprregs:
.quad 0
.endr
-purgatory_sha256_digest:
- .global purgatory_sha256_digest
- .rept 32 /* SHA256_DIGEST_SIZE */
- .byte 0
- .endr
-
-purgatory_sha_regions:
- .global purgatory_sha_regions
- .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */
- .byte 0
- .endr
-
-kernel_entry:
- .global kernel_entry
- .quad 0
-
-kernel_type:
- .global kernel_type
- .quad 0
-
-crash_start:
- .global crash_start
- .quad 0
+/* Macro to define a global variable with name and size (in bytes) to be
+ * shared with C code.
+ *
+ * Add the .size and .type attribute to satisfy checks on the Elf_Sym during
+ * purgatory load.
+ */
+.macro GLOBAL_VARIABLE name,size
+\name:
+ .global \name
+ .size \name,\size
+ .type \name,object
+ .skip \size,0
+.endm
-crash_size:
- .global crash_size
- .quad 0
+GLOBAL_VARIABLE purgatory_sha256_digest,32
+GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE
+GLOBAL_VARIABLE kernel_entry,8
+GLOBAL_VARIABLE kernel_type,8
+GLOBAL_VARIABLE crash_start,8
+GLOBAL_VARIABLE crash_size,8
.align PAGE_SIZE
stack:
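
The GLOBAL_VARIABLE macro above replaces six hand-rolled symbol definitions with one zero-filled object per name that carries .size and .type, which is exactly what the kexec purgatory loader validates on each Elf_Sym before patching values into the image. As a hedged sketch of the C-side view (the authoritative declarations come from the asm/purgatory.h header that purgatory.c includes; sizes mirror the .skip directives):

    #include <linux/types.h>
    #include <linux/purgatory.h>    /* struct kexec_sha_region */

    extern u8  purgatory_sha256_digest[32];                 /* SHA256_DIGEST_SIZE */
    extern struct kexec_sha_region purgatory_sha_regions[16]; /* KEXEC_SEGMENT_MAX */
    extern u64 kernel_entry;
    extern u64 kernel_type;
    extern u64 crash_start;
    extern u64 crash_size;

Defining the storage in assembly while declaring it in C keeps the objects out of .bss (they are patched before the purgatory runs, so they must live in initialized data).
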
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c
index 4e2beb3c29b7..3528e6da4e87 100644
--- a/arch/s390/purgatory/purgatory.c
+++ b/arch/s390/purgatory/purgatory.c
@@ -12,15 +12,6 @@
#include <linux/string.h>
#include <asm/purgatory.h>
-struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
-u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
-
-u64 kernel_entry;
-u64 kernel_type;
-
-u64 crash_start;
-u64 crash_size;
-
int verify_sha256_digest(void)
{
struct kexec_sha_region *ptr, *end;
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
index d92f2d94a5d9..9bba2c14e0ca 100644
--- a/arch/s390/scripts/Makefile.chkbss
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -2,13 +2,22 @@
quiet_cmd_chkbss = CHKBSS $<
define cmd_chkbss
+ rm -f $@; \
if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
endef
-$(obj)/built-in.a: $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+chkbss-target ?= $(obj)/built-in.a
+ifneq (,$(findstring /,$(chkbss)))
+chkbss-files := $(patsubst %, %.chkbss, $(chkbss))
+else
+chkbss-files := $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+endif
+
+$(chkbss-target): $(chkbss-files)
+targets += $(notdir $(chkbss-files))
%.o.chkbss: %.o
$(call cmd,chkbss)
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
index 259aa0680d1a..a1bc02b29c81 100644
--- a/arch/s390/tools/gen_opcode_table.c
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -257,7 +257,7 @@ static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
if (!desc->group)
exit(EXIT_FAILURE);
group = &desc->group[desc->nr_groups - 1];
- strncpy(group->opcode, insn->opcode, 2);
+ memcpy(group->opcode, insn->opcode, 2);
group->type = insn->type;
group->offset = offset;
group->count = 1;
@@ -283,7 +283,7 @@ static void print_opcode_table(struct gen_opcode *desc)
continue;
add_to_group(desc, insn, offset);
if (strncmp(opcode, insn->opcode, 2)) {
- strncpy(opcode, insn->opcode, 2);
+ memcpy(opcode, insn->opcode, 2);
printf("\t/* %.2s */ \\\n", opcode);
}
print_opcode(insn, offset);
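
Both strncpy() calls above copy exactly two characters into a two-byte opcode field that is deliberately never NUL-terminated; GCC 8 flags that pattern with -Wstringop-truncation even though the truncation is intentional. memcpy() states the fixed-width copy directly. A minimal illustration, with a hypothetical struct name standing in for the tool's own:

    #include <string.h>

    struct group_example {          /* hypothetical stand-in */
            char opcode[2];         /* two raw bytes, no terminator */
    };

    static void set_opcode(struct group_example *g, const char *src)
    {
            /* Copies exactly sizeof(g->opcode) bytes and makes no claim
             * about NUL termination, so no truncation warning fires.
             */
            memcpy(g->opcode, src, sizeof(g->opcode));
    }
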
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 0fd0099f43cc..f37b95a80232 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -32,44 +32,9 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
-
- return c;
-}
-
#endif /* CONFIG_CPU_J2 */
#endif /* __ASM_SH_ATOMIC_H */
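
The deleted __atomic_add_unless() is the stock compare-and-swap retry loop; this series moves that loop into the generic atomic headers as atomic_fetch_add_unless(), so each architecture keeps only what is genuinely arch-specific. The generic fallback looks roughly like this (a kernel-internal sketch of include/linux/atomic.h from this era, not sh code):

    static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
    {
            int c = atomic_read(v);

            do {
                    if (unlikely(c == u))
                            break;  /* hit the excluded value: do not add */
            } while (!atomic_try_cmpxchg(v, &c, c + a)); /* reloads c on failure */

            return c;               /* old value, as the fetch_ prefix implies */
    }
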
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
index 1e881f5db659..593a9704782b 100644
--- a/arch/sh/include/asm/cmpxchg-xchg.h
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -8,7 +8,8 @@
* This work is licensed under the terms of the GNU GPL, version 2. See the
* file "COPYING" in the main directory of this archive for more details.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
#include <asm/byteorder.h>
/*
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index 7431c172c0cb..199d17b765f2 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
#include <linux/types.h>
struct arch_hw_breakpoint {
- char *name; /* Contains name of the symbol to set bkpt */
unsigned long address;
u16 len;
u16 type;
@@ -41,6 +40,7 @@ struct sh_ubc {
struct clk *clk; /* optional interface clock / MSTP bit */
};
+struct perf_event_attr;
struct perf_event;
struct task_struct;
struct pmu;
@@ -54,8 +54,10 @@ static inline int hw_breakpoint_slots(int type)
}
/* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h
index 85d8bcaa8493..6171682f7798 100644
--- a/arch/sh/include/asm/kprobes.h
+++ b/arch/sh/include/asm/kprobes.h
@@ -27,7 +27,6 @@ struct kprobe;
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);
-void jprobe_return_end(void);
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
@@ -43,9 +42,6 @@ struct prev_kprobe {
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
- unsigned long jprobe_saved_r15;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 8648ed05ccf0..d9ff3b42da7c 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -124,14 +124,13 @@ static int get_hbp_len(u16 hbp_len)
/*
* Check for virtual address in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->len);
+ va = hw->address;
+ len = get_hbp_len(hw->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -174,40 +173,40 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
return 0;
}
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->len = SH_BREAKPOINT_LEN_1;
+ hw->len = SH_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->len = SH_BREAKPOINT_LEN_2;
+ hw->len = SH_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->len = SH_BREAKPOINT_LEN_4;
+ hw->len = SH_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->len = SH_BREAKPOINT_LEN_8;
+ hw->len = SH_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_R:
- info->type = SH_BREAKPOINT_READ;
+ hw->type = SH_BREAKPOINT_READ;
break;
case HW_BREAKPOINT_W:
- info->type = SH_BREAKPOINT_WRITE;
+ hw->type = SH_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- info->type = SH_BREAKPOINT_RW;
+ hw->type = SH_BREAKPOINT_RW;
break;
default:
return -EINVAL;
@@ -219,19 +218,20 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
ret = -EINVAL;
- switch (info->len) {
+ switch (hw->len) {
case SH_BREAKPOINT_LEN_1:
align = 0;
break;
@@ -249,17 +249,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
}
/*
- * For kernel-addresses, either the address or symbol name can be
- * specified.
- */
- if (info->name)
- info->address = (unsigned long)kallsyms_lookup_name(info->name);
-
- /*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
- if (info->address & align)
+ if (hw->address & align)
return -EINVAL;
return 0;
@@ -346,7 +339,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
perf_bp_event(bp, args->regs);
/* Deliver the signal to userspace */
- if (!arch_check_bp_in_kernelspace(bp)) {
+ if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
force_sig_fault(SIGTRAP, TRAP_HWBKPT,
(void __user *)NULL, current);
}
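
The signature changes above follow a tree-wide conversion: instead of every architecture digging bp->attr and counter_arch_bp(bp) out of the event, the core hands the hook an attr plus a scratch arch_hw_breakpoint to fill in. A hedged sketch of the caller's side (the real call site is in kernel/events/hw_breakpoint.c):

    static int example_parse(struct perf_event *bp)
    {
            struct arch_hw_breakpoint hw = { };
            int err;

            err = hw_breakpoint_arch_parse(bp, &bp->attr, &hw);
            if (err)
                    return err;

            /* Commit the parsed state only after validation succeeds,
             * so a failed modification cannot corrupt a live breakpoint.
             */
            bp->hw.info = hw;
            return 0;
    }
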
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 52a5e11247d1..241e903dd3ee 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -248,11 +248,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
- } else {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
- }
}
goto no_kprobe;
}
@@ -277,11 +272,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -358,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->pc = orig_ret_address;
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
-
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
@@ -508,14 +503,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
} else {
- if (kprobe_handler(args->regs)) {
+ if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
- } else {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler &&
- p->break_handler(p, args->regs))
- ret = NOTIFY_STOP;
- }
}
}
}
@@ -523,57 +512,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_r15 = regs->regs[15];
- addr = kcb->jprobe_saved_r15;
-
- /*
- * TBD: As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- */
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
- MIN_STACK_SIZE(addr));
-
- regs->pc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack_addr = kcb->jprobe_saved_r15;
- u8 *addr = (u8 *)regs->pc;
-
- if ((addr >= (u8 *)jprobe_return) &&
- (addr <= (u8 *)jprobe_return_end)) {
- *regs = kcb->jprobe_saved_regs;
-
- memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
-
- kcb->kprobe_status = KPROBE_HIT_SS;
- preempt_enable_no_resched();
- return 1;
- }
-
- return 0;
-}
-
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
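
With jprobes gone, a pre-handler that returns non-zero is declaring that it redirected execution itself and that single-stepping must be skipped; the hunks above therefore make the arch handler release its own bookkeeping (reset_current_kprobe(), the preempt count) on that path rather than deferring to a break_handler that no longer exists. Sketch of a post-conversion pre-handler, with a hypothetical helper name:

    static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            if (redirect_to_replacement(p, regs)) { /* hypothetical helper */
                    /* regs->pc was changed; the arch code above now
                     * unwinds current_kprobe and preemption itself.
                     */
                    return 1;       /* skip single-step setup */
            }

            return 0;               /* normal path: core single-steps the insn */
    }
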
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index d13ce517f4b9..94c930f0bc62 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -27,17 +27,17 @@ int atomic_fetch_or(int, atomic_t *);
int atomic_fetch_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
-int __atomic_add_unless(atomic_t *, int, int);
+int atomic_fetch_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int);
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+
#define atomic_set_release(v, i) atomic_set((v), (i))
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
-#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
@@ -46,22 +46,4 @@ void atomic_set(atomic_t *, int);
#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
-#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
-#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
#endif /* !(__ARCH_SPARC_ATOMIC__) */
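
The self-referential "#define atomic_fetch_add_unless atomic_fetch_add_unless" above is the opt-out knob for the generic headers: when an architecture defines the macro name to itself, the generic code skips installing its fallback. The guard pattern, sketched against include/linux/atomic.h of this kernel generation:

    #ifndef atomic_fetch_add_unless
    static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
    {
            /* generic cmpxchg retry loop, as shown for sh above */
    }
    #define atomic_fetch_add_unless atomic_fetch_add_unless
    #endif

sparc32 claims the symbol because its implementation is an out-of-line, spinlock-hashed function in lib/atomic32.c (renamed below), not the generic loop.
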
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 28db058d471b..6963482c81d8 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -50,38 +50,6 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_dec_return(v) atomic_sub_return(1, v)
-#define atomic64_dec_return(v) atomic64_sub_return(1, v)
-
-#define atomic_inc_return(v) atomic_add_return(1, v)
-#define atomic64_inc_return(v) atomic64_add_return(1, v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic64_inc(v) atomic64_add(1, v)
-
-#define atomic_dec(v) atomic_sub(1, v)
-#define atomic64_dec(v) atomic64_sub(1, v)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
-
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
static inline int atomic_xchg(atomic_t *v, int new)
@@ -89,42 +57,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new);
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h
index 3704490b4488..bfcaa6326c20 100644
--- a/arch/sparc/include/asm/kprobes.h
+++ b/arch/sparc/include/asm/kprobes.h
@@ -44,7 +44,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_orig_tnpc;
unsigned long kprobe_orig_tstate_pil;
- struct pt_regs jprobe_saved_regs;
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index ab4ba4347941..dfbca2470536 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
- } else {
- if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+ } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ ret = 1;
}
goto no_kprobe;
}
@@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -441,53 +437,6 @@ out:
exception_exit(prev_state);
}
-/* Jprobes support. */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
- regs->tpc = (unsigned long) jp->entry;
- regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
- regs->tstate |= TSTATE_PIL;
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- register unsigned long orig_fp asm("g1");
-
- orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
- __asm__ __volatile__("\n"
-"1: cmp %%sp, %0\n\t"
- "blu,a,pt %%xcc, 1b\n\t"
- " restore\n\t"
- ".globl jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
- "ta 0x70"
- : /* no outputs */
- : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- u32 *addr = (u32 *) regs->tpc;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (addr == (u32 *) jprobe_return_trap_instruction) {
- memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
/* The value stored in the return address register is actually 2
* instructions before where the callee will return to.
* Sequences usually look something like this
@@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 465a901a0ada..281fa634bb1a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
}
EXPORT_SYMBOL(atomic_cmpxchg);
-int __atomic_add_unless(atomic_t *v, int a, int u)
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(__atomic_add_unless);
+EXPORT_SYMBOL(atomic_fetch_add_unless);
/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 887d3a7bb646..6d4774f203d0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -180,7 +180,7 @@ config X86
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_RSEQ
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index a08e82856563..7e3c07d6ad42 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -80,11 +80,6 @@ ifeq ($(CONFIG_X86_32),y)
# alignment instructions.
KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
- # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
- # a lot more stack due to the lack of sharing of stacklots:
- KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0400, \
- $(call cc-option,-fno-unit-at-a-time))
-
# CPU-specific tuning. Anything which can be shared with UML should go here.
include arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
index 0d41d68131cc..2e1382486e91 100644
--- a/arch/x86/boot/bitops.h
+++ b/arch/x86/boot/bitops.h
@@ -17,6 +17,7 @@
#define _LINUX_BITOPS_H /* Inhibit inclusion of <linux/bitops.h> */
#include <linux/types.h>
+#include <asm/asm.h>
static inline bool constant_test_bit(int nr, const void *addr)
{
@@ -28,7 +29,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
bool v;
const u32 *p = (const u32 *)addr;
- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr));
return v;
}
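
CC_SET()/CC_OUT() abstract GCC's condition-code output constraints: on compilers that support asm flag outputs, the carry bit becomes a direct output with no extra instruction; otherwise the macros fall back to a setc into a register operand, which is what the old "setc %0" spelled by hand. Their definitions in arch/x86/include/asm/asm.h are approximately:

    #ifdef __GCC_ASM_FLAG_OUTPUTS__
    # define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
    # define CC_OUT(c) "=@cc" #c
    #else
    # define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
    # define CC_OUT(c) [_cc_ ## c] "=qm"
    #endif

The same conversion is applied to memcmp() in arch/x86/boot/string.c further below.
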
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e98522ea6f09..1458b1700fc7 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c) \
\
table = (typeof(table))sys_table; \
\
- c->runtime_services = table->runtime; \
- c->boot_services = table->boottime; \
- c->text_output = table->con_out; \
+ c->runtime_services = table->runtime; \
+ c->boot_services = table->boottime; \
+ c->text_output = table->con_out; \
}
BOOT_SERVICES(32);
BOOT_SERVICES(64);
-static inline efi_status_t __open_volume32(void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_32_t *image = __image;
- efi_file_handle_32_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
- unsigned long func;
-
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to handle fs_proto\n");
- return status;
- }
-
- func = (unsigned long)io->open_volume;
- status = efi_early->call(func, io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
-static inline efi_status_t __open_volume64(void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_64_t *image = __image;
- efi_file_handle_64_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
- unsigned long func;
-
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to handle fs_proto\n");
- return status;
- }
-
- func = (unsigned long)io->open_volume;
- status = efi_early->call(func, io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
-efi_status_t
-efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
-{
- if (efi_early->is64)
- return __open_volume64(__image, __fh);
-
- return __open_volume32(__image, __fh);
-}
-
void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
{
efi_call_proto(efi_simple_text_output_protocol, output_string,
@@ -109,7 +48,7 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
}
static efi_status_t
-__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
struct pci_setup_rom *rom = NULL;
efi_status_t status;
@@ -134,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for rom\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
return status;
}
memset(rom, 0, sizeof(*rom));
- rom->data.type = SETUP_PCI;
- rom->data.len = size - sizeof(struct setup_data);
- rom->data.next = 0;
- rom->pcilen = pci->romsize;
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = pci->romsize;
*__rom = rom;
status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
@@ -179,96 +118,6 @@ free_struct:
return status;
}
-static void
-setup_efi_pci32(struct boot_params *params, void **pci_handle,
- unsigned long size)
-{
- efi_pci_io_protocol_t *pci = NULL;
- efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 *handles = (u32 *)(unsigned long)pci_handle;
- efi_status_t status;
- unsigned long nr_pci;
- struct setup_data *data;
- int i;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- nr_pci = size / sizeof(u32);
- for (i = 0; i < nr_pci; i++) {
- struct pci_setup_rom *rom = NULL;
- u32 h = handles[i];
-
- status = efi_call_early(handle_protocol, h,
- &pci_proto, (void **)&pci);
-
- if (status != EFI_SUCCESS)
- continue;
-
- if (!pci)
- continue;
-
- status = __setup_efi_pci(pci, &rom);
- if (status != EFI_SUCCESS)
- continue;
-
- if (data)
- data->next = (unsigned long)rom;
- else
- params->hdr.setup_data = (unsigned long)rom;
-
- data = (struct setup_data *)rom;
-
- }
-}
-
-static void
-setup_efi_pci64(struct boot_params *params, void **pci_handle,
- unsigned long size)
-{
- efi_pci_io_protocol_t *pci = NULL;
- efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u64 *handles = (u64 *)(unsigned long)pci_handle;
- efi_status_t status;
- unsigned long nr_pci;
- struct setup_data *data;
- int i;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- nr_pci = size / sizeof(u64);
- for (i = 0; i < nr_pci; i++) {
- struct pci_setup_rom *rom = NULL;
- u64 h = handles[i];
-
- status = efi_call_early(handle_protocol, h,
- &pci_proto, (void **)&pci);
-
- if (status != EFI_SUCCESS)
- continue;
-
- if (!pci)
- continue;
-
- status = __setup_efi_pci(pci, &rom);
- if (status != EFI_SUCCESS)
- continue;
-
- if (data)
- data->next = (unsigned long)rom;
- else
- params->hdr.setup_data = (unsigned long)rom;
-
- data = (struct setup_data *)rom;
-
- }
-}
-
/*
* There's no way to return an informative status from this function,
* because any analysis (and printing of error messages) needs to be
@@ -284,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params)
void **pci_handle = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
unsigned long size = 0;
+ unsigned long nr_pci;
+ struct setup_data *data;
+ int i;
status = efi_call_early(locate_handle,
EFI_LOCATE_BY_PROTOCOL,
@@ -295,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params)
size, (void **)&pci_handle);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for pci_handle\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
return;
}
@@ -307,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params)
if (status != EFI_SUCCESS)
goto free_handle;
- if (efi_early->is64)
- setup_efi_pci64(params, pci_handle, size);
- else
- setup_efi_pci32(params, pci_handle, size);
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
+ for (i = 0; i < nr_pci; i++) {
+ efi_pci_io_protocol_t *pci = NULL;
+ struct pci_setup_rom *rom;
+
+ status = efi_call_early(handle_protocol,
+ efi_is_64bit() ? ((u64 *)pci_handle)[i]
+ : ((u32 *)pci_handle)[i],
+ &pci_proto, (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = preserve_pci_rom_image(pci, &rom);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (data)
+ data->next = (unsigned long)rom;
+ else
+ params->hdr.setup_data = (unsigned long)rom;
+
+ data = (struct setup_data *)rom;
+ }
free_handle:
efi_call_early(free_pool, pci_handle);
@@ -341,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
size + sizeof(struct setup_data), &new);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table,
- "Failed to alloc mem for properties\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
return;
}
@@ -358,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
new->next = 0;
data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
- if (!data)
+ if (!data) {
boot_params->hdr.setup_data = (unsigned long)new;
- else {
+ } else {
while (data->next)
data = (struct setup_data *)(unsigned long)data->next;
data->next = (unsigned long)new;
@@ -380,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
static efi_status_t
-setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
{
- struct efi_uga_draw_protocol *uga = NULL, *first_uga;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ efi_status_t status;
+ u32 width, height;
+ void **uga_handle = NULL;
+ efi_uga_draw_protocol_t *uga = NULL, *first_uga;
unsigned long nr_ugas;
- u32 *handles = (u32 *)uga_handle;
- efi_status_t status = EFI_INVALID_PARAMETER;
int i;
- first_uga = NULL;
- nr_ugas = size / sizeof(u32);
- for (i = 0; i < nr_ugas; i++) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
- u32 handle = handles[i];
-
- status = efi_call_early(handle_protocol, handle,
- &uga_proto, (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_early->call((unsigned long)uga->get_mode, uga,
- &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- *width = w;
- *height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)&uga_handle);
+ if (status != EFI_SUCCESS)
+ return status;
- return status;
-}
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ uga_proto, NULL, &size, uga_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
-static efi_status_t
-setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
-{
- struct efi_uga_draw_protocol *uga = NULL, *first_uga;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- unsigned long nr_ugas;
- u64 *handles = (u64 *)uga_handle;
- efi_status_t status = EFI_INVALID_PARAMETER;
- int i;
+ height = 0;
+ width = 0;
first_uga = NULL;
- nr_ugas = size / sizeof(u64);
+ nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
for (i = 0; i < nr_ugas; i++) {
efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
u32 w, h, depth, refresh;
void *pciio;
- u64 handle = handles[i];
+ unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
+ : ((u32 *)uga_handle)[i];
status = efi_call_early(handle_protocol, handle,
- &uga_proto, (void **)&uga);
+ uga_proto, (void **)&uga);
if (status != EFI_SUCCESS)
continue;
+ pciio = NULL;
efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
- status = efi_early->call((unsigned long)uga->get_mode, uga,
- &w, &h, &depth, &refresh);
+ status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
+ &w, &h, &depth, &refresh);
if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- *width = w;
- *height = h;
+ width = w;
+ height = h;
/*
* Once we've found a UGA supporting PCIIO,
@@ -467,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
}
}
- return status;
-}
-
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
- unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
-
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- size, (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_call_early(locate_handle,
- EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- if (efi_early->is64)
- status = setup_uga64(uga_handle, size, &width, &height);
- else
- status = setup_uga32(uga_handle, size, &width, &height);
-
if (!width && !height)
goto free_handle;
/* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
+ si->lfb_depth = 32;
+ si->lfb_width = width;
+ si->lfb_height = height;
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
free_handle:
efi_call_early(free_pool, uga_handle);
+
return status;
}
@@ -586,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
return NULL;
- if (efi_early->is64)
+ if (efi_is_64bit())
setup_boot_services64(efi_early);
else
setup_boot_services32(efi_early);
@@ -601,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
status = efi_low_alloc(sys_table, 0x4000, 1,
(unsigned long *)&boot_params);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
+ efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
return NULL;
}
@@ -617,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c)
* Fill out some of the header fields ourselves because the
* EFI firmware loader doesn't load the first sector.
*/
- hdr->root_flags = 1;
- hdr->vid_mode = 0xffff;
- hdr->boot_flag = 0xAA55;
+ hdr->root_flags = 1;
+ hdr->vid_mode = 0xffff;
+ hdr->boot_flag = 0xAA55;
hdr->type_of_loader = 0x21;
@@ -627,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
if (!cmdline_ptr)
goto fail;
+
hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
/* Fill in upper bits of command line address, NOP on 32 bit */
boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
@@ -663,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c)
boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32;
return boot_params;
+
fail2:
efi_free(sys_table, options_size, hdr->cmd_line_ptr);
fail:
efi_free(sys_table, 0x4000, (unsigned long)boot_params);
+
return NULL;
}
@@ -678,7 +499,7 @@ static void add_e820ext(struct boot_params *params,
unsigned long size;
e820ext->type = SETUP_E820_EXT;
- e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+ e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
e820ext->next = 0;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -692,8 +513,8 @@ static void add_e820ext(struct boot_params *params,
params->hdr.setup_data = (unsigned long)e820ext;
}
-static efi_status_t setup_e820(struct boot_params *params,
- struct setup_data *e820ext, u32 e820ext_size)
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
{
struct boot_e820_entry *entry = params->e820_table;
struct efi_info *efi = &params->efi_info;
@@ -814,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
}
struct exit_boot_struct {
- struct boot_params *boot_params;
- struct efi_info *efi;
- struct setup_data *e820ext;
- __u32 e820ext_size;
- bool is64;
+ struct boot_params *boot_params;
+ struct efi_info *efi;
+ struct setup_data *e820ext;
+ __u32 e820ext_size;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -845,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
first = false;
}
- signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+ signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+ : EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
- p->efi->efi_systab = (unsigned long)sys_table_arg;
- p->efi->efi_memdesc_size = *map->desc_size;
- p->efi->efi_memdesc_version = *map->desc_ver;
- p->efi->efi_memmap = (unsigned long)*map->map;
- p->efi->efi_memmap_size = *map->map_size;
+ p->efi->efi_systab = (unsigned long)sys_table_arg;
+ p->efi->efi_memdesc_size = *map->desc_size;
+ p->efi->efi_memdesc_version = *map->desc_ver;
+ p->efi->efi_memmap = (unsigned long)*map->map;
+ p->efi->efi_memmap_size = *map->map_size;
#ifdef CONFIG_X86_64
- p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
- p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+ p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
#endif
return EFI_SUCCESS;
}
-static efi_status_t exit_boot(struct boot_params *boot_params,
- void *handle, bool is64)
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
{
unsigned long map_sz, key, desc_size, buff_size;
efi_memory_desc_t *mem_map;
@@ -874,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
struct efi_boot_memmap map;
struct exit_boot_struct priv;
- map.map = &mem_map;
- map.map_size = &map_sz;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_version;
- map.key_ptr = &key;
- map.buff_size = &buff_size;
- priv.boot_params = boot_params;
- priv.efi = &boot_params->efi_info;
- priv.e820ext = NULL;
- priv.e820ext_size = 0;
- priv.is64 = is64;
+ map.map = &mem_map;
+ map.map_size = &map_sz;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_version;
+ map.key_ptr = &key;
+ map.buff_size = &buff_size;
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+ priv.e820ext = NULL;
+ priv.e820ext_size = 0;
/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -892,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
if (status != EFI_SUCCESS)
return status;
- e820ext = priv.e820ext;
- e820ext_size = priv.e820ext_size;
+ e820ext = priv.e820ext;
+ e820ext_size = priv.e820ext_size;
+
/* Historic? */
- boot_params->alt_mem_k = 32 * 1024;
+ boot_params->alt_mem_k = 32 * 1024;
status = setup_e820(boot_params, e820ext, e820ext_size);
if (status != EFI_SUCCESS)
@@ -908,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
* On success we return a pointer to a boot_params structure, and NULL
* on failure.
*/
-struct boot_params *efi_main(struct efi_config *c,
- struct boot_params *boot_params)
+struct boot_params *
+efi_main(struct efi_config *c, struct boot_params *boot_params)
{
struct desc_ptr *gdt = NULL;
efi_loaded_image_t *image;
@@ -918,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c,
struct desc_struct *desc;
void *handle;
efi_system_table_t *_table;
- bool is64;
efi_early = c;
_table = (efi_system_table_t *)(unsigned long)efi_early->table;
handle = (void *)(unsigned long)efi_early->image_handle;
- is64 = efi_early->is64;
sys_table = _table;
@@ -932,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c,
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
goto fail;
- if (is64)
+ if (efi_is_64bit())
setup_boot_services64(efi_early);
else
setup_boot_services32(efi_early);
@@ -957,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c,
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
sizeof(*gdt), (void **)&gdt);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
goto fail;
}
@@ -965,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c,
status = efi_low_alloc(sys_table, gdt->size, 8,
(unsigned long *)&gdt->address);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for gdt\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
goto fail;
}
@@ -988,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c,
hdr->code32_start = bzimage_addr;
}
- status = exit_boot(boot_params, handle, is64);
+ status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
efi_printk(sys_table, "exit_boot() failed!\n");
goto fail;
@@ -1002,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c,
if (IS_ENABLED(CONFIG_X86_64)) {
/* __KERNEL32_CS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
- desc->l = 0;
- desc->d = SEG_OP_SIZE_32BIT;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
+
desc++;
} else {
/* Second entry is unused on 32-bit */
@@ -1022,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c,
}
/* __KERNEL_CS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+
if (IS_ENABLED(CONFIG_X86_64)) {
desc->l = 1;
desc->d = 0;
@@ -1038,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c,
desc->l = 0;
desc->d = SEG_OP_SIZE_32BIT;
}
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
/* __KERNEL_DS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
- desc->l = 0;
- desc->d = SEG_OP_SIZE_32BIT;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
if (IS_ENABLED(CONFIG_X86_64)) {
/* Task segment value */
- desc->limit0 = 0x0000;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_TSS;
- desc->s = 0;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0x0;
- desc->avl = 0;
- desc->l = 0;
- desc->d = 0;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0x0000;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_TSS;
+ desc->s = 0;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0x0;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = 0;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
}
@@ -1082,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c,
return boot_params;
fail:
efi_printk(sys_table, "efi_main() failed!\n");
+
return NULL;
}
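
The recurring idiom in the rewritten eboot.c is collapsing the parallel 32-bit and 64-bit walkers (setup_efi_pci32/64, setup_uga32/64, __open_volume32/64) into single loops that widen each firmware handle according to efi_is_64bit(). Distilled into a sketch (not the exact helper the file uses):

    /* Firmware returns an array of native-width handles; widening per
     * element lets one loop serve both firmware word sizes. On a given
     * build efi_is_64bit() is constant, so the dead branch folds away.
     */
    static unsigned long example_get_handle(void *handles, unsigned long i)
    {
            if (efi_is_64bit())
                    return (unsigned long)((u64 *)handles)[i];

            return (unsigned long)((u32 *)handles)[i];
    }
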
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index e799dc5c6448..8297387c4676 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -12,22 +12,22 @@
#define DESC_TYPE_CODE_DATA (1 << 0)
-struct efi_uga_draw_protocol_32 {
+typedef struct {
u32 get_mode;
u32 set_mode;
u32 blt;
-};
+} efi_uga_draw_protocol_32_t;
-struct efi_uga_draw_protocol_64 {
+typedef struct {
u64 get_mode;
u64 set_mode;
u64 blt;
-};
+} efi_uga_draw_protocol_64_t;
-struct efi_uga_draw_protocol {
+typedef struct {
void *get_mode;
void *set_mode;
void *blt;
-};
+} efi_uga_draw_protocol_t;
#endif /* BOOT_COMPRESSED_EBOOT_H */
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index b87a7582853d..302517929932 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -102,7 +102,7 @@ static bool memmap_too_large;
/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
-unsigned long long mem_limit = ULLONG_MAX;
+static unsigned long long mem_limit = ULLONG_MAX;
enum mem_avoid_index {
@@ -215,7 +215,36 @@ static void mem_avoid_memmap(char *str)
memmap_too_large = true;
}
-static int handle_mem_memmap(void)
+/* Store the number of 1GB huge pages which users specified: */
+static unsigned long max_gb_huge_pages;
+
+static void parse_gb_huge_pages(char *param, char *val)
+{
+ static bool gbpage_sz;
+ char *p;
+
+ if (!strcmp(param, "hugepagesz")) {
+ p = val;
+ if (memparse(p, &p) != PUD_SIZE) {
+ gbpage_sz = false;
+ return;
+ }
+
+ if (gbpage_sz)
+ warn("Repeatedly set hugeTLB page size of 1G!\n");
+ gbpage_sz = true;
+ return;
+ }
+
+ if (!strcmp(param, "hugepages") && gbpage_sz) {
+ p = val;
+ max_gb_huge_pages = simple_strtoull(p, &p, 0);
+ return;
+ }
+}
+
+
+static int handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
size_t len = strlen((char *)args);
@@ -223,7 +252,8 @@ static int handle_mem_memmap(void)
char *param, *val;
u64 mem_size;
- if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+ if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
+ !strstr(args, "hugepages"))
return 0;
tmp_cmdline = malloc(len + 1);
@@ -248,6 +278,8 @@ static int handle_mem_memmap(void)
if (!strcmp(param, "memmap")) {
mem_avoid_memmap(val);
+ } else if (strstr(param, "hugepages")) {
+ parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
char *p = val;
@@ -387,7 +419,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
/* We don't need to set a mapping for setup_data. */
/* Mark the memmap regions we need to avoid */
- handle_mem_memmap();
+ handle_mem_options();
#ifdef CONFIG_X86_VERBOSE_BOOTUP
/* Make sure video RAM can be used. */
@@ -466,6 +498,60 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
}
}
+/*
+ * Skip as many 1GB huge pages as possible in the passed region
+ * according to the number which users specified:
+ */
+static void
+process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
+{
+ unsigned long addr, size = 0;
+ struct mem_vector tmp;
+ int i = 0;
+
+ if (!max_gb_huge_pages) {
+ store_slot_info(region, image_size);
+ return;
+ }
+
+ addr = ALIGN(region->start, PUD_SIZE);
+ /* Did we raise the address above the passed in memory entry? */
+ if (addr < region->start + region->size)
+ size = region->size - (addr - region->start);
+
+ /* Check how many 1GB huge pages can be filtered out: */
+ while (size > PUD_SIZE && max_gb_huge_pages) {
+ size -= PUD_SIZE;
+ max_gb_huge_pages--;
+ i++;
+ }
+
+ /* No good 1GB huge pages found: */
+ if (!i) {
+ store_slot_info(region, image_size);
+ return;
+ }
+
+ /*
+ * Skip those 'i'*1GB good huge pages, and continue checking and
+ * processing the remaining head or tail part of the passed region
+ * if available.
+ */
+
+ if (addr >= region->start + image_size) {
+ tmp.start = region->start;
+ tmp.size = addr - region->start;
+ store_slot_info(&tmp, image_size);
+ }
+
+ size = region->size - (addr - region->start) - i * PUD_SIZE;
+ if (size >= image_size) {
+ tmp.start = addr + i * PUD_SIZE;
+ tmp.size = size;
+ store_slot_info(&tmp, image_size);
+ }
+}
+
static unsigned long slots_fetch_random(void)
{
unsigned long slot;
@@ -546,7 +632,7 @@ static void process_mem_region(struct mem_vector *entry,
/* If nothing overlaps, store the region and return. */
if (!mem_avoid_overlap(&region, &overlap)) {
- store_slot_info(&region, image_size);
+ process_gb_huge_pages(&region, image_size);
return;
}
@@ -556,7 +642,7 @@ static void process_mem_region(struct mem_vector *entry,
beginning.start = region.start;
beginning.size = overlap.start - region.start;
- store_slot_info(&beginning, image_size);
+ process_gb_huge_pages(&beginning, image_size);
}
/* Return if overlap extends to or past end of region. */
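
process_gb_huge_pages() withholds whole 1 GiB-aligned stretches from KASLR so they remain usable as 1 GiB huge pages, then offers only the head and tail fragments as candidate kernel slots. A worked example of the arithmetic, with numbers chosen purely for illustration:

    /* Assume PUD_SIZE = 1 GiB, max_gb_huge_pages = 3, and a region
     * starting at 0x3f000000 with size 4 GiB (end = 0x13f000000):
     *
     *   addr = ALIGN(0x3f000000, 1G) = 0x40000000
     *   size = end - addr            = 0xff000000 (3 GiB + 1008 MiB)
     *
     * The while loop strips i = 3 full GiB pages, reserving
     * [0x40000000, 0x100000000) for huge pages. The head fragment
     * [0x3f000000, 0x40000000) and the tail fragment
     * [0x100000000, 0x13f000000) are handed to store_slot_info(),
     * each only if it is at least image_size bytes long.
     */
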
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 16f49123d747..c4428a176973 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -13,6 +13,7 @@
*/
#include <linux/types.h>
+#include <asm/asm.h>
#include "ctype.h"
#include "string.h"
@@ -28,8 +29,8 @@
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
- asm("repe; cmpsb; setnz %0"
- : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ asm("repe; cmpsb" CC_SET(nz)
+ : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index 717bf0776421..5f7e43d4f64a 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -75,7 +75,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov LEN, %r8
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 5de7c0d46edf..acd11b3bf639 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
}
};
-static const struct x86_cpu_id aesni_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AES),
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
static int __init crypto_aegis128_aesni_module_init(void)
{
- if (!x86_match_cpu(aesni_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_AES) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_aegis128_aesni_alg,
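
The replacement checks above (repeated for the AEGIS and MORUS variants that follow) tighten the gating: x86_match_cpu() only proved the instructions exist, while SIMD code also needs the OS to be saving and restoring the relevant register state, hence the added OSXSAVE and xfeatures tests. Dropping MODULE_DEVICE_TABLE also stops the module auto-loading on CPU flags alone. The template, as a hedged sketch:

    static int __init example_simd_init(void)
    {
            if (!boot_cpu_has(X86_FEATURE_XMM2) ||           /* SSE2 present */
                !boot_cpu_has(X86_FEATURE_AES) ||            /* AES-NI present */
                !boot_cpu_has(X86_FEATURE_OSXSAVE) ||        /* OS enabled XSAVE */
                !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) /* XMM state managed */
                    return -ENODEV;

            return 0;   /* safe to register the SIMD algorithms */
    }
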
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S
index 4eda2b8db9e1..491dd61c845c 100644
--- a/arch/x86/crypto/aegis128l-aesni-asm.S
+++ b/arch/x86/crypto/aegis128l-aesni-asm.S
@@ -66,7 +66,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG0, MSG0
pxor MSG1, MSG1
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 876e4866e633..2071c3d1ae07 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
}
};
-static const struct x86_cpu_id aesni_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AES),
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
static int __init crypto_aegis128l_aesni_module_init(void)
{
- if (!x86_match_cpu(aesni_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_AES) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_aegis128l_aesni_alg,
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S
index 32aae8397268..8870c7c5d9a4 100644
--- a/arch/x86/crypto/aegis256-aesni-asm.S
+++ b/arch/x86/crypto/aegis256-aesni-asm.S
@@ -59,7 +59,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov LEN, %r8
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 2b5dd3af8f4d..b5f2a8fd5a71 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
}
};
-static const struct x86_cpu_id aesni_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AES),
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
static int __init crypto_aegis256_aesni_module_init(void)
{
- if (!x86_match_cpu(aesni_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_AES) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_aegis256_aesni_alg,
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index e762ef417562..9bd139569b41 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -258,7 +258,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.macro GCM_INIT Iv SUBKEY AAD AADLEN
mov \AADLEN, %r11
mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
- xor %r11, %r11
+ xor %r11d, %r11d
mov %r11, InLen(%arg2) # ctx_data.in_length = 0
mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
@@ -286,7 +286,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
movdqu HashKey(%arg2), %xmm13
add %arg5, InLen(%arg2)
- xor %r11, %r11 # initialise the data pointer offset as zero
+ xor %r11d, %r11d # initialise the data pointer offset as zero
PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation
sub %r11, %arg5 # sub partial block data used
@@ -702,7 +702,7 @@ _no_extra_mask_1_\@:
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- xor %rax,%rax
+ xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _dec_done_\@
@@ -737,7 +737,7 @@ _no_extra_mask_2_\@:
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- xor %rax,%rax
+ xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _encode_done_\@
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index faecb1518bf8..1985ea0b551b 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -463,7 +463,7 @@ _get_AAD_rest_final\@:
_get_AAD_done\@:
# initialize the data pointer offset as zero
- xor %r11, %r11
+ xor %r11d, %r11d
# start AES for num_initial_blocks blocks
mov arg5, %rax # rax = *Y0
@@ -1770,7 +1770,7 @@ _get_AAD_rest_final\@:
_get_AAD_done\@:
# initialize the data pointer offset as zero
- xor %r11, %r11
+ xor %r11d, %r11d
# start AES for num_initial_blocks blocks
mov arg5, %rax # rax = *Y0
diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S
index 07653d4582a6..de182c460f82 100644
--- a/arch/x86/crypto/morus1280-avx2-asm.S
+++ b/arch/x86/crypto/morus1280-avx2-asm.S
@@ -113,7 +113,7 @@ ENDPROC(__morus1280_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
vpxor MSG, MSG, MSG
mov %rcx, %r8
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
index f111f36d26dc..6634907d6ccd 100644
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ b/arch/x86/crypto/morus1280-avx2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
-static const struct x86_cpu_id avx2_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AVX2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id);
-
static int __init crypto_morus1280_avx2_module_init(void)
{
- if (!x86_match_cpu(avx2_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_morus1280_avx2_algs,
diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S
index bd1aa1b60869..da5d2905db60 100644
--- a/arch/x86/crypto/morus1280-sse2-asm.S
+++ b/arch/x86/crypto/morus1280-sse2-asm.S
@@ -235,7 +235,7 @@ ENDPROC(__morus1280_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG_LO, MSG_LO
pxor MSG_HI, MSG_HI
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 839270aa713c..95cf857d2cbb 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
-static const struct x86_cpu_id sse2_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
static int __init crypto_morus1280_sse2_module_init(void)
{
- if (!x86_match_cpu(sse2_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_morus1280_sse2_algs,
diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S
index efa02816d921..414db480250e 100644
--- a/arch/x86/crypto/morus640-sse2-asm.S
+++ b/arch/x86/crypto/morus640-sse2-asm.S
@@ -113,7 +113,7 @@ ENDPROC(__morus640_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov %rcx, %r8
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 26b47e2db8d2..615fb7bc9a32 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
-static const struct x86_cpu_id sse2_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
static int __init crypto_morus640_sse2_module_init(void)
{
- if (!x86_match_cpu(sse2_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_morus640_sse2_algs,
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index 6204bd53528c..613d0bfc3d84 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -96,7 +96,7 @@
# cleanup workspace
mov $8, %ecx
mov %rsp, %rdi
- xor %rax, %rax
+ xor %eax, %eax
rep stosq
mov %rbp, %rsp # deallocate workspace
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index c371bfee137a..2767c625a52c 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -65,7 +65,7 @@
# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
-# define resume_kernel restore_all
+# define resume_kernel restore_all_kernel
#endif
.macro TRACE_IRQS_IRET
@@ -77,6 +77,8 @@
#endif
.endm
+#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
+
/*
* User gs save/restore
*
@@ -154,7 +156,52 @@
#endif /* CONFIG_X86_32_LAZY_GS */
-.macro SAVE_ALL pt_regs_ax=%eax
+/* Unconditionally switch to user cr3 */
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+
+ movl %cr3, \scratch_reg
+ orl $PTI_SWITCH_MASK, \scratch_reg
+ movl \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+.macro BUG_IF_WRONG_CR3 no_user_check=0
+#ifdef CONFIG_DEBUG_ENTRY
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ .if \no_user_check == 0
+ /* coming from usermode? */
+ testl $SEGMENT_RPL_MASK, PT_CS(%esp)
+ jz .Lend_\@
+ .endif
+ /* On user-cr3? */
+ movl %cr3, %eax
+ testl $PTI_SWITCH_MASK, %eax
+ jnz .Lend_\@
+ /* From userspace with kernel cr3 - BUG */
+ ud2
+.Lend_\@:
+#endif
+.endm
+
+/*
+ * Switch to kernel cr3 if not already loaded and return current cr3 in
+ * \scratch_reg
+ */
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ movl %cr3, \scratch_reg
+ /* Test if we are already on kernel CR3 */
+ testl $PTI_SWITCH_MASK, \scratch_reg
+ jz .Lend_\@
+ andl $(~PTI_SWITCH_MASK), \scratch_reg
+ movl \scratch_reg, %cr3
+ /* Return original CR3 in \scratch_reg */
+ orl $PTI_SWITCH_MASK, \scratch_reg
+.Lend_\@:
+.endm
+
+.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
cld
PUSH_GS
pushl %fs
@@ -173,6 +220,29 @@
movl $(__KERNEL_PERCPU), %edx
movl %edx, %fs
SET_KERNEL_GS %edx
+
+ /* Switch to kernel stack if necessary */
+.if \switch_stacks > 0
+ SWITCH_TO_KERNEL_STACK
+.endif
+
+.endm
+
+.macro SAVE_ALL_NMI cr3_reg:req
+ SAVE_ALL
+
+ BUG_IF_WRONG_CR3
+
+ /*
+ * Now switch the CR3 when PTI is enabled.
+ *
+	 * We can enter with either user or kernel cr3; the code stores
+	 * the old cr3 in \cr3_reg and switches to the kernel cr3 if
+	 * necessary.
+ */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
+
+.Lend_\@:
.endm
/*
@@ -221,6 +291,349 @@
POP_GS_EX
.endm
+.macro RESTORE_ALL_NMI cr3_reg:req pop=0
+ /*
+ * Now switch the CR3 when PTI is enabled.
+ *
+ * We enter with kernel cr3 and switch the cr3 to the value
+	 * stored in \cr3_reg, which is either a user or a kernel cr3.
+ */
+ ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
+
+ testl $PTI_SWITCH_MASK, \cr3_reg
+ jz .Lswitched_\@
+
+ /* User cr3 in \cr3_reg - write it to hardware cr3 */
+ movl \cr3_reg, %cr3
+
+.Lswitched_\@:
+
+ BUG_IF_WRONG_CR3
+
+ RESTORE_REGS pop=\pop
+.endm
+
+.macro CHECK_AND_APPLY_ESPFIX
+#ifdef CONFIG_X86_ESPFIX32
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+
+ ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
+ /*
+ * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+ * are returning to the kernel.
+ * See comments in process.c:copy_thread() for details.
+ */
+ movb PT_OLDSS(%esp), %ah
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
+	jne	.Lend_\@	# not returning to user-space with LDT SS
+
+ /*
+ * Setup and switch to ESPFIX stack
+ *
+ * We're returning to userspace with a 16 bit stack. The CPU will not
+ * restore the high word of ESP for us on executing iret... This is an
+ * "official" bug of all the x86-compatible CPUs, which we can work
+ * around to make dosemu and wine happy. We do this by preloading the
+ * high word of ESP with the high word of the userspace ESP while
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
+ shr $16, %edx
+ mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
+ pushl $__ESPFIX_SS
+ pushl %eax /* new kernel esp */
+ /*
+ * Disable interrupts, but do not irqtrace this section: we
+ * will soon execute iret and the tracer was already set to
+ * the irqstate after the IRET:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ lss (%esp), %esp /* switch to espfix segment */
+.Lend_\@:
+#endif /* CONFIG_X86_ESPFIX32 */
+.endm
+
+/*
+ * Called with pt_regs fully populated and kernel segments loaded,
+ * so we can access PER_CPU and use the integer registers.
+ *
+ * We need to be very careful here with the %esp switch, because an NMI
+ * can happen anywhere. If the NMI handler finds itself on the
+ * entry-stack, it will overwrite the task-stack and everything we
+ * copied there. So allocate the stack-frame on the task-stack and
+ * switch to it before we do any copying.
+ */
+
+#define CS_FROM_ENTRY_STACK (1 << 31)
+#define CS_FROM_USER_CR3 (1 << 30)
+
+.macro SWITCH_TO_KERNEL_STACK
+
+ ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+ BUG_IF_WRONG_CR3
+
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+
+ /*
+ * %eax now contains the entry cr3 and we carry it forward in
+	 * that register for the duration of this macro.
+ */
+
+ /* Are we on the entry stack? Bail out if not! */
+ movl PER_CPU_VAR(cpu_entry_area), %ecx
+ addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+ subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
+ cmpl $SIZEOF_entry_stack, %ecx
+ jae .Lend_\@
+
+ /* Load stack pointer into %esi and %edi */
+ movl %esp, %esi
+ movl %esi, %edi
+
+ /* Move %edi to the top of the entry stack */
+ andl $(MASK_entry_stack), %edi
+ addl $(SIZEOF_entry_stack), %edi
+
+ /* Load top of task-stack into %edi */
+ movl TSS_entry2task_stack(%edi), %edi
+
+ /*
+ * Clear unused upper bits of the dword containing the word-sized CS
+ * slot in pt_regs in case hardware didn't clear it for us.
+ */
+ andl $(0x0000ffff), PT_CS(%esp)
+
+ /* Special case - entry from kernel mode via entry stack */
+#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
+ movb PT_CS(%esp), %cl
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
+#else
+ movl PT_CS(%esp), %ecx
+ andl $SEGMENT_RPL_MASK, %ecx
+#endif
+ cmpl $USER_RPL, %ecx
+ jb .Lentry_from_kernel_\@
+
+ /* Bytes to copy */
+ movl $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esi)
+ jz .Lcopy_pt_regs_\@
+
+ /*
+ * Stack-frame contains 4 additional segment registers when
+ * coming from VM86 mode
+ */
+ addl $(4 * 4), %ecx
+
+#endif
+.Lcopy_pt_regs_\@:
+
+ /* Allocate frame on task-stack */
+ subl %ecx, %edi
+
+ /* Switch to task-stack */
+ movl %edi, %esp
+
+ /*
+ * We are now on the task-stack and can safely copy over the
+ * stack-frame
+ */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ jmp .Lend_\@
+
+.Lentry_from_kernel_\@:
+
+ /*
+ * This handles the case when we enter the kernel from
+ * kernel-mode and %esp points to the entry-stack. When this
+ * happens we need to switch to the task-stack to run C code,
+ * but switch back to the entry-stack again when we approach
+ * iret and return to the interrupted code-path. This usually
+ * happens when we hit an exception while restoring user-space
+ * segment registers on the way back to user-space or when the
+ * sysenter handler runs with eflags.tf set.
+ *
+ * When we switch to the task-stack here, we can't trust the
+ * contents of the entry-stack anymore, as the exception handler
+ * might be scheduled out or moved to another CPU. Therefore we
+ * copy the complete entry-stack to the task-stack and set a
+ * marker in the iret-frame (bit 31 of the CS dword) to detect
+ * what we've done on the iret path.
+ *
+ * On the iret path we copy everything back and switch to the
+ * entry-stack, so that the interrupted kernel code-path
+ * continues on the same stack it was interrupted with.
+ *
+ * Be aware that an NMI can happen anytime in this code.
+ *
+ * %esi: Entry-Stack pointer (same as %esp)
+ * %edi: Top of the task stack
+ * %eax: CR3 on kernel entry
+ */
+
+ /* Calculate number of bytes on the entry stack in %ecx */
+ movl %esi, %ecx
+
+	/* Compute the top of the entry-stack in %ecx */
+ andl $(MASK_entry_stack), %ecx
+ addl $(SIZEOF_entry_stack), %ecx
+
+	/* Compute the number of bytes on the entry-stack in %ecx */
+ sub %esi, %ecx
+
+ /* Mark stackframe as coming from entry stack */
+ orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+
+ /*
+ * Test the cr3 used to enter the kernel and add a marker
+ * so that we can switch back to it before iret.
+ */
+ testl $PTI_SWITCH_MASK, %eax
+ jz .Lcopy_pt_regs_\@
+ orl $CS_FROM_USER_CR3, PT_CS(%esp)
+
+ /*
+ * %esi and %edi are unchanged, %ecx contains the number of
+ * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
+ * the stack-frame on task-stack and copy everything over
+ */
+ jmp .Lcopy_pt_regs_\@
+
+.Lend_\@:
+.endm
+
+/*
+ * Switch back from the kernel stack to the entry stack.
+ *
+ * The %esp register must point to pt_regs on the task stack. It will
+ * first calculate the size of the stack-frame to copy, depending on
+ * whether we return to VM86 mode or not. With that it uses 'rep movsl'
+ * to copy the contents of the stack over to the entry stack.
+ *
+ * We must be very careful here, as we can't trust the contents of the
+ * task-stack once we switched to the entry-stack. When an NMI happens
+ * while on the entry-stack, the NMI handler will switch back to the top
+ * of the task stack, overwriting the stack-frame we are about to copy.
+ * Therefore we switch the stack only after everything is copied over.
+ */
+.macro SWITCH_TO_ENTRY_STACK
+
+ ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+ /* Bytes to copy */
+ movl $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+ testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
+ jz .Lcopy_pt_regs_\@
+
+ /* Additional 4 registers to copy when returning to VM86 mode */
+ addl $(4 * 4), %ecx
+
+.Lcopy_pt_regs_\@:
+#endif
+
+ /* Initialize source and destination for movsl */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+ subl %ecx, %edi
+ movl %esp, %esi
+
+ /* Save future stack pointer in %ebx */
+ movl %edi, %ebx
+
+ /* Copy over the stack-frame */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /*
+ * Switch to entry-stack - needs to happen after everything is
+ * copied because the NMI handler will overwrite the task-stack
+ * when on entry-stack
+ */
+ movl %ebx, %esp
+
+.Lend_\@:
+.endm
+
+/*
+ * This macro handles the case when we return to kernel-mode on the iret
+ * path and have to switch back to the entry stack and/or user-cr3
+ *
+ * See the comments below the .Lentry_from_kernel_\@ label in the
+ * SWITCH_TO_KERNEL_STACK macro for more details.
+ */
+.macro PARANOID_EXIT_TO_KERNEL_MODE
+
+ /*
+ * Test if we entered the kernel with the entry-stack. Most
+ * likely we did not, because this code only runs on the
+ * return-to-kernel path.
+ */
+ testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+ jz .Lend_\@
+
+ /* Unlikely slow-path */
+
+ /* Clear marker from stack-frame */
+ andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
+
+ /* Copy the remaining task-stack contents to entry-stack */
+ movl %esp, %esi
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+
+	/* Compute the number of bytes on the task-stack in %ecx */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
+ subl %esi, %ecx
+
+ /* Allocate stack-frame on entry-stack */
+ subl %ecx, %edi
+
+ /*
+	 * Save the future stack pointer; we must not switch until the
+ * copy is done, otherwise the NMI handler could destroy the
+ * contents of the task-stack we are about to copy.
+ */
+ movl %edi, %ebx
+
+ /* Do the copy */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /* Safe to switch to entry-stack now */
+ movl %ebx, %esp
+
+ /*
+	 * We came from the entry-stack and need to check if we also need to
+ * switch back to user cr3.
+ */
+ testl $CS_FROM_USER_CR3, PT_CS(%esp)
+ jz .Lend_\@
+
+ /* Clear marker from stack-frame */
+ andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
+
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+.Lend_\@:
+.endm
/*
* %eax: prev task
* %edx: next task
@@ -351,9 +764,9 @@ ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz restore_all
+ jnz restore_all_kernel
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
+ jz restore_all_kernel
call preempt_schedule_irq
jmp .Lneed_resched
END(resume_kernel)
@@ -412,7 +825,21 @@ ENTRY(xen_sysenter_target)
* 0(%ebp) arg6
*/
ENTRY(entry_SYSENTER_32)
- movl TSS_sysenter_sp0(%esp), %esp
+ /*
+	 * On entry-stack with all userspace-regs live - save and
+	 * restore eflags and %eax so that %eax can be used as a
+	 * scratch register for the cr3 switch.
+ */
+ pushfl
+ pushl %eax
+ BUG_IF_WRONG_CR3 no_user_check=1
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+ popl %eax
+ popfl
+
+ /* Stack empty again, switch to task stack */
+ movl TSS_entry2task_stack(%esp), %esp
+
.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
@@ -421,7 +848,7 @@ ENTRY(entry_SYSENTER_32)
pushl $__USER_CS /* pt_regs->cs */
pushl $0 /* pt_regs->ip = 0 (placeholder) */
pushl %eax /* pt_regs->orig_ax */
- SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
+ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */
/*
* SYSENTER doesn't filter flags, so we need to clear NT, AC
@@ -460,25 +887,49 @@ ENTRY(entry_SYSENTER_32)
/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
+
+ /*
+ * Setup entry stack - we keep the pointer in %eax and do the
+ * switch after almost all user-state is restored.
+ */
+
+ /* Load entry stack pointer and allocate frame for eflags/eax */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
+ subl $(2*4), %eax
+
+ /* Copy eflags and eax to entry stack */
+ movl PT_EFLAGS(%esp), %edi
+ movl PT_EAX(%esp), %esi
+ movl %edi, (%eax)
+ movl %esi, 4(%eax)
+
+ /* Restore user registers and segments */
movl PT_EIP(%esp), %edx /* pt_regs->ip */
movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1: mov PT_FS(%esp), %fs
PTGS_TO_GS
+
popl %ebx /* pt_regs->bx */
addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
popl %esi /* pt_regs->si */
popl %edi /* pt_regs->di */
popl %ebp /* pt_regs->bp */
- popl %eax /* pt_regs->ax */
+
+ /* Switch to entry stack */
+ movl %eax, %esp
+
+ /* Now ready to switch the cr3 */
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
/*
* Restore all flags except IF. (We restore IF separately because
* STI gives a one-instruction window in which we won't be interrupted,
* whereas POPF does not.)
*/
- addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */
btrl $X86_EFLAGS_IF_BIT, (%esp)
+ BUG_IF_WRONG_CR3 no_user_check=1
popfl
+ popl %eax
/*
* Return back to the vDSO, which will pop ecx and edx.
@@ -532,7 +983,8 @@ ENDPROC(entry_SYSENTER_32)
ENTRY(entry_INT80_32)
ASM_CLAC
pushl %eax /* pt_regs->orig_ax */
- SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
+
+ SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
/*
* User mode is traced as though IRQs are on, and the interrupt gate
@@ -546,24 +998,17 @@ ENTRY(entry_INT80_32)
restore_all:
TRACE_IRQS_IRET
+ SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
-#ifdef CONFIG_X86_ESPFIX32
- ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
-
- movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
- /*
- * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
- * are returning to the kernel.
- * See comments in process.c:copy_thread() for details.
- */
- movb PT_OLDSS(%esp), %ah
- movb PT_CS(%esp), %al
- andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
- cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
- je .Lldt_ss # returning to user-space with LDT SS
-#endif
+ CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
- RESTORE_REGS 4 # skip orig_eax/error_code
+ /* Switch back to user CR3 */
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+ BUG_IF_WRONG_CR3
+
+ /* Restore user state */
+ RESTORE_REGS pop=4 # skip orig_eax/error_code
.Lirq_return:
/*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -572,46 +1017,33 @@ restore_all:
*/
INTERRUPT_RETURN
+restore_all_kernel:
+ TRACE_IRQS_IRET
+ PARANOID_EXIT_TO_KERNEL_MODE
+ BUG_IF_WRONG_CR3
+ RESTORE_REGS 4
+ jmp .Lirq_return
+
.section .fixup, "ax"
ENTRY(iret_exc )
pushl $0 # no error code
pushl $do_iret_error
- jmp common_exception
-.previous
- _ASM_EXTABLE(.Lirq_return, iret_exc)
-#ifdef CONFIG_X86_ESPFIX32
-.Lldt_ss:
-/*
- * Setup and switch to ESPFIX stack
- *
- * We're returning to userspace with a 16 bit stack. The CPU will not
- * restore the high word of ESP for us on executing iret... This is an
- * "official" bug of all the x86-compatible CPUs, which we can work
- * around to make dosemu and wine happy. We do this by preloading the
- * high word of ESP with the high word of the userspace ESP while
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
- mov %esp, %edx /* load kernel esp */
- mov PT_OLDESP(%esp), %eax /* load userspace esp */
- mov %dx, %ax /* eax: new kernel esp */
- sub %eax, %edx /* offset (low word is 0) */
- shr $16, %edx
- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
- pushl $__ESPFIX_SS
- pushl %eax /* new kernel esp */
+#ifdef CONFIG_DEBUG_ENTRY
/*
- * Disable interrupts, but do not irqtrace this section: we
- * will soon execute iret and the tracer was already set to
- * the irqstate after the IRET:
+	 * The stack-frame here is the one that iret faulted on, so it's a
+	 * return-to-user frame. We are on kernel-cr3 because we come here from
+	 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
+	 * as the checker expects.
*/
- DISABLE_INTERRUPTS(CLBR_ANY)
- lss (%esp), %esp /* switch to espfix segment */
- jmp .Lrestore_nocheck
+ pushl %eax
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+ popl %eax
#endif
+
+ jmp common_exception
+.previous
+ _ASM_EXTABLE(.Lirq_return, iret_exc)
ENDPROC(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
@@ -671,7 +1103,8 @@ END(irq_entries_start)
common_interrupt:
ASM_CLAC
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
- SAVE_ALL
+
+ SAVE_ALL switch_stacks=1
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
movl %esp, %eax
@@ -679,16 +1112,16 @@ common_interrupt:
jmp ret_from_intr
ENDPROC(common_interrupt)
-#define BUILD_INTERRUPT3(name, nr, fn) \
-ENTRY(name) \
- ASM_CLAC; \
- pushl $~(nr); \
- SAVE_ALL; \
- ENCODE_FRAME_POINTER; \
- TRACE_IRQS_OFF \
- movl %esp, %eax; \
- call fn; \
- jmp ret_from_intr; \
+#define BUILD_INTERRUPT3(name, nr, fn) \
+ENTRY(name) \
+ ASM_CLAC; \
+ pushl $~(nr); \
+ SAVE_ALL switch_stacks=1; \
+ ENCODE_FRAME_POINTER; \
+ TRACE_IRQS_OFF \
+ movl %esp, %eax; \
+ call fn; \
+ jmp ret_from_intr; \
ENDPROC(name)
#define BUILD_INTERRUPT(name, nr) \
@@ -920,16 +1353,20 @@ common_exception:
pushl %es
pushl %ds
pushl %eax
+ movl $(__USER_DS), %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl $(__KERNEL_PERCPU), %eax
+ movl %eax, %fs
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
+ SWITCH_TO_KERNEL_STACK
ENCODE_FRAME_POINTER
cld
- movl $(__KERNEL_PERCPU), %ecx
- movl %ecx, %fs
UNWIND_ESPFIX_STACK
GS_TO_REG %ecx
movl PT_GS(%esp), %edi # get the function address
@@ -937,9 +1374,6 @@ common_exception:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
- movl $(__USER_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
CALL_NOSPEC %edi
@@ -948,40 +1382,12 @@ END(common_exception)
ENTRY(debug)
/*
- * #DB can happen at the first instruction of
- * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
- * happens, then we will be running on a very small stack. We
- * need to detect this condition and switch to the thread
- * stack before calling any C code at all.
- *
- * If you edit this code, keep in mind that NMIs can happen in here.
+ * Entry from sysenter is now handled in common_exception
*/
ASM_CLAC
pushl $-1 # mark this as an int
- SAVE_ALL
- ENCODE_FRAME_POINTER
- xorl %edx, %edx # error code 0
- movl %esp, %eax # pt_regs pointer
-
- /* Are we currently on the SYSENTER stack? */
- movl PER_CPU_VAR(cpu_entry_area), %ecx
- addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
- subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
- cmpl $SIZEOF_entry_stack, %ecx
- jb .Ldebug_from_sysenter_stack
-
- TRACE_IRQS_OFF
- call do_debug
- jmp ret_from_exception
-
-.Ldebug_from_sysenter_stack:
- /* We're on the SYSENTER stack. Switch off. */
- movl %esp, %ebx
- movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
- TRACE_IRQS_OFF
- call do_debug
- movl %ebx, %esp
- jmp ret_from_exception
+ pushl $do_debug
+ jmp common_exception
END(debug)
/*
@@ -993,6 +1399,7 @@ END(debug)
*/
ENTRY(nmi)
ASM_CLAC
+
#ifdef CONFIG_X86_ESPFIX32
pushl %eax
movl %ss, %eax
@@ -1002,7 +1409,7 @@ ENTRY(nmi)
#endif
pushl %eax # pt_regs->orig_ax
- SAVE_ALL
+ SAVE_ALL_NMI cr3_reg=%edi
ENCODE_FRAME_POINTER
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
@@ -1016,7 +1423,7 @@ ENTRY(nmi)
/* Not on SYSENTER stack. */
call do_nmi
- jmp .Lrestore_all_notrace
+ jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
/*
@@ -1027,7 +1434,11 @@ ENTRY(nmi)
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
call do_nmi
movl %ebx, %esp
- jmp .Lrestore_all_notrace
+
+.Lnmi_return:
+ CHECK_AND_APPLY_ESPFIX
+ RESTORE_ALL_NMI cr3_reg=%edi pop=4
+ jmp .Lirq_return
#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
@@ -1042,12 +1453,12 @@ ENTRY(nmi)
pushl 16(%esp)
.endr
pushl %eax
- SAVE_ALL
+ SAVE_ALL_NMI cr3_reg=%edi
ENCODE_FRAME_POINTER
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx, %edx # zero error code
call do_nmi
- RESTORE_REGS
+ RESTORE_ALL_NMI cr3_reg=%edi
lss 12+4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
@@ -1056,7 +1467,8 @@ END(nmi)
ENTRY(int3)
ASM_CLAC
pushl $-1 # mark this as an int
- SAVE_ALL
+
+ SAVE_ALL switch_stacks=1
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
xorl %edx, %edx # zero error code
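
Taken together, the new macros implement the 32-bit PTI entry rules. A
minimal C model of the cr3 side (a sketch, assuming the layout the code
relies on: the kernel and user copies of the page-table root are an
adjacent order-1 pair, so switching is just toggling bit PAGE_SHIFT):

	#define PAGE_SHIFT	12
	#define PTI_SWITCH_MASK	(1UL << PAGE_SHIFT)

	static unsigned long to_user_cr3(unsigned long cr3)
	{
		return cr3 | PTI_SWITCH_MASK;	/* user half of the pair */
	}

	static unsigned long to_kernel_cr3(unsigned long cr3)
	{
		return cr3 & ~PTI_SWITCH_MASK;	/* kernel half of the pair */
	}

	/* SWITCH_TO_KERNEL_CR3 also hands back what was live on entry: */
	static unsigned long enter_kernel(unsigned long cr3, unsigned long *entry)
	{
		*entry = cr3;			/* may carry PTI_SWITCH_MASK */
		return to_kernel_cr3(cr3);
	}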
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8ae7ffda8f98..957dfb693ecc 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -92,7 +92,7 @@ END(native_usergs_sysret64)
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
@@ -408,6 +408,7 @@ ENTRY(ret_from_fork)
1:
/* kernel thread */
+ UNWIND_HINT_EMPTY
movq %r12, %rdi
CALL_NOSPEC %rbx
/*
@@ -701,7 +702,7 @@ retint_kernel:
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
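
The bt -> btl changes only spell out the operand size: with an immediate
and a memory operand there is no register to infer the width from, and
newer gas versions may warn about the guessed size. A user-space sketch
of the explicit form, illustrative only:

	static int irqs_were_on(unsigned int *eflags)
	{
		unsigned char cf;

		/* bit 9 is X86_EFLAGS_IF; "btl" fixes the width at 32 bits */
		__asm__ ("btl $9, %1\n\t"
			 "setc %0"
			 : "=q" (cf) : "m" (*eflags));
		return cf;
	}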
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 261802b1cc50..9f695f517747 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -46,10 +46,8 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
- -Wl,--no-undefined \
- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
- $(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+ -z max-page-size=4096 -z common-page-size=4096
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@@ -58,9 +56,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(src
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
-define cmd_vdso2c
- $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+ cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
$(call if_changed,vdso2c)
@@ -95,10 +91,8 @@ CFLAGS_REMOVE_vvar.o = -pg
#
CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
- -Wl,-soname=linux-vdso.so.1 \
- -Wl,-z,max-page-size=4096 \
- -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+ -z max-page-size=4096 -z common-page-size=4096
# x32-rebranded versions
vobjx32s-y := $(vobjs-y:.o=-x32.o)
@@ -123,7 +117,7 @@ $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
$(call if_changed,vdso)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
targets += vdso32/vdso32.lds
targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
@@ -157,13 +151,13 @@ $(obj)/vdso32.so.dbg: FORCE \
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO $@
- cmd_vdso = $(CC) -nostdlib -o $@ \
+ cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ -T $(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+ $(call ld-option, --build-id) -Bsymbolic
GCOV_PROFILE := n
#
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 86f0c15dcc2d..035c37481f57 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc->intel_cp_status &= ~(1ull << hwc->idx);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_disable(event);
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
-
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_disable(event);
}
static void intel_pmu_del_event(struct perf_event *event)
@@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event)
x86_perf_event_update(event);
}
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct perf_event *event)
{
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
- u64 ctrl_val, bits, mask;
+ u64 ctrl_val, mask, bits = 0;
/*
- * Enable IRQ generation (0x8),
+ * Enable IRQ generation (0x8), if not PEBS,
* and enable ring-3 counting (0x2) and ring-0 counting (0x1)
* if requested:
*/
- bits = 0x8ULL;
+ if (!event->attr.precise_ip)
+ bits |= 0x8;
if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
bits |= 0x2;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
@@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
if (unlikely(event_is_checkpointed(event)))
cpuc->intel_cp_status |= (1ull << hwc->idx);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_enable(event);
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
- intel_pmu_enable_fixed(hwc);
+ intel_pmu_enable_fixed(event);
return;
}
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_enable(event);
-
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
@@ -2280,7 +2282,10 @@ again:
* counters from the GLOBAL_STATUS mask and we always process PEBS
* events via drain_pebs().
*/
- status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ status &= ~cpuc->pebs_enabled;
+ else
+ status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
/*
* PEBS overflow sets bit 62 in the global status register
@@ -4072,7 +4077,6 @@ __init int intel_pmu_init(void)
intel_pmu_lbr_init_skl();
x86_pmu.event_constraints = intel_slm_event_constraints;
- x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
x86_pmu.extra_regs = intel_glm_extra_regs;
/*
* It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
@@ -4082,6 +4086,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_prec_dist = true;
x86_pmu.lbr_pt_coexist = true;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.get_event_constraints = glp_get_event_constraints;
x86_pmu.cpu_events = glm_events_attrs;
/* Goldmont Plus has 4-wide pipeline */
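
With PMU_FL_PEBS_ALL the fixed counters may carry PEBS events too, which
is why the pebs_enable()/pebs_disable() calls above moved in front of the
fixed-counter early returns, and why a PEBS event must not also raise a
PMI. The control nibble assembled for a fixed counter, as a sketch:

	/* 0x8 = PMI on overflow, 0x2 = count ring 3, 0x1 = count ring 0 */
	static u64 fixed_ctrl_bits(bool precise, bool usr, bool os)
	{
		u64 bits = 0;

		if (!precise)		/* PEBS events skip the PMI bit */
			bits |= 0x8;
		if (usr)
			bits |= 0x2;
		if (os)
			bits |= 0x1;
		return bits;
	}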
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8dbba77e0518..b7b01d762d32 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -713,12 +713,6 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
-struct event_constraint intel_glp_pebs_event_constraints[] = {
- /* Allow all events as PEBS with no flags */
- INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
- EVENT_CONSTRAINT_END
-};
-
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
@@ -871,6 +865,13 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
}
}
+ /*
+	 * Extended PEBS support:
+	 * make the PEBS code search the normal constraints.
+ */
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ return NULL;
+
return &emptyconstraint;
}
@@ -896,10 +897,16 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
u64 threshold;
+ int reserved;
+
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
+ else
+ reserved = x86_pmu.max_pebs_events;
if (cpuc->n_pebs == cpuc->n_large_pebs) {
threshold = ds->pebs_absolute_maximum -
- x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+ reserved * x86_pmu.pebs_record_size;
} else {
threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
}
@@ -963,7 +970,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
*/
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
- ds->pebs_event_reset[hwc->idx] =
+ unsigned int idx = hwc->idx;
+
+ if (idx >= INTEL_PMC_IDX_FIXED)
+ idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+ ds->pebs_event_reset[idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
ds->pebs_event_reset[hwc->idx] = 0;
@@ -1481,9 +1492,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
struct debug_store *ds = cpuc->ds;
struct perf_event *event;
void *base, *at, *top;
- short counts[MAX_PEBS_EVENTS] = {};
- short error[MAX_PEBS_EVENTS] = {};
- int bit, i;
+ short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ int bit, i, size;
+ u64 mask;
if (!x86_pmu.pebs_active)
return;
@@ -1493,6 +1505,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
ds->pebs_index = ds->pebs_buffer_base;
+ mask = (1ULL << x86_pmu.max_pebs_events) - 1;
+ size = x86_pmu.max_pebs_events;
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
+ mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
+ size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+ }
+
if (unlikely(base >= top)) {
/*
* The drain_pebs() could be called twice in a short period
@@ -1502,7 +1521,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* update the event->count for this case.
*/
for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
- x86_pmu.max_pebs_events) {
+ size) {
event = cpuc->events[bit];
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
intel_pmu_save_and_restart_reload(event, 0);
@@ -1515,12 +1534,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
u64 pebs_status;
pebs_status = p->status & cpuc->pebs_enabled;
- pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+ pebs_status &= mask;
/* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) {
for_each_set_bit(bit, (unsigned long *)&pebs_status,
- x86_pmu.max_pebs_events)
+ size)
counts[bit]++;
continue;
@@ -1568,7 +1587,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
counts[bit]++;
}
- for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
+ for (bit = 0; bit < size; bit++) {
if ((counts[bit] == 0) && (error[bit] == 0))
continue;
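
The auto-reload hunk above relies on a slot convention for the DS area:
with extended PEBS the fixed counters also get reset values, placed after
the MAX_PEBS_EVENTS general-purpose slots. As a sketch (names from the
patch):

	/* Map a counter index to its pebs_event_reset[] slot. */
	static unsigned int pebs_reset_slot(unsigned int idx)
	{
		if (idx >= INTEL_PMC_IDX_FIXED)		/* fixed counter */
			return MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
		return idx;				/* GP counter */
	}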
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index cf372b90557e..f3e006bed9a7 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -216,6 +216,8 @@ static void intel_pmu_lbr_reset_64(void)
void intel_pmu_lbr_reset(void)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
if (!x86_pmu.lbr_nr)
return;
@@ -223,6 +225,9 @@ void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_32();
else
intel_pmu_lbr_reset_64();
+
+ cpuc->last_task_ctx = NULL;
+ cpuc->last_log_id = 0;
}
/*
@@ -334,6 +339,7 @@ static inline u64 rdlbr_to(unsigned int idx)
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
unsigned lbr_idx, mask;
u64 tos;
@@ -344,9 +350,21 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
return;
}
- mask = x86_pmu.lbr_nr - 1;
tos = task_ctx->tos;
- for (i = 0; i < tos; i++) {
+ /*
+	 * Do not restore the LBR registers if
+	 * - no one else touched them, and
+	 * - the CPU did not enter C6.
+ */
+ if ((task_ctx == cpuc->last_task_ctx) &&
+ (task_ctx->log_id == cpuc->last_log_id) &&
+ rdlbr_from(tos)) {
+ task_ctx->lbr_stack_state = LBR_NONE;
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
@@ -354,14 +372,24 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+
+ for (; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ wrlbr_from(lbr_idx, 0);
+ wrlbr_to(lbr_idx, 0);
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+ wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+ }
+
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned lbr_idx, mask;
- u64 tos;
+ u64 tos, from;
int i;
if (task_ctx->lbr_callstack_users == 0) {
@@ -371,15 +399,22 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
- for (i = 0; i < tos; i++) {
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
- task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+ from = rdlbr_from(lbr_idx);
+ if (!from)
+ break;
+ task_ctx->lbr_from[i] = from;
task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+ task_ctx->valid_lbrs = i;
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
+
+ cpuc->last_task_ctx = task_ctx;
+ cpuc->last_log_id = ++task_ctx->log_id;
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
@@ -531,7 +566,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
*/
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
- bool need_info = false;
+ bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
@@ -542,7 +577,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
if (cpuc->lbr_sel) {
need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
if (cpuc->lbr_sel->config & LBR_CALL_STACK)
- num = tos;
+ call_stack = true;
}
for (i = 0; i < num; i++) {
@@ -555,6 +590,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
from = rdlbr_from(lbr_idx);
to = rdlbr_to(lbr_idx);
+ /*
+ * Read LBR call stack entries
+		 * until an invalid (zero) entry is detected.
+ */
+ if (call_stack && !from)
+ break;
+
if (lbr_format == LBR_FORMAT_INFO && need_info) {
u64 info;
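
The restore-side fast path added above boils down to one predicate (a
sketch built from the helpers visible in this file): the LBR MSRs still
hold this task's stack only if this task was the last one saved on this
CPU, no other save happened in between (log_id matches), and a deep
C-state has not wiped the registers (the top-of-stack "from" entry is
still non-zero).

	static bool lbr_msrs_still_valid(struct x86_perf_task_context *task_ctx,
					 struct cpu_hw_events *cpuc, u64 tos)
	{
		return task_ctx == cpuc->last_task_ctx &&
		       task_ctx->log_id == cpuc->last_log_id &&
		       rdlbr_from(tos) != 0;	/* not cleared by e.g. C6 */
	}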
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 9f3711470ec1..156286335351 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -163,6 +163,7 @@ struct intel_excl_cntrs {
unsigned core_id; /* per-core: core id */
};
+struct x86_perf_task_context;
#define MAX_LBR_ENTRIES 32
enum {
@@ -214,6 +215,8 @@ struct cpu_hw_events {
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
struct er_account *lbr_sel;
u64 br_sel;
+ struct x86_perf_task_context *last_task_ctx;
+ int last_log_id;
/*
* Intel host/guest exclude bits
@@ -648,8 +651,10 @@ struct x86_perf_task_context {
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
+ int valid_lbrs;
int lbr_callstack_users;
int lbr_stack_state;
+ int log_id;
};
#define x86_add_quirk(func_) \
@@ -668,6 +673,7 @@ do { \
#define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
+#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 402338365651..5b0f613428c2 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -31,6 +31,8 @@
#include <asm/mshyperv.h>
#include <asm/apic.h>
+#include <asm/trace/hyperv.h>
+
static struct apic orig_apic;
static u64 hv_apic_icr_read(void)
@@ -99,6 +101,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
int nr_bank = 0;
int ret = 1;
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+
local_irq_save(flags);
arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -130,10 +135,10 @@ ipi_mask_ex_done:
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
- struct ipi_arg_non_ex **arg;
- struct ipi_arg_non_ex *ipi_arg;
+ struct ipi_arg_non_ex ipi_arg;
int ret = 1;
- unsigned long flags;
+
+ trace_hyperv_send_ipi_mask(mask, vector);
if (cpumask_empty(mask))
return true;
@@ -144,40 +149,43 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- return __send_ipi_mask_ex(mask, vector);
-
- local_irq_save(flags);
- arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
- ipi_arg = *arg;
- if (unlikely(!ipi_arg))
- goto ipi_mask_done;
-
- ipi_arg->vector = vector;
- ipi_arg->reserved = 0;
- ipi_arg->cpu_mask = 0;
+ /*
+ * From the supplied CPU set we need to figure out if we can get away
+ * with cheaper HVCALL_SEND_IPI hypercall. This is possible when the
+ * highest VP number in the set is < 64. As VP numbers are usually in
+ * ascending order and match Linux CPU ids, here is an optimization:
+ * we check the VP number for the highest bit in the supplied set first
+	 * so we can quickly find out if using the HVCALL_SEND_IPI_EX
+	 * hypercall is a must. We will also check all VP numbers when
+	 * walking the supplied CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
+ goto do_ex_hypercall;
+
+ ipi_arg.vector = vector;
+ ipi_arg.cpu_mask = 0;
for_each_cpu(cur_cpu, mask) {
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL)
- goto ipi_mask_done;
+ return false;
/*
* This particular version of the IPI hypercall can
* only target upto 64 CPUs.
*/
if (vcpu >= 64)
- goto ipi_mask_done;
+ goto do_ex_hypercall;
- __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+ __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
}
- ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
-
-ipi_mask_done:
- local_irq_restore(flags);
+ ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+ ipi_arg.cpu_mask);
return ((ret == 0) ? true : false);
+
+do_ex_hypercall:
+ return __send_ipi_mask_ex(mask, vector);
}
static bool __send_ipi_one(int cpu, int vector)
@@ -233,10 +241,7 @@ static void hv_send_ipi_self(int vector)
void __init hv_apic_init(void)
{
if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- pr_info("Hyper-V: Using ext hypercalls for IPI\n");
- else
- pr_info("Hyper-V: Using IPI hypercalls\n");
+ pr_info("Hyper-V: Using IPI hypercalls\n");
/*
* Set the IPI entry points.
*/
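
The decision logic above, condensed into one self-contained sketch
(vp_number() stands in for hv_cpu_number_to_vp_number()): the last CPU in
the mask is probed first because VP numbers usually ascend with CPU ids,
but every VP is still checked during the walk since that ordering is not
guaranteed.

	/* Can this CPU set be expressed as one 64-bit VP mask? */
	static bool cpus_fit_simple_ipi(const struct cpumask *mask, u64 *vp_mask)
	{
		unsigned int cpu, vp;
		u64 m = 0;

		if (vp_number(cpumask_last(mask)) >= 64)	/* quick reject */
			return false;

		for_each_cpu(cpu, mask) {
			vp = vp_number(cpu);
			if (vp >= 64)		/* ordering not guaranteed */
				return false;
			m |= 1ULL << vp;
		}
		*vp_mask = m;
		return true;
	}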
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index de27615c51ea..1147e1fed7ff 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -16,6 +16,8 @@
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info);
/*
* Fills in gva_list starting from offset. Returns the number of items added.
@@ -93,10 +95,29 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
if (cpumask_equal(cpus, cpu_present_mask)) {
flush->flags |= HV_FLUSH_ALL_PROCESSORS;
} else {
+ /*
+ * From the supplied CPU set we need to figure out if we can get
+ * away with cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
+ * hypercalls. This is possible when the highest VP number in
+ * the set is < 64. As VP numbers are usually in ascending order
+ * and match Linux CPU ids, here is an optimization: we check
+ * the VP number for the highest bit in the supplied set first
+ * so we can quickly find out if using *_EX hypercalls is a
+ * must. We will also check all VP numbers when walking the
+ * supplied CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
+ goto do_ex_hypercall;
+
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
- if (vcpu >= 64)
+ if (vcpu == VP_INVAL) {
+ local_irq_restore(flags);
goto do_native;
+ }
+
+ if (vcpu >= 64)
+ goto do_ex_hypercall;
__set_bit(vcpu, (unsigned long *)
&flush->processor_mask);
@@ -123,7 +144,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
gva_n, 0, flush, NULL);
}
+ goto check_status;
+
+do_ex_hypercall:
+ status = hyperv_flush_tlb_others_ex(cpus, info);
+check_status:
local_irq_restore(flags);
if (!(status & HV_HYPERCALL_RESULT_MASK))
@@ -132,35 +158,22 @@ do_native:
native_flush_tlb_others(cpus, info);
}
-static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
- const struct flush_tlb_info *info)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
{
int nr_bank = 0, max_gvas, gva_n;
struct hv_tlb_flush_ex **flush_pcpu;
struct hv_tlb_flush_ex *flush;
- u64 status = U64_MAX;
- unsigned long flags;
+ u64 status;
- trace_hyperv_mmu_flush_tlb_others(cpus, info);
-
- if (!hv_hypercall_pg)
- goto do_native;
-
- if (cpumask_empty(cpus))
- return;
-
- local_irq_save(flags);
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return U64_MAX;
flush_pcpu = (struct hv_tlb_flush_ex **)
this_cpu_ptr(hyperv_pcpu_input_arg);
flush = *flush_pcpu;
- if (unlikely(!flush)) {
- local_irq_restore(flags);
- goto do_native;
- }
-
if (info->mm) {
/*
* AddressSpace argument must match the CR3 with PCID bits
@@ -176,15 +189,10 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->hv_vp_set.valid_bank_mask = 0;
- if (!cpumask_equal(cpus, cpu_present_mask)) {
- flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
- }
-
- if (!nr_bank) {
- flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
- flush->flags |= HV_FLUSH_ALL_PROCESSORS;
- }
+ flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
+ if (nr_bank < 0)
+ return U64_MAX;
/*
* We can flush no more than max_gvas with one hypercall. Flush the
@@ -213,12 +221,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
gva_n, nr_bank, flush, NULL);
}
- local_irq_restore(flags);
-
- if (!(status & HV_HYPERCALL_RESULT_MASK))
- return;
-do_native:
- native_flush_tlb_others(cpus, info);
+ return status;
}
void hyperv_setup_mmu_ops(void)
@@ -226,11 +229,6 @@ void hyperv_setup_mmu_ops(void)
if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
return;
- if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
- pr_info("Using hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
- } else {
- pr_info("Using ext hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
- }
+ pr_info("Using hypercall for remote TLB flush\n");
+ pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
}
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 0db6bec95489..b143717b92b3 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -80,6 +80,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
* true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
@@ -91,6 +92,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
*
* Atomically increments @v by 1.
*/
+#define arch_atomic_inc arch_atomic_inc
static __always_inline void arch_atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
@@ -103,6 +105,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic_dec arch_atomic_dec
static __always_inline void arch_atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
@@ -117,6 +120,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
@@ -130,6 +134,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
@@ -144,6 +149,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
+#define arch_atomic_add_negative arch_atomic_add_negative
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
@@ -173,9 +179,6 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
return arch_atomic_add_return(-i, v);
}
-#define arch_atomic_inc_return(v) (arch_atomic_add_return(1, v))
-#define arch_atomic_dec_return(v) (arch_atomic_sub_return(1, v))
-
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
@@ -199,7 +202,7 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return xchg(&v->counter, new);
+ return arch_xchg(&v->counter, new);
}
static inline void arch_atomic_and(int i, atomic_t *v)
@@ -253,27 +256,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
-/**
- * __arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
- */
-static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
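
The open-coded __arch_atomic_add_unless() can go because the generic
atomic layer of this era supplies an equivalent fallback whenever the
architecture does not provide its own -- essentially the same
try_cmpxchg() loop (sketch mirroring include/linux/atomic.h):

	static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		do {
			if (unlikely(c == u))
				break;
		} while (!atomic_try_cmpxchg(v, &c, c + a));

		return c;	/* old value; caller compares against u */
	}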
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 92212bf0484f..ef959f02d070 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -158,6 +158,7 @@ static inline long long arch_atomic64_inc_return(atomic64_t *v)
"S" (v) : "memory", "ecx");
return a;
}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
static inline long long arch_atomic64_dec_return(atomic64_t *v)
{
@@ -166,6 +167,7 @@ static inline long long arch_atomic64_dec_return(atomic64_t *v)
"S" (v) : "memory", "ecx");
return a;
}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
/**
* arch_atomic64_add - add integer to atomic64 variable
@@ -198,25 +200,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
}
/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v)
-{
- return arch_atomic64_sub_return(i, v) == 0;
-}
-
-/**
* arch_atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
+#define arch_atomic64_inc arch_atomic64_inc
static inline void arch_atomic64_inc(atomic64_t *v)
{
__alternative_atomic64(inc, inc_return, /* no output */,
@@ -229,6 +218,7 @@ static inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic64_dec arch_atomic64_dec
static inline void arch_atomic64_dec(atomic64_t *v)
{
__alternative_atomic64(dec, dec_return, /* no output */,
@@ -236,46 +226,6 @@ static inline void arch_atomic64_dec(atomic64_t *v)
}
/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int arch_atomic64_dec_and_test(atomic64_t *v)
-{
- return arch_atomic64_dec_return(v) == 0;
-}
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_inc_and_test(atomic64_t *v)
-{
- return arch_atomic64_inc_return(v) == 0;
-}
-
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int arch_atomic64_add_negative(long long i, atomic64_t *v)
-{
- return arch_atomic64_add_return(i, v) < 0;
-}
-
-/**
* arch_atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
@@ -295,7 +245,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
return (int)a;
}
-
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
int r;
@@ -304,6 +254,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
return r;
}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
{
long long r;
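
Aside on the pattern in the hunks above: defining each op as itself (e.g. "#define arch_atomic64_inc arch_atomic64_inc") lets the generic atomic headers probe with #ifndef which operations the architecture implements natively and supply fallbacks for the rest. A minimal sketch of that mechanism, assuming the 4.18-era <linux/atomic.h> conventions:

#ifndef arch_atomic64_inc_return
static inline long long arch_atomic64_inc_return(atomic64_t *v)
{
	return arch_atomic64_add_return(1, v);	/* generic fallback */
}
#endif

An architecture with a faster native version simply defines the macro alongside its inline function, as above, and the fallback compiles out.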
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 6106b59d3260..4343d9b4f30e 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -71,6 +71,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
@@ -82,6 +83,7 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
+#define arch_atomic64_inc arch_atomic64_inc
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
@@ -95,6 +97,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic64_dec arch_atomic64_dec
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
@@ -110,6 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
@@ -123,6 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
@@ -137,6 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
+#define arch_atomic64_add_negative arch_atomic64_add_negative
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
@@ -169,9 +175,6 @@ static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
return xadd(&v->counter, -i);
}
-#define arch_atomic64_inc_return(v) (arch_atomic64_add_return(1, (v)))
-#define arch_atomic64_dec_return(v) (arch_atomic64_sub_return(1, (v)))
-
static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
return arch_cmpxchg(&v->counter, old, new);
@@ -185,46 +188,7 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, l
static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
- return xchg(&v->counter, new);
-}
-
-/**
- * arch_atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- s64 c = arch_atomic64_read(v);
- do {
- if (unlikely(c == u))
- return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
- return true;
-}
-
-#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)
-
-/*
- * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = arch_atomic64_read(v);
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
- return dec;
+ return arch_xchg(&v->counter, new);
}
static inline void arch_atomic64_and(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index e3efd8a06066..a55d79b233d3 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -75,7 +75,7 @@ extern void __add_wrong_size(void)
* use "asm volatile" and "memory" clobbers to prevent gcc from moving
* information around.
*/
-#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
+#define arch_xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index bfca3b346c74..072e5459fe2f 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -10,13 +10,13 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
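
The xchg()/cmpxchg() renames follow the same arch_ convention: once the architecture exports only arch_-prefixed primitives, the common headers can wrap them with sanitizer instrumentation under the familiar names. Roughly, in the style of asm-generic/atomic-instrumented.h (a sketch, not the verbatim header):

#define xchg(ptr, v)						\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_xchg(__ai_ptr, (v));				\
})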
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 5701f5cecd31..b5c60faf8429 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -219,6 +219,7 @@
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_IBRS_ENHANCED ( 7*32+29) /* Enhanced IBRS */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -229,7 +230,7 @@
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
-
+#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index f59c39835a5a..a1f0e90d0818 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -49,11 +49,14 @@ static inline int hw_breakpoint_slots(int type)
return HBP_NUM;
}
+struct perf_event_attr;
struct perf_event;
struct pmu;
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index cf090e584202..7ed08a7c3398 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -76,4 +76,17 @@
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
+/* Useful macros */
+#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \
+{ \
+ .vendor = X86_VENDOR_INTEL, \
+ .family = _family, \
+ .model = _model, \
+ .feature = X86_FEATURE_ANY, \
+ .driver_data = (kernel_ulong_t)&_driver_data \
+}
+
+#define INTEL_CPU_FAM6(_model, _driver_data) \
+ INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
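
A hypothetical driver match table showing how INTEL_CPU_FAM6() is meant to be used (my_ids, my_driver_priv and my_data are illustrative names, not part of this patch):

static const struct my_driver_priv my_data = { /* driver-specific */ };

static const struct x86_cpu_id my_ids[] = {
	INTEL_CPU_FAM6(SKYLAKE_MOBILE, my_data),
	INTEL_CPU_FAM6(SKYLAKE_DESKTOP, my_data),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, my_ids);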
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index fe04491130ae..52f815a80539 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -80,35 +80,6 @@ enum intel_mid_cpu_type {
extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
-/**
- * struct intel_mid_ops - Interface between intel-mid & sub archs
- * @arch_setup: arch_setup function to re-initialize platform
- * structures (x86_init, x86_platform_init)
- *
- * This structure can be extended if any new interface is required
- * between intel-mid & its sub arch files.
- */
-struct intel_mid_ops {
- void (*arch_setup)(void);
-};
-
-/* Helper API's for INTEL_MID_OPS_INIT */
-#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
- [cpuid] = get_##cpuname##_ops
-
-/* Maximum number of CPU ops */
-#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
-
-/*
- * For every new cpu addition, a weak get_<cpuname>_ops() function needs be
- * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
- */
-#define INTEL_MID_OPS_INIT { \
- DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
- DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
- DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
-};
-
#ifdef CONFIG_X86_INTEL_MID
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -136,20 +107,6 @@ enum intel_mid_timer_options {
extern enum intel_mid_timer_options intel_mid_timer_options;
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define FSB_FREQ_83SKU 83200
-#define FSB_FREQ_100SKU 99840
-#define FSB_FREQ_133SKU 133000
-
-#define FSB_FREQ_167SKU 167000
-#define FSB_FREQ_200SKU 200000
-#define FSB_FREQ_267SKU 267000
-#define FSB_FREQ_333SKU 333000
-#define FSB_FREQ_400SKU 400000
-
/* Bus Select SoC Fuse value */
#define BSEL_SOC_FUSE_MASK 0x7
/* FSB 133MHz */
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
index 62a9f4966b42..ae26df1c2789 100644
--- a/arch/x86/include/asm/intel_ds.h
+++ b/arch/x86/include/asm/intel_ds.h
@@ -8,6 +8,7 @@
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 8
+#define MAX_FIXED_PEBS_EVENTS 3
/*
* A debug store configuration.
@@ -23,7 +24,7 @@ struct debug_store {
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
- u64 pebs_event_reset[MAX_PEBS_EVENTS];
+ u64 pebs_event_reset[MAX_PEBS_EVENTS + MAX_FIXED_PEBS_EVENTS];
} __aligned(PAGE_SIZE);
DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c4fc17220df9..c14f2a74b2be 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -13,6 +13,8 @@
* Interrupt control:
*/
+/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
unsigned long flags;
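
The seemingly redundant declaration works because the kernel compiles with gnu_inline semantics, where "extern inline" is an inline-only definition; the bare declaration supplies the prototype that gcc < 4.9 insists on. A standalone illustration of the pattern, assuming __attribute__((gnu_inline)) as the kernel effectively uses:

/* prototype first: satisfies -Wmissing-prototypes */
extern inline int add_one(int x);

/* inline-only definition: no out-of-line symbol is emitted here, so a
 * separate non-inline copy must exist if the function is ever not inlined */
extern inline __attribute__((gnu_inline)) int add_one(int x)
{
	return x + 1;
}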
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 367d99cff426..c8cec1b39b88 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -78,7 +78,7 @@ struct arch_specific_insn {
* boostable = true: This instruction has been boosted: we have
* added a relative jump after the instruction copy in insn,
* so no single-step and fixup are needed (unless there's
- * a post_handler or break_handler).
+ * a post_handler).
*/
bool boostable;
bool if_modifier;
@@ -111,9 +111,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_old_flags;
unsigned long kprobe_saved_flags;
- unsigned long *jprobe_saved_sp;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
deleted file mode 100644
index 46185263d9c2..000000000000
--- a/arch/x86/include/asm/kvm_guest.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_KVM_GUEST_H
-#define _ASM_X86_KVM_GUEST_H
-
-int kvm_setup_vsyscall_timeinfo(void);
-
-#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 3aea2658323a..4c723632c036 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -7,7 +7,6 @@
#include <uapi/asm/kvm_para.h>
extern void kvmclock_init(void);
-extern int kvm_register_clock(char *txt);
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index bbc796eb0a3b..eeeb9289c764 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -71,12 +71,7 @@ struct ldt_struct {
static inline void *ldt_slot_va(int slot)
{
-#ifdef CONFIG_X86_64
return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
-#else
- BUG();
- return (void *)fix_to_virt(FIX_HOLE);
-#endif
}
/*
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5a7375ed5f7c..19886fef1dfc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -194,6 +194,40 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
return hv_status;
}
+/* Fast hypercall with 16 bytes of input */
+static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
+{
+ u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
+
+#ifdef CONFIG_X86_64
+ {
+ __asm__ __volatile__("mov %4, %%r8\n"
+ CALL_NOSPEC
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input1)
+ : "r" (input2),
+ THUNK_TARGET(hv_hypercall_pg)
+ : "cc", "r8", "r9", "r10", "r11");
+ }
+#else
+ {
+ u32 input1_hi = upper_32_bits(input1);
+ u32 input1_lo = lower_32_bits(input1);
+ u32 input2_hi = upper_32_bits(input2);
+ u32 input2_lo = lower_32_bits(input2);
+
+ __asm__ __volatile__ (CALL_NOSPEC
+ : "=A"(hv_status),
+ "+c"(input1_lo), ASM_CALL_CONSTRAINT
+ : "A" (control), "b" (input1_hi),
+ "D"(input2_hi), "S"(input2_lo),
+ THUNK_TARGET(hv_hypercall_pg)
+ : "cc");
+ }
+#endif
+ return hv_status;
+}
+
/*
* Rep hypercalls. Callers of these functions are supposed to ensure that
* rep_count and varhead_size comply with Hyper-V hypercall definition.
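
hv_do_fast_hypercall16() packs both 8-byte operands into registers (RDX and R8 on x86_64) instead of going through hypercall input pages. A sketch of a plausible caller; HVCALL_SEND_IPI is the kind of user this helper targets, and the surrounding names are illustrative:

	u64 status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, (u64)vector,
					    cpu_mask_word);
	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
		return false;	/* fall back to the slow, memory-based path */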
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f6f6c63da62f..fd2a8c1b88bc 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -214,7 +214,7 @@ enum spectre_v2_mitigation {
SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
SPECTRE_V2_RETPOLINE_GENERIC,
SPECTRE_V2_RETPOLINE_AMD,
- SPECTRE_V2_IBRS,
+ SPECTRE_V2_IBRS_ENHANCED,
};
/* The Speculative Store Bypass disable variants */
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
index 9c9dc579bd7d..46f516dd80ce 100644
--- a/arch/x86/include/asm/orc_types.h
+++ b/arch/x86/include/asm/orc_types.h
@@ -88,6 +88,7 @@ struct orc_entry {
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
+ unsigned end:1;
} __packed;
/*
@@ -101,6 +102,7 @@ struct unwind_hint {
s16 sp_offset;
u8 sp_reg;
u8 type;
+ u8 end;
};
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index a06b07399d17..e9202a0de8f0 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -450,9 +450,10 @@ do { \
bool __ret; \
typeof(pcp1) __o1 = (o1), __n1 = (n1); \
typeof(pcp2) __o2 = (o2), __n2 = (n2); \
- asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
- : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
- : "b" (__n1), "c" (__n2), "a" (__o1)); \
+ asm volatile("cmpxchg8b "__percpu_arg(1) \
+ CC_SET(z) \
+ : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
+ : "b" (__n1), "c" (__n2)); \
__ret; \
})
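
CC_SET()/CC_OUT() replace the hand-written setz so that compilers with asm flag-output support can read ZF directly rather than materializing it into a register. Roughly what the helpers expand to (a sketch of arch/x86/include/asm/asm.h):

#ifdef __GCC_ASM_FLAG_OUTPUTS__
# define CC_SET(c)	"\n\t/* output condition code " #c " */\n"
# define CC_OUT(c)	"=@cc" #c
#else
# define CC_SET(c)	"\n\tset" #c " %[_cc_" #c "]\n"
# define CC_OUT(c)	[_cc_ ## c] "=qm"
#endif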
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 685ffe8a0eaf..c399ea5eea41 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -19,6 +19,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
+#endif
*pmdp = pmd;
}
@@ -58,6 +61,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
+#endif
return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
@@ -67,6 +73,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#ifdef CONFIG_SMP
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
+#endif
return __pud(xchg((pudval_t *)xp, 0));
}
#else
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index f982ef808e7e..6deb6cd236e3 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -35,4 +35,7 @@ typedef union {
#define PTRS_PER_PTE 1024
+/* This covers all VMSPLIT_* and VMSPLIT_*_OPT variants */
+#define PGD_KERNEL_START (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
+
#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
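
Worked example for the common VMSPLIT_3G default: with CONFIG_PAGE_OFFSET = 0xC0000000 and PGDIR_SHIFT = 22 on 2-level paging, PGD_KERNEL_START = 0xC0000000 >> 22 = 768, so PGD entries 0-767 map userspace and entries 768-1023 map the kernel.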
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index f24df59c40b2..f2ca3139ca22 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -98,6 +98,9 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
+#endif
set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
@@ -229,6 +232,10 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
union split_pud res, *orig = (union split_pud *)pudp;
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
+#endif
+
/* xchg acts as a barrier before setting of the high bits */
res.pud_low = xchg(&orig->pud_low, 0);
res.pud_high = orig->pud_high;
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 6a59a6d0cc50..858358a82b14 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -21,9 +21,10 @@ typedef union {
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PARAVIRT
-#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
+#define SHARED_KERNEL_PMD ((!static_cpu_has(X86_FEATURE_PTI) && \
+ (pv_info.shared_kernel_pmd)))
#else
-#define SHARED_KERNEL_PMD 1
+#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
#endif
/*
@@ -45,5 +46,6 @@ typedef union {
#define PTRS_PER_PTE 512
#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#define PGD_KERNEL_START (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
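
On PAE the same VMSPLIT_3G default gives PGD_KERNEL_START = 0xC0000000 >> 30 = 3: entries 0-2 map userspace and entry 3 the kernel. That is also why SHARED_KERNEL_PMD is forced off above whenever PTI is enabled: with per-process user/kernel page-table copies, each process needs its own kernel PMDs instead of sharing a single set.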
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5715647fc4fe..a1cb3339da8d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -30,11 +30,14 @@ int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);
+void ptdump_walk_user_pgd_level_checkwx(void);
#ifdef CONFIG_DEBUG_WX
-#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx_user() ptdump_walk_user_pgd_level_checkwx()
#else
-#define debug_checkwx() do { } while (0)
+#define debug_checkwx() do { } while (0)
+#define debug_checkwx_user() do { } while (0)
#endif
/*
@@ -640,8 +643,31 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
+
+/*
+ * Take a PGD location (pgdp) and a pgd value that needs to be set there.
+ * Populates the user copy and returns the resulting PGD that must be
+ * set in the kernel copy of the page tables.
+ */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return pgd;
+ return __pti_set_user_pgtbl(pgdp, pgd);
+}
+#else /* CONFIG_PAGE_TABLE_ISOLATION */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ return pgd;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
#endif /* __ASSEMBLY__ */
+
#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
@@ -1154,6 +1180,70 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
}
#endif
+/*
+ * Page table pages are page-aligned. The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ *
+ * Returns true for parts of the PGD that map userspace and
+ * false for the parts that map the kernel.
+ */
+static inline bool pgdp_maps_userspace(void *__ptr)
+{
+ unsigned long ptr = (unsigned long)__ptr;
+
+ return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
+}
+
+static inline int pgd_large(pgd_t pgd) { return 0; }
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
+ * (8k-aligned and 8k in size). The kernel one is in the first 4k and
+ * the user one is in the last 4k. To switch between them, you
+ * just need to flip the 12th bit in their addresses.
+ */
+#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
+
+/*
+ * This generates better code than the inline assembly in
+ * __set_bit().
+ */
+static inline void *ptr_set_bit(void *ptr, int bit)
+{
+ unsigned long __ptr = (unsigned long)ptr;
+
+ __ptr |= BIT(bit);
+ return (void *)__ptr;
+}
+static inline void *ptr_clear_bit(void *ptr, int bit)
+{
+ unsigned long __ptr = (unsigned long)ptr;
+
+ __ptr &= ~BIT(bit);
+ return (void *)__ptr;
+}
+
+static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
+{
+ return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
+{
+ return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
+{
+ return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
+{
+ return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
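
Since the bit-flip helpers and pgdp_maps_userspace() now live in the shared header, the arithmetic is worth seeing concretely. A standalone user-space sketch (the address is made up; PGD_KERNEL_START = 256 assumes 4-level x86-64):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PGD_KERNEL_START 256	/* (PAGE_SIZE / 2) / sizeof(pgd_t) */

int main(void)
{
	uint64_t kernel_pgd = 0xffff888001234000ULL;	/* order-1 (8k) page */
	uint64_t user_pgd   = kernel_pgd | (1ULL << PAGE_SHIFT);

	printf("kernel pgd %#llx -> user pgd %#llx\n",
	       (unsigned long long)kernel_pgd, (unsigned long long)user_pgd);

	/* pgdp_maps_userspace(): index within the page < PGD_KERNEL_START */
	uint64_t entry255 = kernel_pgd + 255 * sizeof(uint64_t);
	uint64_t entry256 = kernel_pgd + 256 * sizeof(uint64_t);
	printf("entry 255 user? %d, entry 256 user? %d\n",	/* 1, then 0 */
	       (int)(((entry255 & ~PAGE_MASK) / 8) < PGD_KERNEL_START),
	       (int)(((entry256 & ~PAGE_MASK) / 8) < PGD_KERNEL_START));
	return 0;
}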
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 88a056b01db4..b3ec519e3982 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -34,8 +34,6 @@ static inline void check_pgt_cache(void) { }
void paging_init(void);
void sync_initial_page_table(void);
-static inline int pgd_large(pgd_t pgd) { return 0; }
-
/*
* Define this if things work differently on an i386 and an i486:
* it will (on an i486) warn about kernel memory accesses that are
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index d9a001a4a872..b0bc0fff5f1f 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -50,13 +50,18 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
& PMD_MASK)
-#define PKMAP_BASE \
+#define LDT_BASE_ADDR \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
+#define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE)
+
+#define PKMAP_BASE \
+ ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
+
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
-# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
+# define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE)
#endif
#define MODULES_VADDR VMALLOC_START
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c5385f9a88f..acb6970e7bcf 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -132,90 +132,6 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
#endif
}
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-/*
- * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
- * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and
- * the user one is in the last 4k. To switch between them, you
- * just need to flip the 12th bit in their addresses.
- */
-#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
-
-/*
- * This generates better code than the inline assembly in
- * __set_bit().
- */
-static inline void *ptr_set_bit(void *ptr, int bit)
-{
- unsigned long __ptr = (unsigned long)ptr;
-
- __ptr |= BIT(bit);
- return (void *)__ptr;
-}
-static inline void *ptr_clear_bit(void *ptr, int bit)
-{
- unsigned long __ptr = (unsigned long)ptr;
-
- __ptr &= ~BIT(bit);
- return (void *)__ptr;
-}
-
-static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
-{
- return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
-{
- return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
-{
- return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
-{
- return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-#endif /* CONFIG_PAGE_TABLE_ISOLATION */
-
-/*
- * Page table pages are page-aligned. The lower half of the top
- * level is used for userspace and the top half for the kernel.
- *
- * Returns true for parts of the PGD that map userspace and
- * false for the parts that map the kernel.
- */
-static inline bool pgdp_maps_userspace(void *__ptr)
-{
- unsigned long ptr = (unsigned long)__ptr;
-
- return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
-}
-
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);
-
-/*
- * Take a PGD location (pgdp) and a pgd value that needs to be set there.
- * Populates the user and returns the resulting PGD that must be set in
- * the kernel copy of the page tables.
- */
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
- if (!static_cpu_has(X86_FEATURE_PTI))
- return pgd;
- return __pti_set_user_pgd(pgdp, pgd);
-}
-#else
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
- return pgd;
-}
-#endif
-
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
pgd_t pgd;
@@ -226,7 +142,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
}
pgd = native_make_pgd(native_p4d_val(p4d));
- pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
+ pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
*p4dp = native_make_p4d(native_pgd_val(pgd));
}
@@ -237,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
- *pgdp = pti_set_user_pgd(pgdp, pgd);
+ *pgdp = pti_set_user_pgtbl(pgdp, pgd);
}
static inline void native_pgd_clear(pgd_t *pgd)
@@ -255,7 +171,6 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
/*
* Level 4 access.
*/
-static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
/* PUD - Level3 access */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 054765ab2da2..04edd2d58211 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -115,6 +115,7 @@ extern unsigned int ptrs_per_p4d;
#define LDT_PGD_ENTRY_L5 -112UL
#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
+#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
#define __VMALLOC_BASE_L4 0xffffc90000000000UL
#define __VMALLOC_BASE_L5 0xffa0000000000000UL
@@ -153,4 +154,6 @@ extern unsigned int ptrs_per_p4d;
#define EARLY_DYNAMIC_PAGE_TABLES 64
+#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
+
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 99fff853c944..b64acb08a62b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -50,6 +50,7 @@
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
+#define _PAGE_SOFTW3 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
@@ -266,14 +267,37 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef struct { pgdval_t pgd; } pgd_t;
+#ifdef CONFIG_X86_PAE
+
+/*
+ * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
+ * use it here.
+ */
+
+#define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK)
+#define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)
+
+/*
+ * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
+ * All other bits are reserved and must be zero (MBZ).
+ */
+#define PGD_ALLOWED_BITS (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
+ _PAGE_PWT | _PAGE_PCD | \
+ _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)
+
+#else
+/* No need to mask any bits for !PAE */
+#define PGD_ALLOWED_BITS (~0ULL)
+#endif
+
static inline pgd_t native_make_pgd(pgdval_t val)
{
- return (pgd_t) { val };
+ return (pgd_t) { val & PGD_ALLOWED_BITS };
}
static inline pgdval_t native_pgd_val(pgd_t pgd)
{
- return pgd.pgd;
+ return pgd.pgd & PGD_ALLOWED_BITS;
}
static inline pgdval_t pgd_flags(pgd_t pgd)
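
What the new masking buys, shown standalone (the 44-bit physical mask mirrors the PAE value of __PHYSICAL_MASK_SHIFT; treat the exact widths as assumptions of the sketch):

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_MASK	(~0xfffULL)
#define PHYS_MASK_44		(((1ULL << 44) - 1) & SKETCH_PAGE_MASK)
#define _PAGE_PRESENT		(1ULL << 0)
#define _PAGE_PWT		(1ULL << 3)
#define _PAGE_PCD		(1ULL << 4)
#define _PAGE_AVL		(7ULL << 9)	/* SOFTW1-3: bits 9-11 */
#define PGD_ALLOWED_BITS	(PHYS_MASK_44 | _PAGE_PRESENT | _PAGE_PWT | \
				 _PAGE_PCD | _PAGE_AVL)
#define _PAGE_NX		(1ULL << 63)	/* reserved-MBZ in a PAE PDPTE */

int main(void)
{
	uint64_t val = 0x1234000ULL | _PAGE_PRESENT | _PAGE_NX;

	/* native_make_pgd() masks on construction, so a stray NX bit set by
	 * generic page-table code never reaches the hardware PDPTE */
	printf("%#llx -> %#llx\n", (unsigned long long)val,
	       (unsigned long long)(val & PGD_ALLOWED_BITS));
	return 0;
}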
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 625a52a5594f..02c2cbda4a74 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -39,10 +39,6 @@
#define CR3_PCID_MASK 0xFFFull
#define CR3_NOFLUSH BIT_ULL(63)
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define X86_CR3_PTI_PCID_USER_BIT 11
-#endif
-
#else
/*
* CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
@@ -53,4 +49,8 @@
#define CR3_NOFLUSH 0
#endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define X86_CR3_PTI_PCID_USER_BIT 11
+#endif
+
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cfd29ee8c3da..59663c08c949 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -966,6 +966,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+extern void free_kernel_image_pages(void *begin, void *end);
void default_idle(void);
#ifdef CONFIG_XEN
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
index 38a17f1d5c9d..5df09a0b80b8 100644
--- a/arch/x86/include/asm/pti.h
+++ b/arch/x86/include/asm/pti.h
@@ -6,10 +6,9 @@
#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern void pti_init(void);
extern void pti_check_boottime_disable(void);
-extern void pti_clone_kernel_text(void);
+extern void pti_finalize(void);
#else
static inline void pti_check_boottime_disable(void) { }
-static inline void pti_clone_kernel_text(void) { }
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index 4cf11d88d3b3..19b90521954c 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -5,6 +5,7 @@
* PaX/grsecurity.
*/
#include <linux/refcount.h>
+#include <asm/bug.h>
/*
* This is the first portion of the refcount error handling, which lives in
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 5c019d23d06b..4a911a382ade 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -7,6 +7,7 @@
extern char __brk_base[], __brk_limit[];
extern struct exception_table_entry __stop___ex_table[];
+extern char __end_rodata_aligned[];
#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index bd090367236c..34cffcef7375 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -46,6 +46,7 @@ int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
+int set_memory_np_noalias(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index eb5f7999a893..36bd243843d6 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -87,15 +87,25 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
#endif
/* This is used when switching tasks or entering/exiting vm86 mode. */
-static inline void update_sp0(struct task_struct *task)
+static inline void update_task_stack(struct task_struct *task)
{
- /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
+ /* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
- load_sp0(task->thread.sp0);
+ if (static_cpu_has(X86_FEATURE_XENPV))
+ load_sp0(task->thread.sp0);
+ else
+ this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
+ /*
+ * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
+ * doesn't work on x86-32 because sp1 and cpu_current_top_of_stack
+ * have different values (because of the non-zero stack padding
+ * on 32-bit).
+ */
if (static_cpu_has(X86_FEATURE_XENPV))
load_sp0(task_top_of_stack(task));
#endif
}
#endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 2ecd34e2d46c..e85ff65c43c3 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -37,5 +37,6 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern int after_bootmem;
#endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6690cd3fc8b1..511bf5fae8b8 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -148,22 +148,6 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif
-static inline bool tlb_defer_switch_to_init_mm(void)
-{
- /*
- * If we have PCID, then switching to init_mm is reasonably
- * fast. If we don't have PCID, then switching to init_mm is
- * quite slow, so we try to defer it in the hopes that we can
- * avoid it entirely. The latter approach runs the risk of
- * receiving otherwise unnecessary IPIs.
- *
- * This choice is just a heuristic. The tlb code can handle this
- * function returning true or false regardless of whether we have
- * PCID.
- */
- return !static_cpu_has(X86_FEATURE_PCID);
-}
-
struct tlb_context {
u64 ctx_id;
u64 tlb_gen;
@@ -554,4 +538,9 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
native_flush_tlb_others(mask, info)
#endif
+extern void tlb_flush_remove_tables(struct mm_struct *mm);
+extern void tlb_flush_remove_tables_local(void *arg);
+
+#define HAVE_TLB_FLUSH_REMOVE_TABLES
+
#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
index 4253bca99989..9c0d4b588e3f 100644
--- a/arch/x86/include/asm/trace/hyperv.h
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -28,6 +28,21 @@ TRACE_EVENT(hyperv_mmu_flush_tlb_others,
__entry->addr, __entry->end)
);
+TRACE_EVENT(hyperv_send_ipi_mask,
+ TP_PROTO(const struct cpumask *cpus,
+ int vector),
+ TP_ARGS(cpus, vector),
+ TP_STRUCT__entry(
+ __field(unsigned int, ncpus)
+ __field(int, vector)
+ ),
+ TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
+ __entry->vector = vector;
+ ),
+ TP_printk("ncpus %d vector %x",
+ __entry->ncpus, __entry->vector)
+ );
+
#endif /* CONFIG_HYPERV */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 2701d221583a..eb5bbfeccb66 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -33,13 +33,13 @@ static inline cycles_t get_cycles(void)
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
-extern void tsc_early_delay_calibrate(void);
+extern void tsc_early_init(void);
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
-extern unsigned long native_calibrate_cpu(void);
+extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index bae46fc6b9de..0bcdb1279361 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -26,7 +26,7 @@
* the debuginfo as necessary. It will also warn if it sees any
* inconsistencies.
*/
-.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL end=0
#ifdef CONFIG_STACK_VALIDATION
.Lunwind_hint_ip_\@:
.pushsection .discard.unwind_hints
@@ -35,12 +35,14 @@
.short \sp_offset
.byte \sp_reg
.byte \type
+ .byte \end
+ .balign 4
.popsection
#endif
.endm
.macro UNWIND_HINT_EMPTY
- UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED end=1
.endm
.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
@@ -86,19 +88,21 @@
#else /* !__ASSEMBLY__ */
-#define UNWIND_HINT(sp_reg, sp_offset, type) \
+#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
"987: \n\t" \
".pushsection .discard.unwind_hints\n\t" \
/* struct unwind_hint */ \
".long 987b - .\n\t" \
- ".short " __stringify(sp_offset) "\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
".byte " __stringify(sp_reg) "\n\t" \
".byte " __stringify(type) "\n\t" \
+ ".byte " __stringify(end) "\n\t" \
+ ".balign 4 \n\t" \
".popsection\n\t"
-#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE, 0)
-#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE, 0)
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a481763a3776..014f214da581 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -668,6 +668,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
local_irq_save(flags);
memcpy(addr, opcode, len);
local_irq_restore(flags);
+ sync_core();
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
return addr;
@@ -693,6 +694,12 @@ void *text_poke(void *addr, const void *opcode, size_t len)
struct page *pages[2];
int i;
+ /*
+ * While the boot memory allocator is running we cannot use struct
+ * pages, as they are not yet initialized.
+ */
+ BUG_ON(!after_bootmem);
+
if (!core_kernel_text((unsigned long)addr)) {
pages[0] = vmalloc_to_page(addr);
pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index adbda5847b14..07fa222f0c52 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -940,7 +940,7 @@ static int __init calibrate_APIC_clock(void)
if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
pr_warning("APIC timer disabled due to verification failure\n");
- return -1;
+ return -1;
}
return 0;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 35aaee4fc028..0954315842c0 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -218,7 +218,8 @@ static int reserve_irq_vector(struct irq_data *irqd)
return 0;
}
-static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+static int
+assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
struct apic_chip_data *apicd = apic_chip_data(irqd);
bool resvd = apicd->has_reserved;
@@ -245,22 +246,12 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
return -EBUSY;
vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
- if (vector > 0)
- apic_update_vector(irqd, vector, cpu);
trace_vector_alloc(irqd->irq, vector, resvd, vector);
- return vector;
-}
-
-static int assign_vector_locked(struct irq_data *irqd,
- const struct cpumask *dest)
-{
- struct apic_chip_data *apicd = apic_chip_data(irqd);
- int vector = allocate_vector(irqd, dest);
-
if (vector < 0)
return vector;
+ apic_update_vector(irqd, vector, cpu);
+ apic_update_irq_cfg(irqd, vector, cpu);
- apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
return 0;
}
@@ -433,7 +424,7 @@ static int activate_managed(struct irq_data *irqd)
pr_err("Managed startup irq %u, no vector available\n",
irqd->irq);
}
- return ret;
+ return ret;
}
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d492752f79e1..391f358ebb4c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -394,10 +394,10 @@ extern int uv_hub_info_version(void)
EXPORT_SYMBOL(uv_hub_info_version);
/* Default UV memory block size is 2GB */
-static unsigned long mem_block_size = (2UL << 30);
+static unsigned long mem_block_size __initdata = (2UL << 30);
/* Kernel parameter to specify UV mem block size */
-static int parse_mem_block_size(char *ptr)
+static int __init parse_mem_block_size(char *ptr)
{
unsigned long size = memparse(ptr, NULL);
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index dcb008c320fe..01de31db300d 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -103,4 +103,9 @@ void common(void) {
OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
+ DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
+
+ /* Offset for sp0 and sp1 into the tss_struct */
+ OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+ OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
}
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index a4a3be399f4b..82826f2275cc 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -46,8 +46,14 @@ void foo(void)
OFFSET(saved_context_gdt_desc, saved_context, gdt_desc);
BLANK();
- /* Offset from the sysenter stack to tss.sp0 */
- DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+ /*
+ * Offset from the entry stack to the task stack pointer stored in the
+ * TSS. Kernel entry happens on the per-cpu entry stack, and the asm
+ * code switches to the task-stack pointer stored in x86_tss.sp1, a
+ * copy of task->thread.sp0 kept where the entry code can find it.
+ */
+ DEFINE(TSS_entry2task_stack,
+ offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
offsetofend(struct cpu_entry_area, entry_stack_page.stack));
#ifdef CONFIG_STACKPROTECTOR
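
The resulting constant is what the 32-bit entry asm adds to the top of the entry stack to locate x86_tss.sp1. A user-space sketch of the same offsetof arithmetic, over a hypothetical layout:

#include <stddef.h>
#include <stdio.h>

#define offsetofend(T, m) (offsetof(T, m) + sizeof(((T *)0)->m))

struct entry_stack { unsigned long words[64]; };
struct x86_tss     { unsigned long pad0, sp0, sp1; };

struct cpu_entry_area_sketch {		/* hypothetical layout */
	struct entry_stack stack;
	struct x86_tss tss;
};

int main(void)
{
	long off = (long)offsetof(struct cpu_entry_area_sketch, tss.sp1)
		 - (long)offsetofend(struct cpu_entry_area_sketch, stack);
	printf("TSS_entry2task_stack = %ld\n", off);
	return 0;
}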
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index b2dcd161f514..3b9405e7ba2b 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -65,8 +65,6 @@ int main(void)
#undef ENTRY
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
- OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
- OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
BLANK();
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7a40196967cb..347137e80bf5 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -35,7 +35,9 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o intel_rdt_ctrlmondata.o
+obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
+obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
+CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 38915fbfae73..b732438c1a1e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
}
}
- set_cpu_cap(c, X86_FEATURE_K7);
-
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
return;
@@ -617,6 +615,14 @@ static void early_init_amd(struct cpuinfo_x86 *c)
early_init_amd_mc(c);
+#ifdef CONFIG_X86_32
+ if (c->x86 == 6)
+ set_cpu_cap(c, X86_FEATURE_K7);
+#endif
+
+ if (c->x86 >= 0xf)
+ set_cpu_cap(c, X86_FEATURE_K8);
+
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
/*
@@ -863,9 +869,6 @@ static void init_amd(struct cpuinfo_x86 *c)
init_amd_cacheinfo(c);
- if (c->x86 >= 0xf)
- set_cpu_cap(c, X86_FEATURE_K8);
-
if (cpu_has(c, X86_FEATURE_XMM2)) {
unsigned long long val;
int ret;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5c0ea39311fe..405a9a61bb89 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -130,6 +130,7 @@ static const char *spectre_v2_strings[] = {
[SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
[SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
[SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
};
#undef pr_fmt
@@ -313,23 +314,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6) {
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SKYLAKE_MOBILE:
- case INTEL_FAM6_SKYLAKE_DESKTOP:
- case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_KABYLAKE_MOBILE:
- case INTEL_FAM6_KABYLAKE_DESKTOP:
- return true;
- }
- }
- return false;
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -349,6 +333,13 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_CMD_FORCE:
case SPECTRE_V2_CMD_AUTO:
+ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+ mode = SPECTRE_V2_IBRS_ENHANCED;
+ /* Force it so VMEXIT will restore correctly */
+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ goto specv2_set_mode;
+ }
if (IS_ENABLED(CONFIG_RETPOLINE))
goto retpoline_auto;
break;
@@ -386,26 +377,20 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
}
+specv2_set_mode:
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If neither SMEP nor PTI are available, there is a risk of
- * hitting userspace addresses in the RSB after a context switch
- * from a shallow call stack to a deeper one. To prevent this fill
- * the entire RSB, even when using IBRS.
+ * If spectre v2 protection has been enabled, unconditionally fill
+ * RSB during a context switch; this protects against two independent
+ * issues:
*
- * Skylake era CPUs have a separate issue with *underflow* of the
- * RSB, when they will predict 'ret' targets from the generic BTB.
- * The proper mitigation for this is IBRS. If IBRS is not supported
- * or deactivated in favour of retpolines the RSB fill on context
- * switch is required.
+ * - RSB underflow (and switch to BTB) on Skylake+
+ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
*/
- if ((!boot_cpu_has(X86_FEATURE_PTI) &&
- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
- }
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -415,9 +400,16 @@ retpoline_auto:
/*
* Retpoline means the kernel is safe because it has no indirect
- * branches. But firmware isn't, so use IBRS to protect that.
+ * branches. Enhanced IBRS protects firmware too, so, enable restricted
+ * speculation around firmware calls only when Enhanced IBRS isn't
+ * supported.
+ *
+ * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+ * the user might select retpoline on the kernel command line and if
+ * the CPU supports Enhanced IBRS, the kernel might unintentionally not
+ * enable IBRS around firmware calls.
*/
- if (boot_cpu_has(X86_FEATURE_IBRS)) {
+ if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index eb4cb3efd20e..ba6b8bb1c036 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1005,6 +1005,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
if (x86_match_cpu(cpu_no_meltdown))
return;
@@ -1016,6 +1019,24 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
}
/*
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
+ * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
+ */
+static void detect_nopl(void)
+{
+#ifdef CONFIG_X86_32
+ setup_clear_cpu_cap(X86_FEATURE_NOPL);
+#else
+ setup_force_cpu_cap(X86_FEATURE_NOPL);
+#endif
+}
+
+/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
* cache alignment.
@@ -1089,6 +1110,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
*/
if (!pgtable_l5_enabled())
setup_clear_cpu_cap(X86_FEATURE_LA57);
+
+ detect_nopl();
}
void __init early_cpu_init(void)
@@ -1124,24 +1147,6 @@ void __init early_cpu_init(void)
early_identify_cpu(&boot_cpu_data);
}
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
- clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
- set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
@@ -1204,8 +1209,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
get_model_name(c); /* Default name */
- detect_nopl(c);
-
detect_null_seg_behavior(c);
/*
@@ -1804,11 +1807,12 @@ void cpu_init(void)
enter_lazy_tlb(&init_mm, curr);
/*
- * Initialize the TSS. Don't bother initializing sp0, as the initial
- * task never enters user mode.
+ * Initialize the TSS. sp0 points to the entry trampoline stack
+ * regardless of what task is running.
*/
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();
+ load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
load_mm_ldt(&init_mm);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index eb75564f2d25..c050cd6066af 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -465,14 +465,17 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020
+#define x86_VMX_FEATURE_EPT_CAP_AD 0x00200000
u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+ u32 msr_vpid_cap, msr_ept_cap;
clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
clear_cpu_cap(c, X86_FEATURE_VNMI);
clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
clear_cpu_cap(c, X86_FEATURE_EPT);
clear_cpu_cap(c, X86_FEATURE_VPID);
+ clear_cpu_cap(c, X86_FEATURE_EPT_AD);
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
msr_ctl = vmx_msr_high | vmx_msr_low;
@@ -487,8 +490,13 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
(msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
- if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+ if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
set_cpu_cap(c, X86_FEATURE_EPT);
+ rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+ msr_ept_cap, msr_vpid_cap);
+ if (msr_ept_cap & x86_VMX_FEATURE_EPT_CAP_AD)
+ set_cpu_cap(c, X86_FEATURE_EPT_AD);
+ }
if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
set_cpu_cap(c, X86_FEATURE_VPID);
}
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index ec4754f81cbd..abb71ac70443 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -859,6 +859,8 @@ static __init bool get_rdt_resources(void)
return (rdt_mon_capable || rdt_alloc_capable);
}
+static enum cpuhp_state rdt_online;
+
static int __init intel_rdt_late_init(void)
{
struct rdt_resource *r;
@@ -880,6 +882,7 @@ static int __init intel_rdt_late_init(void)
cpuhp_remove_state(state);
return ret;
}
+ rdt_online = state;
for_each_alloc_capable_rdt_resource(r)
pr_info("Intel RDT %s allocation detected\n", r->name);
@@ -891,3 +894,11 @@ static int __init intel_rdt_late_init(void)
}
late_initcall(intel_rdt_late_init);
+
+static void __exit intel_rdt_exit(void)
+{
+ cpuhp_remove_state(rdt_online);
+ rdtgroup_exit();
+}
+
+__exitcall(intel_rdt_exit);
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 39752825e376..4e588f36228f 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -81,6 +81,34 @@ enum rdt_group_type {
};
/**
+ * enum rdtgrp_mode - Mode of a RDT resource group
+ * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
+ * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
+ * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
+ * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
+ * allowed AND the allocations are Cache Pseudo-Locked
+ *
+ * The mode of a resource group enables control over the allowed overlap
+ * between allocations associated with different resource groups (classes
+ * of service). The user can modify the mode of a resource group by
+ * writing to the "mode" resctrl file associated with the resource group.
+ *
+ * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
+ * writing the appropriate text to the "mode" file. A resource group enters
+ * "pseudo-locked" mode after the schemata is written while the resource
+ * group is in "pseudo-locksetup" mode.
+ */
+enum rdtgrp_mode {
+ RDT_MODE_SHAREABLE = 0,
+ RDT_MODE_EXCLUSIVE,
+ RDT_MODE_PSEUDO_LOCKSETUP,
+ RDT_MODE_PSEUDO_LOCKED,
+
+ /* Must be last */
+ RDT_NUM_MODES,
+};
+
+/**
* struct mongroup - store mon group's data in resctrl fs.
* @mon_data_kn kernlfs node for the mon_data directory
* @parent: parent rdtgrp
@@ -95,6 +123,43 @@ struct mongroup {
};
/**
+ * struct pseudo_lock_region - pseudo-lock region information
+ * @r: RDT resource to which this pseudo-locked region
+ * belongs
+ * @d: RDT domain to which this pseudo-locked region
+ * belongs
+ * @cbm: bitmask of the pseudo-locked region
+ * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread
+ * completion
+ * @thread_done: variable used by waitqueue to test if pseudo-locking
+ * thread completed
+ * @cpu: core associated with the cache on which the setup code
+ * will be run
+ * @line_size: size of the cache lines
+ * @size: size of pseudo-locked region in bytes
+ * @kmem: the kernel memory associated with pseudo-locked region
+ * @minor: minor number of character device associated with this
+ * region
+ * @debugfs_dir: pointer to this region's directory in the debugfs
+ * filesystem
+ * @pm_reqs: Power management QoS requests related to this region
+ */
+struct pseudo_lock_region {
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+ u32 cbm;
+ wait_queue_head_t lock_thread_wq;
+ int thread_done;
+ int cpu;
+ unsigned int line_size;
+ unsigned int size;
+ void *kmem;
+ unsigned int minor;
+ struct dentry *debugfs_dir;
+ struct list_head pm_reqs;
+};
+
+/**
* struct rdtgroup - store rdtgroup's data in resctrl file system.
* @kn: kernfs node
* @rdtgroup_list: linked list for all rdtgroups
@@ -106,16 +171,20 @@ struct mongroup {
* @type: indicates type of this rdtgroup - either
* monitor only or ctrl_mon group
* @mon: mongroup related data
+ * @mode: mode of resource group
+ * @plr: pseudo-locked region
*/
struct rdtgroup {
- struct kernfs_node *kn;
- struct list_head rdtgroup_list;
- u32 closid;
- struct cpumask cpu_mask;
- int flags;
- atomic_t waitcount;
- enum rdt_group_type type;
- struct mongroup mon;
+ struct kernfs_node *kn;
+ struct list_head rdtgroup_list;
+ u32 closid;
+ struct cpumask cpu_mask;
+ int flags;
+ atomic_t waitcount;
+ enum rdt_group_type type;
+ struct mongroup mon;
+ enum rdtgrp_mode mode;
+ struct pseudo_lock_region *plr;
};
/* rdtgroup.flags */
@@ -148,6 +217,7 @@ extern struct list_head rdt_all_groups;
extern int max_name_width, max_data_width;
int __init rdtgroup_init(void);
+void __exit rdtgroup_exit(void);
/**
* struct rftype - describe each file in the resctrl file system
@@ -216,22 +286,24 @@ struct mbm_state {
* @mbps_val: When mba_sc is enabled, this holds the bandwidth in MBps
* @new_ctrl: new ctrl value to be loaded
* @have_new_ctrl: did user provide new_ctrl for this domain
+ * @plr: pseudo-locked region (if any) associated with domain
*/
struct rdt_domain {
- struct list_head list;
- int id;
- struct cpumask cpu_mask;
- unsigned long *rmid_busy_llc;
- struct mbm_state *mbm_total;
- struct mbm_state *mbm_local;
- struct delayed_work mbm_over;
- struct delayed_work cqm_limbo;
- int mbm_work_cpu;
- int cqm_work_cpu;
- u32 *ctrl_val;
- u32 *mbps_val;
- u32 new_ctrl;
- bool have_new_ctrl;
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_total;
+ struct mbm_state *mbm_local;
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ u32 *ctrl_val;
+ u32 *mbps_val;
+ u32 new_ctrl;
+ bool have_new_ctrl;
+ struct pseudo_lock_region *plr;
};
/**
@@ -351,7 +423,7 @@ struct rdt_resource {
struct rdt_cache cache;
struct rdt_membw membw;
const char *format_str;
- int (*parse_ctrlval) (char *buf, struct rdt_resource *r,
+ int (*parse_ctrlval) (void *data, struct rdt_resource *r,
struct rdt_domain *d);
struct list_head evt_list;
int num_rmid;
@@ -359,8 +431,8 @@ struct rdt_resource {
unsigned long fflags;
};
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
+int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -368,7 +440,7 @@ extern struct rdt_resource rdt_resources_all[];
extern struct rdtgroup rdtgroup_default;
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
-int __init rdtgroup_init(void);
+extern struct dentry *debugfs_resctrl;
enum {
RDT_RESOURCE_L3,
@@ -439,13 +511,32 @@ void rdt_last_cmd_printf(const char *fmt, ...);
void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+ umode_t mask);
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
struct list_head **pos);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v);
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ u32 _cbm, int closid, bool exclusive);
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
+ u32 cbm);
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
+int rdtgroup_tasks_assigned(struct rdtgroup *r);
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
+int rdt_pseudo_lock_init(void);
+void rdt_pseudo_lock_release(void);
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
+int update_domains(struct rdt_resource *r, int closid);
+void closid_free(int closid);
int alloc_rmid(void);
void free_rmid(u32 rmid);
int rdt_get_mon_l3_config(struct rdt_resource *r);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index 116d57b248d3..af358ca05160 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -64,9 +64,10 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
{
unsigned long data;
+ char *buf = _buf;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
@@ -87,7 +88,7 @@ int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
* Additionally Haswell requires at least two bits set.
*/
-static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
@@ -122,22 +123,64 @@ static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
+struct rdt_cbm_parse_data {
+ struct rdtgroup *rdtgrp;
+ char *buf;
+};
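+
+/*
+ * Note on the void * indirection: the parse_ctrlval() callback now takes
+ * an opaque pointer so that the two parsers can differ in what they
+ * need - parse_bw() still receives the bare text buffer while
+ * parse_cbm() receives this wrapper so that the resource group (and
+ * thus its mode) is available during CBM validation.
+ */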
+
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
{
- unsigned long data;
+ struct rdt_cbm_parse_data *data = _data;
+ struct rdtgroup *rdtgrp = data->rdtgrp;
+ u32 cbm_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
}
- if(!cbm_validate(buf, &data, r))
+ /*
+ * Cannot set up more than one pseudo-locked region in a cache
+ * hierarchy.
+ */
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+ rdtgroup_pseudo_locked_in_hierarchy(d)) {
+ rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
return -EINVAL;
- d->new_ctrl = data;
+ }
+
+ if (!cbm_validate(data->buf, &cbm_val, r))
+ return -EINVAL;
+
+ if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+ rdtgrp->mode == RDT_MODE_SHAREABLE) &&
+ rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
+ rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The CBM may not overlap with the CBM of another closid if
+ * either is exclusive.
+ */
+ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+ rdt_last_cmd_printf("overlaps with exclusive group\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+ if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ rdt_last_cmd_printf("overlaps with other group\n");
+ return -EINVAL;
+ }
+ }
+
+ d->new_ctrl = cbm_val;
d->have_new_ctrl = true;
return 0;
@@ -149,8 +192,10 @@ int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
* separated by ";". The "id" is in decimal, and must match one of
* the "id"s for this resource.
*/
-static int parse_line(char *line, struct rdt_resource *r)
+static int parse_line(char *line, struct rdt_resource *r,
+ struct rdtgroup *rdtgrp)
{
+ struct rdt_cbm_parse_data data;
char *dom = NULL, *id;
struct rdt_domain *d;
unsigned long dom_id;
@@ -167,15 +212,32 @@ next:
dom = strim(dom);
list_for_each_entry(d, &r->domains, list) {
if (d->id == dom_id) {
- if (r->parse_ctrlval(dom, r, d))
+ data.buf = dom;
+ data.rdtgrp = rdtgrp;
+ if (r->parse_ctrlval(&data, r, d))
return -EINVAL;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * We are in pseudo-locking setup mode and have
+ * just parsed a valid CBM that should be
+ * pseudo-locked. Only one pseudo-locked region
+ * is allowed per resource group and domain, so
+ * do the required initialization for this
+ * single region and return.
+ */
+ rdtgrp->plr->r = r;
+ rdtgrp->plr->d = d;
+ rdtgrp->plr->cbm = d->new_ctrl;
+ d->plr = rdtgrp->plr;
+ return 0;
+ }
goto next;
}
}
return -EINVAL;
}
-static int update_domains(struct rdt_resource *r, int closid)
+int update_domains(struct rdt_resource *r, int closid)
{
struct msr_param msr_param;
cpumask_var_t cpu_mask;
@@ -220,13 +282,14 @@ done:
return 0;
}
-static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
+static int rdtgroup_parse_resource(char *resname, char *tok,
+ struct rdtgroup *rdtgrp)
{
struct rdt_resource *r;
for_each_alloc_enabled_rdt_resource(r) {
- if (!strcmp(resname, r->name) && closid < r->num_closid)
- return parse_line(tok, r);
+ if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
+ return parse_line(tok, r, rdtgrp);
}
rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
return -EINVAL;
@@ -239,7 +302,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
struct rdt_domain *dom;
struct rdt_resource *r;
char *tok, *resname;
- int closid, ret = 0;
+ int ret = 0;
/* Valid input requires a trailing newline */
if (nbytes == 0 || buf[nbytes - 1] != '\n')
@@ -253,7 +316,15 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}
rdt_last_cmd_clear();
- closid = rdtgrp->closid;
+ /*
+ * No changes to pseudo-locked region allowed. It has to be removed
+ * and re-created instead.
+ */
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("resource group is pseudo-locked\n");
+ goto out;
+ }
for_each_alloc_enabled_rdt_resource(r) {
list_for_each_entry(dom, &r->domains, list)
@@ -272,17 +343,27 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
ret = -EINVAL;
goto out;
}
- ret = rdtgroup_parse_resource(resname, tok, closid);
+ ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
if (ret)
goto out;
}
for_each_alloc_enabled_rdt_resource(r) {
- ret = update_domains(r, closid);
+ ret = update_domains(r, rdtgrp->closid);
if (ret)
goto out;
}
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * If pseudo-locking fails we keep the resource group in
+ * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
+ * active and updated for just the domain the pseudo-locked
+ * region was requested for.
+ */
+ ret = rdtgroup_pseudo_lock_create(rdtgrp);
+ }
+
out:
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -318,10 +399,18 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
- closid = rdtgrp->closid;
- for_each_alloc_enabled_rdt_resource(r) {
- if (closid < r->num_closid)
- show_doms(s, r, closid);
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ for_each_alloc_enabled_rdt_resource(r)
+ seq_printf(s, "%s:uninitialized\n", r->name);
+ } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
+ rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+ } else {
+ closid = rdtgrp->closid;
+ for_each_alloc_enabled_rdt_resource(r) {
+ if (closid < r->num_closid)
+ show_doms(s, r, closid);
+ }
}
} else {
ret = -ENOENT;
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
new file mode 100644
index 000000000000..40f3903ae5d9
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -0,0 +1,1522 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resource Director Technology (RDT)
+ *
+ * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Reinette Chatre <reinette.chatre@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kthread.h>
+#include <linux/mman.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/intel-family.h>
+#include <asm/intel_rdt_sched.h>
+#include <asm/perf_event.h>
+
+#include "intel_rdt.h"
+
+#define CREATE_TRACE_POINTS
+#include "intel_rdt_pseudo_lock_event.h"
+
+/*
+ * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
+ * prefetcher state. Details about this register can be found in the MSR
+ * tables for specific platforms found in Intel's SDM.
+ */
+#define MSR_MISC_FEATURE_CONTROL 0x000001a4
+
+/*
+ * The bits needed to disable hardware prefetching varies based on the
+ * platform. During initialization we will discover which bits to use.
+ */
+static u64 prefetch_disable_bits;
+
+/*
+ * Major number assigned to and shared by all devices exposing
+ * pseudo-locked regions.
+ */
+static unsigned int pseudo_lock_major;
+static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
+static struct class *pseudo_lock_class;
+
+/**
+ * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ *
+ * Capture the list of platforms that have been validated to support
+ * pseudo-locking. This includes testing to ensure pseudo-locked regions
+ * with low cache miss rates can be created under variety of load conditions
+ * as well as that these pseudo-locked regions can maintain their low cache
+ * miss rates under variety of load conditions for significant lengths of time.
+ *
+ * After a platform has been validated to support pseudo-locking its
+ * hardware prefetch disable bits are included here as they are documented
+ * in the SDM.
+ *
+ * When adding a platform here also add support for its cache events to
+ * measure_cycles_perf_fn()
+ *
+ * Return:
+ * If platform is supported, the bits to disable hardware prefetchers, 0
+ * if platform is not supported.
+ */
+static u64 get_prefetch_disable_bits(void)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ boot_cpu_data.x86 != 6)
+ return 0;
+
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_BROADWELL_X:
+ /*
+ * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+ * as:
+ * 0 L2 Hardware Prefetcher Disable (R/W)
+ * 1 L2 Adjacent Cache Line Prefetcher Disable (R/W)
+ * 2 DCU Hardware Prefetcher Disable (R/W)
+ * 3 DCU IP Prefetcher Disable (R/W)
+ * 63:4 Reserved
+ */
+ return 0xF;
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ /*
+ * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+ * as:
+ * 0 L2 Hardware Prefetcher Disable (R/W)
+ * 1 Reserved
+ * 2 DCU Hardware Prefetcher Disable (R/W)
+ * 63:3 Reserved
+ */
+ return 0x5;
+ }
+
+ return 0;
+}
+
+/*
+ * Helper to write 64bit value to MSR without tracing. Used when
+ * use of the cache should be restricted and use of registers used
+ * for local variables avoided.
+ */
+static inline void pseudo_wrmsrl_notrace(unsigned int msr, u64 val)
+{
+ __wrmsr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
+}
+
+/**
+ * pseudo_lock_minor_get - Obtain available minor number
+ * @minor: Pointer to where new minor number will be stored
+ *
+ * A bitmask is used to track available minor numbers. Here the next free
+ * minor number is marked as unavailable and returned.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int pseudo_lock_minor_get(unsigned int *minor)
+{
+ unsigned long first_bit;
+
+ first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
+
+ if (first_bit == MINORBITS)
+ return -ENOSPC;
+
+ __clear_bit(first_bit, &pseudo_lock_minor_avail);
+ *minor = first_bit;
+
+ return 0;
+}
+
+/**
+ * pseudo_lock_minor_release - Return minor number to available
+ * @minor: The minor number made available
+ */
+static void pseudo_lock_minor_release(unsigned int minor)
+{
+ __set_bit(minor, &pseudo_lock_minor_avail);
+}
+
+/**
+ * region_find_by_minor - Locate a pseudo-lock region by inode minor number
+ * @minor: The minor number of the device representing pseudo-locked region
+ *
+ * When the character device is accessed we need to determine which
+ * pseudo-locked region it belongs to. This is done by matching the minor
+ * number of the device to the pseudo-locked region it belongs to.
+ *
+ * Minor numbers are assigned at the time a pseudo-locked region is associated
+ * with a cache instance.
+ *
+ * Return: On success return pointer to resource group owning the pseudo-locked
+ * region, NULL on failure.
+ */
+static struct rdtgroup *region_find_by_minor(unsigned int minor)
+{
+ struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
+ rdtgrp_match = rdtgrp;
+ break;
+ }
+ }
+ return rdtgrp_match;
+}
+
+/**
+ * pseudo_lock_pm_req - A power management QoS request list entry
+ * @list: Entry within the @pm_reqs list for a pseudo-locked region
+ * @req: PM QoS request
+ */
+struct pseudo_lock_pm_req {
+ struct list_head list;
+ struct dev_pm_qos_request req;
+};
+
+static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req, *next;
+
+ list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
+ dev_pm_qos_remove_request(&pm_req->req);
+ list_del(&pm_req->list);
+ kfree(pm_req);
+ }
+}
+
+/**
+ * pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ * @plr: pseudo-lock region whose cpus' C-states are to be constrained
+ *
+ * To prevent the cache from being affected by power management, entering
+ * C6 has to be avoided. This is accomplished by requesting a latency
+ * requirement lower than the lowest C6 exit latency of all supported
+ * platforms as found in the cpuidle state tables of the intel_idle driver.
+ * At this time it is possible to do so with a single latency requirement
+ * (the 30 usec DEV_PM_QOS_RESUME_LATENCY request below) for all supported
+ * platforms.
+ *
+ * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
+ * the ACPI latencies need to be considered while keeping in mind that C2
+ * may be set to map to deeper sleep states. In this case the latency
+ * requirement needs to prevent entering C2 also.
+ */
+static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req;
+ int cpu;
+ int ret;
+
+ for_each_cpu(cpu, &plr->d->cpu_mask) {
+ pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
+ if (!pm_req) {
+ rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ ret = dev_pm_qos_add_request(get_cpu_device(cpu),
+ &pm_req->req,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 30);
+ if (ret < 0) {
+ rdt_last_cmd_printf("fail to add latency req cpu%d\n",
+ cpu);
+ kfree(pm_req);
+ ret = -1;
+ goto out_err;
+ }
+ list_add(&pm_req->list, &plr->pm_reqs);
+ }
+
+ return 0;
+
+out_err:
+ pseudo_lock_cstates_relax(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_region_clear - Reset pseudo-lock region data
+ * @plr: pseudo-lock region
+ *
+ * All content of the pseudo-locked region is reset - any allocated memory
+ * is freed.
+ *
+ * Return: void
+ */
+static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
+{
+ plr->size = 0;
+ plr->line_size = 0;
+ kfree(plr->kmem);
+ plr->kmem = NULL;
+ plr->r = NULL;
+ if (plr->d)
+ plr->d->plr = NULL;
+ plr->d = NULL;
+ plr->cbm = 0;
+ plr->debugfs_dir = NULL;
+}
+
+/**
+ * pseudo_lock_region_init - Initialize pseudo-lock region information
+ * @plr: pseudo-lock region
+ *
+ * Called after the user provided a schemata to be pseudo-locked. On entry
+ * the &struct pseudo_lock_region has already been initialized with the
+ * resource, domain, and capacity bitmask from that schemata. Here the
+ * information required for pseudo-locking is deduced from this data and
+ * the &struct pseudo_lock_region is initialized further. This information
+ * includes:
+ * - size in bytes of the region to be pseudo-locked
+ * - cache line size to know the stride with which data needs to be accessed
+ * to be pseudo-locked
+ * - a cpu associated with the cache instance on which the pseudo-locking
+ * flow can be executed
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
+{
+ struct cpu_cacheinfo *ci;
+ int ret;
+ int i;
+
+ /* Pick the first cpu we find that is associated with the cache. */
+ plr->cpu = cpumask_first(&plr->d->cpu_mask);
+
+ if (!cpu_online(plr->cpu)) {
+ rdt_last_cmd_printf("cpu %u associated with cache not online\n",
+ plr->cpu);
+ ret = -ENODEV;
+ goto out_region;
+ }
+
+ ci = get_cpu_cacheinfo(plr->cpu);
+
+ plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == plr->r->cache_level) {
+ plr->line_size = ci->info_list[i].coherency_line_size;
+ return 0;
+ }
+ }
+
+ ret = -1;
+ rdt_last_cmd_puts("unable to determine cache line size\n");
+out_region:
+ pseudo_lock_region_clear(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_init - Initialize a pseudo-lock region
+ * @rdtgrp: resource group to which new pseudo-locked region will belong
+ *
+ * A pseudo-locked region is associated with a resource group. When this
+ * association is created the pseudo-locked region is initialized. The
+ * details of the pseudo-locked region are not known at this time so only
+ * allocation is done and association established.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_init(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr;
+
+ plr = kzalloc(sizeof(*plr), GFP_KERNEL);
+ if (!plr)
+ return -ENOMEM;
+
+ init_waitqueue_head(&plr->lock_thread_wq);
+ INIT_LIST_HEAD(&plr->pm_reqs);
+ rdtgrp->plr = plr;
+ return 0;
+}
+
+/**
+ * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
+ * @plr: pseudo-lock region
+ *
+ * Initialize the details required to set up the pseudo-locked region and
+ * allocate the contiguous memory that will be pseudo-locked to the cache.
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
+{
+ int ret;
+
+ ret = pseudo_lock_region_init(plr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We do not yet support contiguous regions larger than
+ * KMALLOC_MAX_SIZE.
+ */
+ if (plr->size > KMALLOC_MAX_SIZE) {
+ rdt_last_cmd_puts("requested region exceeds maximum size\n");
+ ret = -E2BIG;
+ goto out_region;
+ }
+
+ plr->kmem = kzalloc(plr->size, GFP_KERNEL);
+ if (!plr->kmem) {
+ rdt_last_cmd_puts("unable to allocate memory\n");
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ ret = 0;
+ goto out;
+out_region:
+ pseudo_lock_region_clear(plr);
+out:
+ return ret;
+}
+
+/**
+ * pseudo_lock_free - Free a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-locked region belonged
+ *
+ * The pseudo-locked region's resources have already been released, or not
+ * yet created at this point. Now it can be freed and disassociated from the
+ * resource group.
+ *
+ * Return: void
+ */
+static void pseudo_lock_free(struct rdtgroup *rdtgrp)
+{
+ pseudo_lock_region_clear(rdtgrp->plr);
+ kfree(rdtgrp->plr);
+ rdtgrp->plr = NULL;
+}
+
+/**
+ * pseudo_lock_fn - Load kernel memory into cache
+ * @_rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * This is the core pseudo-locking flow.
+ *
+ * First we ensure that the kernel memory cannot be found in the cache.
+ * Then, while taking care that there will be as little interference as
+ * possible, the memory to be loaded is accessed while core is running
+ * with class of service set to the bitmask of the pseudo-locked region.
+ * After this is complete no future CAT allocations will be allowed to
+ * overlap with this bitmask.
+ *
+ * Local register variables are utilized to ensure that the memory region
+ * to be locked is the only memory access made during the critical locking
+ * loop.
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int pseudo_lock_fn(void *_rdtgrp)
+{
+ struct rdtgroup *rdtgrp = _rdtgrp;
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ u32 rmid_p, closid_p;
+ unsigned long i;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use a regular
+ * variable to ensure we always use a valid pointer, but the cost
+ * is that this variable will enter the cache through evicting the
+ * memory we are trying to lock into the cache. Thus expect lower
+ * pseudo-locking success rate when KASAN is active.
+ */
+ unsigned int line_size;
+ unsigned int size;
+ void *mem_r;
+#else
+ register unsigned int line_size asm("esi");
+ register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ /*
+ * Make sure none of the allocated memory is cached. If it is we
+ * will get a cache hit in the loop below from outside of the
+ * pseudo-locked region.
+ * wbinvd (as opposed to clflush/clflushopt) is required to
+ * increase likelihood that allocated cache portion will be filled
+ * with associated memory.
+ */
+ native_wbinvd();
+
+ /*
+ * Always called with interrupts enabled. By disabling interrupts we
+ * ensure that we will not be preempted during this critical section.
+ */
+ local_irq_disable();
+
+ /*
+ * Call wrmsr and rdmsr as directly as possible to avoid tracing
+ * clobbering local register variables or affecting cache accesses.
+ *
+ * Disable the hardware prefetcher so that when the end of the memory
+ * being pseudo-locked is reached the hardware will not read beyond
+ * the buffer and evict pseudo-locked memory read earlier from the
+ * cache.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ closid_p = this_cpu_read(pqr_state.cur_closid);
+ rmid_p = this_cpu_read(pqr_state.cur_rmid);
+ mem_r = plr->kmem;
+ size = plr->size;
+ line_size = plr->line_size;
+ /*
+ * Critical section begin: start by writing the closid associated
+ * with the capacity bitmask of the cache region being
+ * pseudo-locked followed by reading of kernel memory to load it
+ * into the cache.
+ */
+ __wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
+ /*
+ * Cache was flushed earlier. Now access kernel memory to read it
+ * into cache region associated with just activated plr->closid.
+ * Loop over data twice:
+ * - In first loop the cache region is shared with the page walker
+ * as it populates the paging structure caches (including TLB).
+ * - In the second loop the paging structure caches are used and
+ * cache region is populated with the memory being referenced.
+ */
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ /*
+ * Add a barrier to prevent speculative execution of this
+ * loop reading beyond the end of the buffer.
+ */
+ rmb();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ for (i = 0; i < size; i += line_size) {
+ /*
+ * Add a barrier to prevent speculative execution of this
+ * loop reading beyond the end of the buffer.
+ */
+ rmb();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ /*
+ * Critical section end: restore closid with capacity bitmask that
+ * does not overlap with pseudo-locked region.
+ */
+ __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
+
+ /* Re-enable the hardware prefetcher(s) */
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+/**
+ * rdtgroup_monitor_in_progress - Test if monitoring in progress
+ * @rdtgrp: resource group being queried
+ *
+ * Return: 1 if monitor groups have been created for this resource
+ * group, 0 otherwise.
+ */
+static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
+{
+ return !list_empty(&rdtgrp->mon.crdtgrp_list);
+}
+
+/**
+ * rdtgroup_locksetup_user_restrict - Restrict user access to group
+ * @rdtgrp: resource group needing access restricted
+ *
+ * A resource group used for cache pseudo-locking cannot have cpus or tasks
+ * assigned to it. This is communicated to the user by restricting access
+ * to all the files that can be used to make such changes.
+ *
+ * Permissions restored with rdtgroup_locksetup_user_restore()
+ *
+ * Return: 0 on success, <0 on failure. If a failure occurs during the
+ * restriction of access an attempt will be made to restore permissions,
+ * but the state of the mode of these files will then be uncertain.
+ */
+static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+ if (ret)
+ return ret;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+ if (ret)
+ goto err_tasks;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+ if (ret)
+ goto err_cpus;
+
+ if (rdt_mon_capable) {
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
+ if (ret)
+ goto err_cpus_list;
+ }
+
+ ret = 0;
+ goto out;
+
+err_cpus_list:
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+err_cpus:
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+err_tasks:
+ rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_user_restore - Restore user access to group
+ * @rdtgrp: resource group needing access restored
+ *
+ * Restore all file access previously removed using
+ * rdtgroup_locksetup_user_restrict()
+ *
+ * Return: 0 on success, <0 on failure. If a failure occurs during the
+ * restoration of access an attempt will be made to restrict permissions
+ * again, but the state of the mode of these files will then be uncertain.
+ */
+static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+ if (ret)
+ return ret;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+ if (ret)
+ goto err_tasks;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+ if (ret)
+ goto err_cpus;
+
+ if (rdt_mon_capable) {
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
+ if (ret)
+ goto err_cpus_list;
+ }
+
+ ret = 0;
+ goto out;
+
+err_cpus_list:
+ rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+err_cpus:
+ rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+err_tasks:
+ rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_enter - Resource group enters locksetup mode
+ * @rdtgrp: resource group requested to enter locksetup mode
+ *
+ * A resource group enters locksetup mode to reflect that it will be used
+ * to represent a pseudo-locked region and is in the process of being set
+ * up to do so. A resource group used for a pseudo-locked region loses the
+ * closid associated with it, so we cannot allow it to have any tasks or
+ * cpus assigned nor permit tasks or cpus to be assigned in the future.
+ * Monitoring of a pseudo-locked region is not allowed either.
+ *
+ * The above and more restrictions on a pseudo-locked region are checked
+ * for and enforced before the resource group enters the locksetup mode.
+ *
+ * Returns: 0 if the resource group successfully entered locksetup mode, <0
+ * on failure. On failure the last_cmd_status buffer is updated with text to
+ * communicate details of failure to the user.
+ */
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ /*
+ * The default resource group can neither be removed nor lose the
+ * default closid associated with it.
+ */
+ if (rdtgrp == &rdtgroup_default) {
+ rdt_last_cmd_puts("cannot pseudo-lock default group\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cache Pseudo-locking not supported when CDP is enabled.
+ *
+ * Some things to consider if you would like to enable this
+ * support (using L3 CDP as an example):
+ * - When CDP is enabled two separate resources are exposed,
+ * L3DATA and L3CODE, but they are actually on the same cache.
+ * The implication for pseudo-locking is that if a
+ * pseudo-locked region is created on a domain of one
+ * resource (e.g. L3CODE), then a pseudo-locked region cannot
+ * be created on that same domain of the other resource
+ * (e.g. L3DATA). This is because the creation of a
+ * pseudo-locked region involves a call to wbinvd that will
+ * affect all cache allocations on that particular domain.
+ * - Considering the previous, it may be possible to only
+ * expose one of the CDP resources to pseudo-locking and
+ * hide the other. For example, we could consider only
+ * exposing L3DATA; since the L3 cache is unified it is
+ * still possible to place instructions there and execute them.
+ * - If only one region is exposed to pseudo-locking we should
+ * still keep in mind that availability of a portion of cache
+ * for pseudo-locking should take into account both resources.
+ * Similarly, if a pseudo-locked region is created in one
+ * resource, the portion of cache used by it should be made
+ * unavailable to all future allocations from both resources.
+ */
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
+ rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+ rdt_last_cmd_puts("CDP enabled\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Not knowing the bits to disable prefetching implies that this
+ * platform does not support Cache Pseudo-Locking.
+ */
+ prefetch_disable_bits = get_prefetch_disable_bits();
+ if (prefetch_disable_bits == 0) {
+ rdt_last_cmd_puts("pseudo-locking not supported\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_monitor_in_progress(rdtgrp)) {
+ rdt_last_cmd_puts("monitoring in progress\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_tasks_assigned(rdtgrp)) {
+ rdt_last_cmd_puts("tasks assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+ rdt_last_cmd_puts("CPUs assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
+ rdt_last_cmd_puts("unable to modify resctrl permissions\n");
+ return -EIO;
+ }
+
+ ret = pseudo_lock_init(rdtgrp);
+ if (ret) {
+ rdt_last_cmd_puts("unable to init pseudo-lock region\n");
+ goto out_release;
+ }
+
+ /*
+ * If this system is capable of monitoring, an RMID would have been
+ * allocated when the control group was created. It is not needed
+ * anymore once this group is used for pseudo-locking. Calling
+ * free_rmid() is safe on platforms not capable of monitoring.
+ */
+ free_rmid(rdtgrp->mon.rmid);
+
+ ret = 0;
+ goto out;
+
+out_release:
+ rdtgroup_locksetup_user_restore(rdtgrp);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_exit - Resource group exits locksetup mode
+ * @rdtgrp: resource group
+ *
+ * When a resource group exits locksetup mode the earlier restrictions are
+ * lifted.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ if (rdt_mon_capable) {
+ ret = alloc_rmid();
+ if (ret < 0) {
+ rdt_last_cmd_puts("out of RMIDs\n");
+ return ret;
+ }
+ rdtgrp->mon.rmid = ret;
+ }
+
+ ret = rdtgroup_locksetup_user_restore(rdtgrp);
+ if (ret) {
+ free_rmid(rdtgrp->mon.rmid);
+ return ret;
+ }
+
+ pseudo_lock_free(rdtgrp);
+ return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
+ * @d: RDT domain
+ * @_cbm: CBM to test
+ *
+ * @d represents a cache instance and @_cbm a capacity bitmask that is
+ * considered for it. Determine if @_cbm overlaps with any existing
+ * pseudo-locked region on @d.
+ *
+ * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * otherwise.
+ */
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+{
+ unsigned long *cbm = (unsigned long *)&_cbm;
+ unsigned long *cbm_b;
+ unsigned int cbm_len;
+
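+ /*
+ * Implementation note (assumption made explicit): treating &_cbm as
+ * a bitmap is safe here because x86 is little-endian and the CBM
+ * never exceeds 32 bits, so bit 0 of the u32 is bit 0 of the
+ * unsigned long and bitmap_intersects() masks off all bits beyond
+ * cbm_len.
+ */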
+ if (d->plr) {
+ cbm_len = d->plr->r->cache.cbm_len;
+ cbm_b = (unsigned long *)&d->plr->cbm;
+ if (bitmap_intersects(cbm, cbm_b, cbm_len))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
+ * @d: RDT domain under test
+ *
+ * The setup of a pseudo-locked region affects all cache instances within
+ * the hierarchy of the region. It is thus essential to know if any
+ * pseudo-locked regions exist within a cache hierarchy to prevent any
+ * attempts to create new pseudo-locked regions in the same hierarchy.
+ *
+ * Return: true if a pseudo-locked region exists in the hierarchy of @d or
+ * if it is not possible to test due to a memory allocation failure,
+ * false otherwise.
+ */
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
+{
+ cpumask_var_t cpu_with_psl;
+ struct rdt_resource *r;
+ struct rdt_domain *d_i;
+ bool ret = false;
+
+ if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
+ return true;
+
+ /*
+ * First determine which cpus have pseudo-locked regions
+ * associated with them.
+ */
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d_i, &r->domains, list) {
+ if (d_i->plr)
+ cpumask_or(cpu_with_psl, cpu_with_psl,
+ &d_i->cpu_mask);
+ }
+ }
+
+ /*
+ * Next test if new pseudo-locked region would intersect with
+ * existing region.
+ */
+ if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
+ ret = true;
+
+ free_cpumask_var(cpu_with_psl);
+ return ret;
+}
+
+/**
+ * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * @_plr: pseudo-lock region to measure
+ *
+ * There is no deterministic way to test if a memory region is cached. One
+ * way is to measure how long it takes to read the memory; the speed of
+ * access is a good way to learn how close to the cpu the data was. Even
+ * more, if the prefetcher is disabled and the memory is read at a stride
+ * of half the cache line, then a cache miss will be easy to spot since the
+ * read of the first half would be significantly slower than the read of
+ * the second half.
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int measure_cycles_lat_fn(void *_plr)
+{
+ struct pseudo_lock_region *plr = _plr;
+ unsigned long i;
+ u64 start, end;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use a regular
+ * variable to ensure we always use a valid pointer to access memory.
+ * The cost is that accessing this pointer, which could be in
+ * cache, will be included in the measurement of memory read latency.
+ */
+ void *mem_r;
+#else
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ local_irq_disable();
+ /*
+ * The wrmsr call may be reordered with the assignment below it.
+ * Call wrmsr as directly as possible to avoid tracing clobbering
+ * local register variable used for memory pointer.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ mem_r = plr->kmem;
+ /*
+ * Dummy execute of the time measurement to load the needed
+ * instructions into the L1 instruction cache.
+ */
+ start = rdtsc_ordered();
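+ /*
+ * The stride of 32 bytes is half of a cache line on the supported
+ * platforms (assuming the common 64 byte line size), per the
+ * function comment: a miss then shows up as a slow read of the
+ * first half of a line followed by a fast read of the second half.
+ */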
+ for (i = 0; i < plr->size; i += 32) {
+ start = rdtsc_ordered();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ end = rdtsc_ordered();
+ trace_pseudo_lock_mem_latency((u32)(end - start));
+ }
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+static int measure_cycles_perf_fn(void *_plr)
+{
+ unsigned long long l3_hits = 0, l3_miss = 0;
+ u64 l3_hit_bits = 0, l3_miss_bits = 0;
+ struct pseudo_lock_region *plr = _plr;
+ unsigned long long l2_hits, l2_miss;
+ u64 l2_hit_bits, l2_miss_bits;
+ unsigned long i;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use regular variables
+ * at the cost of including cache access latency to these variables
+ * in the measurements.
+ */
+ unsigned int line_size;
+ unsigned int size;
+ void *mem_r;
+#else
+ register unsigned int line_size asm("esi");
+ register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ /*
+ * Non-architectural event for the Goldmont Microarchitecture
+ * from Intel x86 Architecture Software Developer Manual (SDM):
+ * MEM_LOAD_UOPS_RETIRED D1H (event number)
+ * Umask values:
+ * L1_HIT 01H
+ * L2_HIT 02H
+ * L1_MISS 08H
+ * L2_MISS 10H
+ *
+ * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
+ * has two "no fix" errata associated with it: BDM35 and BDM100. On
+ * this platform we use the following events instead:
+ * L2_RQSTS 24H (Documented in https://download.01.org/perfmon/BDW/)
+ * REFERENCES FFH
+ * MISS 3FH
+ * LONGEST_LAT_CACHE 2EH (Documented in SDM)
+ * REFERENCE 4FH
+ * MISS 41H
+ */
+
+ /*
+ * Start by setting flags for IA32_PERFEVTSELx:
+ * OS (Operating system mode) 0x2
+ * INT (APIC interrupt enable) 0x10
+ * EN (Enable counter) 0x40
+ *
+ * Then add the Umask value and event number to select performance
+ * event.
+ */
+
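+ /*
+ * Worked example for the values below: the Goldmont L2 hit event
+ * select (0x52ULL << 16) | (0x2 << 8) | 0xd1 combines the
+ * OS|INT|EN flags (0x2 | 0x10 | 0x40 = 0x52) in bits 23:16, the
+ * L2_HIT umask (02H) in bits 15:8 and the MEM_LOAD_UOPS_RETIRED
+ * event number (D1H) in bits 7:0.
+ */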
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1;
+ l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1;
+ break;
+ case INTEL_FAM6_BROADWELL_X:
+ /* On BDW the l2_hit_bits count references, not hits */
+ l2_hit_bits = (0x52ULL << 16) | (0xff << 8) | 0x24;
+ l2_miss_bits = (0x52ULL << 16) | (0x3f << 8) | 0x24;
+ /* On BDW the l3_hit_bits count references, not hits */
+ l3_hit_bits = (0x52ULL << 16) | (0x4f << 8) | 0x2e;
+ l3_miss_bits = (0x52ULL << 16) | (0x41 << 8) | 0x2e;
+ break;
+ default:
+ goto out;
+ }
+
+ local_irq_disable();
+ /*
+ * Call wrmsr directly to prevent the local register variables
+ * from being overwritten due to reordering of their assignment
+ * with the wrmsr calls.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ /* Disable events and reset counters */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 1, 0x0);
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 2, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 3, 0x0);
+ }
+ /* Set and enable the L2 counters */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, l2_hit_bits);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, l2_miss_bits);
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+ l3_hit_bits);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ l3_miss_bits);
+ }
+ mem_r = plr->kmem;
+ size = plr->size;
+ line_size = plr->line_size;
+ for (i = 0; i < size; i += line_size) {
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ /*
+ * Call wrmsr directly (no tracing) to not influence
+ * the cache access counters as they are disabled.
+ */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0,
+ l2_hit_bits & ~(0x40ULL << 16));
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1,
+ l2_miss_bits & ~(0x40ULL << 16));
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+ l3_hit_bits & ~(0x40ULL << 16));
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ l3_miss_bits & ~(0x40ULL << 16));
+ }
+ l2_hits = native_read_pmc(0);
+ l2_miss = native_read_pmc(1);
+ if (l3_hit_bits > 0) {
+ l3_hits = native_read_pmc(2);
+ l3_miss = native_read_pmc(3);
+ }
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+ /*
+ * On BDW we count references and misses, need to adjust. Sometimes
+ * the number of misses can exceed the number of references, for
+ * example, x references but x + 1 misses. To not report invalid
+ * hit values in this case we treat the misses as equal to the
+ * references.
+ */
+ if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+ l2_hits -= (l2_miss > l2_hits ? l2_hits : l2_miss);
+ trace_pseudo_lock_l2(l2_hits, l2_miss);
+ if (l3_hit_bits > 0) {
+ if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+ l3_hits -= (l3_miss > l3_hits ? l3_hits : l3_miss);
+ trace_pseudo_lock_l3(l3_hits, l3_miss);
+ }
+
+out:
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+/**
+ * pseudo_lock_measure_cycles - Trigger measurement of a pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ * @sel: selects the measurement to run (1 = latency, 2 = cache hit/miss)
+ *
+ * The measurement of latency to access a pseudo-locked region should be
+ * done from a cpu that is associated with that pseudo-locked region.
+ * Determine which cpu is associated with this region and start a thread on
+ * that cpu to perform the measurement, and wait for that thread to complete.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct task_struct *thread;
+ unsigned int cpu;
+ int ret = -1;
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ if (rdtgrp->flags & RDT_DELETED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ plr->thread_done = 0;
+ cpu = cpumask_first(&plr->d->cpu_mask);
+ if (!cpu_online(cpu)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (sel == 1)
+ thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
+ cpu_to_node(cpu),
+ "pseudo_lock_measure/%u",
+ cpu);
+ else if (sel == 2)
+ thread = kthread_create_on_node(measure_cycles_perf_fn, plr,
+ cpu_to_node(cpu),
+ "pseudo_lock_measure/%u",
+ cpu);
+ else
+ goto out;
+
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ goto out;
+ }
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+
+ ret = wait_event_interruptible(plr->lock_thread_wq,
+ plr->thread_done == 1);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+
+out:
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+ return ret;
+}
+
+static ssize_t pseudo_lock_measure_trigger(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rdtgroup *rdtgrp = file->private_data;
+ size_t buf_size;
+ char buf[32];
+ int ret;
+ int sel;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ ret = kstrtoint(buf, 10, &sel);
+ if (ret == 0) {
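+ /*
+ * Only the latency measurement (sel == 1) is accepted via
+ * this debugfs file for now; measure_cycles_perf_fn()
+ * (sel == 2) is wired up in pseudo_lock_measure_cycles()
+ * but not yet exposed here.
+ */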
+ if (sel != 1)
+ return -EINVAL;
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (ret)
+ return ret;
+ ret = pseudo_lock_measure_cycles(rdtgrp, sel);
+ if (ret == 0)
+ ret = count;
+ debugfs_file_put(file->f_path.dentry);
+ }
+
+ return ret;
+}
+
+static const struct file_operations pseudo_measure_fops = {
+ .write = pseudo_lock_measure_trigger,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+/**
+ * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * Called when a resource group in the pseudo-locksetup mode receives a
+ * valid schemata that should be pseudo-locked. Since the resource group is
+ * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
+ * allocated and initialized with the essential information. If a failure
+ * occurs the resource group remains in the pseudo-locksetup mode with the
+ * &struct pseudo_lock_region associated with it, but cleared from all
+ * information and ready for the user to re-attempt pseudo-locking by
+ * writing the schemata again.
+ *
+ * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
+ * on failure. Descriptive error will be written to last_cmd_status buffer.
+ */
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct task_struct *thread;
+ unsigned int new_minor;
+ struct device *dev;
+ int ret;
+
+ ret = pseudo_lock_region_alloc(plr);
+ if (ret < 0)
+ return ret;
+
+ ret = pseudo_lock_cstates_constrain(plr);
+ if (ret < 0) {
+ ret = -EINVAL;
+ goto out_region;
+ }
+
+ plr->thread_done = 0;
+
+ thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
+ cpu_to_node(plr->cpu),
+ "pseudo_lock/%u", plr->cpu);
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ rdt_last_cmd_printf("locking thread returned error %d\n", ret);
+ goto out_cstates;
+ }
+
+ kthread_bind(thread, plr->cpu);
+ wake_up_process(thread);
+
+ ret = wait_event_interruptible(plr->lock_thread_wq,
+ plr->thread_done == 1);
+ if (ret < 0) {
+ /*
+ * If the thread does not get on the CPU for whatever
+ * reason and the process which sets up the region is
+ * interrupted then this will leave the thread in runnable
+ * state and once it gets on the CPU it will dereference
+ * the cleared, but not freed, plr struct resulting in an
+ * empty pseudo-locking loop.
+ */
+ rdt_last_cmd_puts("locking thread interrupted\n");
+ goto out_cstates;
+ }
+
+ ret = pseudo_lock_minor_get(&new_minor);
+ if (ret < 0) {
+ rdt_last_cmd_puts("unable to obtain a new minor number\n");
+ goto out_cstates;
+ }
+
+ /*
+ * Unlock access but do not release the reference. The
+ * pseudo-locked region will still be here on return.
+ *
+ * The mutex has to be released temporarily to avoid a potential
+ * deadlock with the mm->mmap_sem semaphore which is obtained in
+ * the device_create() and debugfs_create_dir() callpath below
+ * as well as before the mmap() callback is called.
+ */
+ mutex_unlock(&rdtgroup_mutex);
+
+ if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
+ plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
+ debugfs_resctrl);
+ if (!IS_ERR_OR_NULL(plr->debugfs_dir))
+ debugfs_create_file("pseudo_lock_measure", 0200,
+ plr->debugfs_dir, rdtgrp,
+ &pseudo_measure_fops);
+ }
+
+ dev = device_create(pseudo_lock_class, NULL,
+ MKDEV(pseudo_lock_major, new_minor),
+ rdtgrp, "%s", rdtgrp->kn->name);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ rdt_last_cmd_printf("failed to create character device: %d\n",
+ ret);
+ goto out_debugfs;
+ }
+
+ /* We released the mutex - check if group was removed while we did so */
+ if (rdtgrp->flags & RDT_DELETED) {
+ ret = -ENODEV;
+ goto out_device;
+ }
+
+ plr->minor = new_minor;
+
+ rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
+ closid_free(rdtgrp->closid);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
+
+ ret = 0;
+ goto out;
+
+out_device:
+ device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
+out_debugfs:
+ debugfs_remove_recursive(plr->debugfs_dir);
+ pseudo_lock_minor_release(new_minor);
+out_cstates:
+ pseudo_lock_cstates_relax(plr);
+out_region:
+ pseudo_lock_region_clear(plr);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ *
+ * The removal of a pseudo-locked region can be initiated when the resource
+ * group is removed via a "rmdir" from userspace or by the unmount of the
+ * resctrl filesystem. On removal the resource group does not go back to
+ * pseudo-locksetup mode before it is removed, instead it is removed
+ * directly. There is thus asymmetry with the creation: the &struct
+ * pseudo_lock_region is freed here while it was not allocated in
+ * rdtgroup_pseudo_lock_create().
+ *
+ * Return: void
+ */
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * Default group cannot be a pseudo-locked region so we can
+ * free closid here.
+ */
+ closid_free(rdtgrp->closid);
+ goto free;
+ }
+
+ pseudo_lock_cstates_relax(plr);
+ debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
+ device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
+ pseudo_lock_minor_release(plr->minor);
+
+free:
+ pseudo_lock_free(rdtgrp);
+}
+
+static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = region_find_by_minor(iminor(inode));
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ filp->private_data = rdtgrp;
+ atomic_inc(&rdtgrp->waitcount);
+ /* Perform a non-seekable open - llseek is not supported */
+ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+ filp->private_data = NULL;
+ atomic_dec(&rdtgrp->waitcount);
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
+static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
+{
+ /* Not supported */
+ return -EINVAL;
+}
+
+static const struct vm_operations_struct pseudo_mmap_ops = {
+ .mremap = pseudo_lock_dev_mremap,
+};
+
+static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct pseudo_lock_region *plr;
+ struct rdtgroup *rdtgrp;
+ unsigned long physical;
+ unsigned long psize;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ plr = rdtgrp->plr;
+
+ /*
+ * Task is required to run with affinity to the cpus associated
+ * with the pseudo-locked region. If this is not the case the task
+ * may be scheduled elsewhere and invalidate entries in the
+ * pseudo-locked region.
+ */
+ if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ physical = __pa(plr->kmem) >> PAGE_SHIFT;
+ psize = plr->size - off;
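+ /*
+ * Note: if off exceeds plr->size the subtraction above wraps, but
+ * that is harmless - the bounds check below returns before psize
+ * is used.
+ */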
+
+ if (off > plr->size) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ /*
+ * Ensure changes are carried directly to the memory being mapped,
+ * do not allow copy-on-write mapping.
+ */
+ if (!(vma->vm_flags & VM_SHARED)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ if (vsize > psize) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ memset(plr->kmem + off, 0, vsize);
+
+ if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
+ vsize, vma->vm_page_prot)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EAGAIN;
+ }
+ vma->vm_ops = &pseudo_mmap_ops;
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
+static const struct file_operations pseudo_lock_dev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = NULL,
+ .write = NULL,
+ .open = pseudo_lock_dev_open,
+ .release = pseudo_lock_dev_release,
+ .mmap = pseudo_lock_dev_mmap,
+};
+
+static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
+{
+ struct rdtgroup *rdtgrp;
+
+ rdtgrp = dev_get_drvdata(dev);
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+}
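+
+/*
+ * Illustrative note: with the devnode callback above, udev would
+ * typically create the character device for a pseudo-locked resource
+ * group "p0" as /dev/pseudo_lock/p0 with mode 0600. An application then
+ * mmap()s that device (see pseudo_lock_dev_mmap()) to gain access to
+ * the pseudo-locked memory.
+ */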
+
+int rdt_pseudo_lock_init(void)
+{
+ int ret;
+
+ ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
+ if (ret < 0)
+ return ret;
+
+ pseudo_lock_major = ret;
+
+ pseudo_lock_class = class_create(THIS_MODULE, "pseudo_lock");
+ if (IS_ERR(pseudo_lock_class)) {
+ ret = PTR_ERR(pseudo_lock_class);
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ return ret;
+ }
+
+ pseudo_lock_class->devnode = pseudo_lock_devnode;
+ return 0;
+}
+
+void rdt_pseudo_lock_release(void)
+{
+ class_destroy(pseudo_lock_class);
+ pseudo_lock_class = NULL;
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ pseudo_lock_major = 0;
+}
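
A minimal user-space sketch of how this character device is meant to be consumed, assuming a pseudo-locked group named "lock0" whose region spans CPU 2 and is at least 1 MiB; the group name, CPU and size are hypothetical. It mirrors the checks in pseudo_lock_dev_mmap() above: the task's affinity must be a subset of the region's CPUs, the mapping must be MAP_SHARED, and the requested size must fit within the region.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t size = 1024 * 1024;	/* must not exceed the region size */
	cpu_set_t cpus;
	void *mem;
	int fd;

	/* Run only on a CPU associated with the pseudo-locked region. */
	CPU_ZERO(&cpus);
	CPU_SET(2, &cpus);
	if (sched_setaffinity(0, sizeof(cpus), &cpus))
		return 1;

	fd = open("/dev/pseudo_lock/lock0", O_RDWR);
	if (fd < 0)
		return 1;

	/* Private (copy-on-write) mappings are rejected with -EINVAL. */
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED)
		return 1;

	/* ... accesses to mem now hit the pseudo-locked cache region ... */
	munmap(mem, size);
	close(fd);
	return 0;
}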
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
new file mode 100644
index 000000000000..2c041e6d9f05
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM resctrl
+
+#if !defined(_TRACE_PSEUDO_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PSEUDO_LOCK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(pseudo_lock_mem_latency,
+ TP_PROTO(u32 latency),
+ TP_ARGS(latency),
+ TP_STRUCT__entry(__field(u32, latency)),
+ TP_fast_assign(__entry->latency = latency),
+ TP_printk("latency=%u", __entry->latency)
+ );
+
+TRACE_EVENT(pseudo_lock_l2,
+ TP_PROTO(u64 l2_hits, u64 l2_miss),
+ TP_ARGS(l2_hits, l2_miss),
+ TP_STRUCT__entry(__field(u64, l2_hits)
+ __field(u64, l2_miss)),
+ TP_fast_assign(__entry->l2_hits = l2_hits;
+ __entry->l2_miss = l2_miss;),
+ TP_printk("hits=%llu miss=%llu",
+ __entry->l2_hits, __entry->l2_miss));
+
+TRACE_EVENT(pseudo_lock_l3,
+ TP_PROTO(u64 l3_hits, u64 l3_miss),
+ TP_ARGS(l3_hits, l3_miss),
+ TP_STRUCT__entry(__field(u64, l3_hits)
+ __field(u64, l3_miss)),
+ TP_fast_assign(__entry->l3_hits = l3_hits;
+ __entry->l3_miss = l3_miss;),
+ TP_printk("hits=%llu miss=%llu",
+ __entry->l3_hits, __entry->l3_miss));
+
+#endif /* _TRACE_PSEUDO_LOCK_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#include <trace/define_trace.h>
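
For reference, each TRACE_EVENT() above generates a trace_<name>() helper, i.e. trace_pseudo_lock_mem_latency(), trace_pseudo_lock_l2() and trace_pseudo_lock_l3(). A sketch of the usual pattern, with a hypothetical caller: exactly one compilation unit instantiates the tracepoints, and the measurement code simply calls the helper.

/* In exactly one .c file: */
#define CREATE_TRACE_POINTS
#include "intel_rdt_pseudo_lock_event.h"

/* Firing the event (the latency value here is made up): */
static void report_latency(u32 cycles)
{
	trace_pseudo_lock_mem_latency(cycles);
}

Once the event is enabled via tracefs (under events/resctrl/), each call emits a "latency=..." record.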
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 749856a2e736..d6d7ea7349d0 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -20,7 +20,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cacheinfo.h>
#include <linux/cpu.h>
+#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
@@ -55,6 +57,8 @@ static struct kernfs_node *kn_mondata;
static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];
+struct dentry *debugfs_resctrl;
+
void rdt_last_cmd_clear(void)
{
lockdep_assert_held(&rdtgroup_mutex);
@@ -121,11 +125,65 @@ static int closid_alloc(void)
return closid;
}
-static void closid_free(int closid)
+void closid_free(int closid)
{
closid_free_map |= 1 << closid;
}
+/**
+ * closid_allocated - test if provided closid is in use
+ * @closid: closid to be tested
+ *
+ * Return: true if @closid is currently associated with a resource group,
+ * false if @closid is free
+ */
+static bool closid_allocated(unsigned int closid)
+{
+ return (closid_free_map & (1 << closid)) == 0;
+}
+
+/**
+ * rdtgroup_mode_by_closid - Return mode of resource group with closid
+ * @closid: closid if the resource group
+ *
+ * Each resource group is associated with a @closid. Here the mode
+ * of a resource group can be queried by searching for it using its closid.
+ *
+ * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
+ */
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
+{
+ struct rdtgroup *rdtgrp;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->closid == closid)
+ return rdtgrp->mode;
+ }
+
+ return RDT_NUM_MODES;
+}
+
+static const char * const rdt_mode_str[] = {
+ [RDT_MODE_SHAREABLE] = "shareable",
+ [RDT_MODE_EXCLUSIVE] = "exclusive",
+ [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
+ [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
+};
+
+/**
+ * rdtgroup_mode_str - Return the string representation of mode
+ * @mode: the resource group mode as &enum rdtgroup_mode
+ *
+ * Return: string representation of valid mode, "unknown" otherwise
+ */
+static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
+{
+ if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
+ return "unknown";
+
+ return rdt_mode_str[mode];
+}
+
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
@@ -207,8 +265,12 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
- seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
- cpumask_pr_args(&rdtgrp->cpu_mask));
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
+ else
+ seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(&rdtgrp->cpu_mask));
} else {
ret = -ENOENT;
}
@@ -394,6 +456,13 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
goto unlock;
}
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto unlock;
+ }
+
if (is_cpu_list(of))
ret = cpulist_parse(buf, newmask);
else
@@ -509,6 +578,32 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
return ret;
}
+/**
+ * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
+ * @r: Resource group
+ *
+ * Return: 1 if tasks have been assigned to @r, 0 otherwise
+ */
+int rdtgroup_tasks_assigned(struct rdtgroup *r)
+{
+ struct task_struct *p, *t;
+ int ret = 0;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ rcu_read_lock();
+ for_each_process_thread(p, t) {
+ if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
+ (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
+ ret = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int rdtgroup_task_write_permission(struct task_struct *task,
struct kernfs_open_file *of)
{
@@ -570,13 +665,22 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
rdt_last_cmd_clear();
- if (rdtgrp)
- ret = rdtgroup_move_task(pid, rdtgrp, of);
- else
- ret = -ENOENT;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto unlock;
+ }
+ ret = rdtgroup_move_task(pid, rdtgrp, of);
+
+unlock:
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -662,6 +766,94 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
return 0;
}
+/**
+ * rdt_bit_usage_show - Display current usage of resources
+ *
+ * A domain is an instance of a shared resource, portions of which can be
+ * allocated to different resource groups. Here we display the current
+ * regions of the domain as an annotated bitmask.
+ * For each domain of this resource its allocation bitmask
+ * is annotated as below to indicate the current usage of the corresponding bit:
+ * 0 - currently unused
+ * X - currently available for sharing and used by software and hardware
+ * H - currently used by hardware only but available for software use
+ * S - currently used and shareable by software only
+ * E - currently used exclusively by one resource group
+ * P - currently pseudo-locked by one resource group
+ */
+static int rdt_bit_usage_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+ u32 sw_shareable = 0, hw_shareable = 0;
+ u32 exclusive = 0, pseudo_locked = 0;
+ struct rdt_domain *dom;
+ int i, hwb, swb, excl, psl;
+ enum rdtgrp_mode mode;
+ bool sep = false;
+ u32 *ctrl;
+
+ mutex_lock(&rdtgroup_mutex);
+ hw_shareable = r->cache.shareable_bits;
+ list_for_each_entry(dom, &r->domains, list) {
+ if (sep)
+ seq_putc(seq, ';');
+ ctrl = dom->ctrl_val;
+ sw_shareable = 0;
+ exclusive = 0;
+ seq_printf(seq, "%d=", dom->id);
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ if (!closid_allocated(i))
+ continue;
+ mode = rdtgroup_mode_by_closid(i);
+ switch (mode) {
+ case RDT_MODE_SHAREABLE:
+ sw_shareable |= *ctrl;
+ break;
+ case RDT_MODE_EXCLUSIVE:
+ exclusive |= *ctrl;
+ break;
+ case RDT_MODE_PSEUDO_LOCKSETUP:
+ /*
+ * RDT_MODE_PSEUDO_LOCKSETUP is possible
+ * here but not included since the CBM
+ * associated with this CLOSID in this mode
+ * is not initialized and no task or cpu can be
+ * assigned this CLOSID.
+ */
+ break;
+ case RDT_MODE_PSEUDO_LOCKED:
+ case RDT_NUM_MODES:
+ WARN(1,
+ "invalid mode for closid %d\n", i);
+ break;
+ }
+ }
+ for (i = r->cache.cbm_len - 1; i >= 0; i--) {
+ pseudo_locked = dom->plr ? dom->plr->cbm : 0;
+ hwb = test_bit(i, (unsigned long *)&hw_shareable);
+ swb = test_bit(i, (unsigned long *)&sw_shareable);
+ excl = test_bit(i, (unsigned long *)&exclusive);
+ psl = test_bit(i, (unsigned long *)&pseudo_locked);
+ if (hwb && swb)
+ seq_putc(seq, 'X');
+ else if (hwb && !swb)
+ seq_putc(seq, 'H');
+ else if (!hwb && swb)
+ seq_putc(seq, 'S');
+ else if (excl)
+ seq_putc(seq, 'E');
+ else if (psl)
+ seq_putc(seq, 'P');
+ else /* Unused bits remain */
+ seq_putc(seq, '0');
+ }
+ sep = true;
+ }
+ seq_putc(seq, '\n');
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
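
For illustration, a system with two L3 domains and a 16-bit CBM might render the file as below; the exact bit pattern is hypothetical. Domain 0 shows software-shareable, hardware-only and unused bits; domain 1 adds exclusive and pseudo-locked regions:

# cat /sys/fs/resctrl/info/L3/bit_usage
0=SSSSSSSSHHHH0000;1=SSSSSSSSHHHHEEPP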
+
static int rdt_min_bw_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
@@ -740,6 +932,269 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
return nbytes;
}
+/*
+ * rdtgroup_mode_show - Display mode of this resource group
+ */
+static int rdtgroup_mode_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
+
+ rdtgroup_kn_unlock(of->kn);
+ return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Checks if provided @cbm intended to be used for @closid on domain
+ * @d overlaps with any other closids or other hardware usage associated
+ * with this domain. If @exclusive is true then only overlaps with
+ * resource groups in exclusive mode will be considered. If @exclusive
+ * is false then overlaps with any resource group or hardware entities
+ * will be considered.
+ *
+ * Return: false if CBM does not overlap, true if it does.
+ */
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ u32 _cbm, int closid, bool exclusive)
+{
+ unsigned long *cbm = (unsigned long *)&_cbm;
+ unsigned long *ctrl_b;
+ enum rdtgrp_mode mode;
+ u32 *ctrl;
+ int i;
+
+ /* Check for any overlap with regions used by hardware directly */
+ if (!exclusive) {
+ if (bitmap_intersects(cbm,
+ (unsigned long *)&r->cache.shareable_bits,
+ r->cache.cbm_len))
+ return true;
+ }
+
+ /* Check for overlap with other resource groups */
+ ctrl = d->ctrl_val;
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ ctrl_b = (unsigned long *)ctrl;
+ mode = rdtgroup_mode_by_closid(i);
+ if (closid_allocated(i) && i != closid &&
+ mode != RDT_MODE_PSEUDO_LOCKSETUP) {
+ if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+ if (exclusive) {
+ if (mode == RDT_MODE_EXCLUSIVE)
+ return true;
+ continue;
+ }
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
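
For example, with a 12-bit CBM, 0xf00 and 0x0ff do not intersect, while 0xf00 and 0x300 do. With @exclusive true, that second intersection only counts as an overlap if the group owning 0x300 is in exclusive mode; with @exclusive false it always does, as does any intersection with the hardware's shareable_bits.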
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ *
+ * An exclusive resource group implies that there should be no sharing of
+ * its allocated resources. When this group is about to become
+ * exclusive, this test determines whether its current schemata supports
+ * the setting by checking for overlap with all other resource groups.
+ *
+ * Return: true if resource group can be exclusive, false if there is overlap
+ * with allocations of other resource groups and thus this resource group
+ * cannot be exclusive.
+ */
+static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
+{
+ int closid = rdtgrp->closid;
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d, &r->domains, list) {
+ if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
+ rdtgrp->closid, false))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * rdtgroup_mode_write - Modify the resource group's mode
+ *
+ */
+static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdtgroup *rdtgrp;
+ enum rdtgrp_mode mode;
+ int ret = 0;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+ buf[nbytes - 1] = '\0';
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ rdt_last_cmd_clear();
+
+ mode = rdtgrp->mode;
+
+ if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
+ (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
+ (!strcmp(buf, "pseudo-locksetup") &&
+ mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
+ (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
+ goto out;
+
+ if (mode == RDT_MODE_PSEUDO_LOCKED) {
+ rdt_last_cmd_printf("cannot change pseudo-locked group\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!strcmp(buf, "shareable")) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = rdtgroup_locksetup_exit(rdtgrp);
+ if (ret)
+ goto out;
+ }
+ rdtgrp->mode = RDT_MODE_SHAREABLE;
+ } else if (!strcmp(buf, "exclusive")) {
+ if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
+ rdt_last_cmd_printf("schemata overlaps\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = rdtgroup_locksetup_exit(rdtgrp);
+ if (ret)
+ goto out;
+ }
+ rdtgrp->mode = RDT_MODE_EXCLUSIVE;
+ } else if (!strcmp(buf, "pseudo-locksetup")) {
+ ret = rdtgroup_locksetup_enter(rdtgrp);
+ if (ret)
+ goto out;
+ rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
+ } else {
+ rdt_last_cmd_printf("unknown/unsupported mode\n");
+ ret = -EINVAL;
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+ return ret ?: nbytes;
+}
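
A user-space sketch of driving this interface, with a hypothetical group name "p0"; note that the kernel side insists on the trailing newline and rejects input without it.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_mode_exclusive(void)
{
	const char buf[] = "exclusive\n";	/* trailing '\n' is required */
	ssize_t n;
	int fd;

	fd = open("/sys/fs/resctrl/p0/mode", O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, buf, strlen(buf));
	close(fd);
	return n == (ssize_t)strlen(buf) ? 0 : -1;
}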
+
+/**
+ * rdtgroup_cbm_to_size - Translate CBM to size in bytes
+ * @r: RDT resource to which @d belongs.
+ * @d: RDT domain instance.
+ * @cbm: bitmask for which the size should be computed.
+ *
+ * The provided bitmask associated with the RDT domain instance @d is
+ * translated into how many bytes it represents. The size in bytes is
+ * computed by first dividing the total cache size by the CBM length to
+ * determine how many bytes each bit in the bitmask represents. The result
+ * is multiplied by the number of bits set in the bitmask.
+ */
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
+ struct rdt_domain *d, u32 cbm)
+{
+ struct cpu_cacheinfo *ci;
+ unsigned int size = 0;
+ int num_b, i;
+
+ num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+ ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == r->cache_level) {
+ size = ci->info_list[i].size / r->cache.cbm_len * num_b;
+ break;
+ }
+ }
+
+ return size;
+}
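
As a worked example with made-up numbers: a 24 MiB L3 cache with a 12-bit CBM gives 2 MiB per bit, so a CBM of 0x3 (two bits set) is reported as 4194304 bytes.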
+
+/**
+ * rdtgroup_size_show - Display size in bytes of allocated regions
+ *
+ * The "size" file mirrors the layout of the "schemata" file, printing the
+ * size in bytes of each region instead of the capacity bitmask.
+ *
+ */
+static int rdtgroup_size_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+ unsigned int size;
+ bool sep = false;
+ u32 cbm;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
+ size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+ rdtgrp->plr->d,
+ rdtgrp->plr->cbm);
+ seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+ goto out;
+ }
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ seq_printf(s, "%*s:", max_name_width, r->name);
+ list_for_each_entry(d, &r->domains, list) {
+ if (sep)
+ seq_putc(s, ';');
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ size = 0;
+ } else {
+ cbm = d->ctrl_val[rdtgrp->closid];
+ size = rdtgroup_cbm_to_size(r, d, cbm);
+ }
+ seq_printf(s, "%d=%u", d->id, size);
+ sep = true;
+ }
+ seq_putc(s, '\n');
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+
+ return 0;
+}
+
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
{
@@ -792,6 +1247,13 @@ static struct rftype res_common_files[] = {
.fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
+ .name = "bit_usage",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_bit_usage_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+ },
+ {
.name = "min_bandwidth",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
@@ -853,6 +1315,22 @@ static struct rftype res_common_files[] = {
.seq_show = rdtgroup_schemata_show,
.fflags = RF_CTRL_BASE,
},
+ {
+ .name = "mode",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_mode_write,
+ .seq_show = rdtgroup_mode_show,
+ .fflags = RF_CTRL_BASE,
+ },
+ {
+ .name = "size",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdtgroup_size_show,
+ .fflags = RF_CTRL_BASE,
+ },
+
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
@@ -883,6 +1361,103 @@ error:
return ret;
}
+/**
+ * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ *
+ * The permissions of named resctrl file, directory, or link are modified
+ * to not allow read, write, or execute by any user.
+ *
+ * WARNING: This function is intended to communicate to the user that the
+ * resctrl file has been locked down - that it is not relevant to the
+ * particular state the system finds itself in. It should not be relied
+ * on to protect from user access because after the file's permissions
+ * are restricted the user can still change the permissions using chmod
+ * from the command line.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
+{
+ struct iattr iattr = {.ia_valid = ATTR_MODE,};
+ struct kernfs_node *kn;
+ int ret = 0;
+
+ kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+ if (!kn)
+ return -ENOENT;
+
+ switch (kernfs_type(kn)) {
+ case KERNFS_DIR:
+ iattr.ia_mode = S_IFDIR;
+ break;
+ case KERNFS_FILE:
+ iattr.ia_mode = S_IFREG;
+ break;
+ case KERNFS_LINK:
+ iattr.ia_mode = S_IFLNK;
+ break;
+ }
+
+ ret = kernfs_setattr(kn, &iattr);
+ kernfs_put(kn);
+ return ret;
+}
+
+/**
+ * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ * @mask: Mask of permissions that should be restored
+ *
+ * Restore the permissions of the named file. If @name is a directory the
+ * permissions of its parent will be used.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+ umode_t mask)
+{
+ struct iattr iattr = {.ia_valid = ATTR_MODE,};
+ struct kernfs_node *kn, *parent;
+ struct rftype *rfts, *rft;
+ int ret, len;
+
+ rfts = res_common_files;
+ len = ARRAY_SIZE(res_common_files);
+
+ for (rft = rfts; rft < rfts + len; rft++) {
+ if (!strcmp(rft->name, name))
+ iattr.ia_mode = rft->mode & mask;
+ }
+
+ kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+ if (!kn)
+ return -ENOENT;
+
+ switch (kernfs_type(kn)) {
+ case KERNFS_DIR:
+ parent = kernfs_get_parent(kn);
+ if (parent) {
+ iattr.ia_mode |= parent->mode;
+ kernfs_put(parent);
+ }
+ iattr.ia_mode |= S_IFDIR;
+ break;
+ case KERNFS_FILE:
+ iattr.ia_mode |= S_IFREG;
+ break;
+ case KERNFS_LINK:
+ iattr.ia_mode |= S_IFLNK;
+ break;
+ }
+
+ ret = kernfs_setattr(kn, &iattr);
+ kernfs_put(kn);
+ return ret;
+}
+
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
unsigned long fflags)
{
@@ -1224,6 +1799,9 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ rdtgroup_pseudo_lock_remove(rdtgrp);
kernfs_unbreak_active_protection(kn);
kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
@@ -1289,10 +1867,16 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
+ ret = rdt_pseudo_lock_init();
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_mondata;
+ }
+
dentry = kernfs_mount(fs_type, flags, rdt_root,
RDTGROUP_SUPER_MAGIC, NULL);
if (IS_ERR(dentry))
- goto out_mondata;
+ goto out_psl;
if (rdt_alloc_capable)
static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
@@ -1310,6 +1894,8 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
goto out;
+out_psl:
+ rdt_pseudo_lock_release();
out_mondata:
if (rdt_mon_capable)
kernfs_remove(kn_mondata);
@@ -1447,6 +2033,10 @@ static void rmdir_all_sub(void)
if (rdtgrp == &rdtgroup_default)
continue;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ rdtgroup_pseudo_lock_remove(rdtgrp);
+
/*
* Give any CPUs back to the default group. We cannot copy
* cpu_online_mask because a CPU might have executed the
@@ -1483,6 +2073,8 @@ static void rdt_kill_sb(struct super_block *sb)
reset_all_ctrls(r);
cdp_disable_all();
rmdir_all_sub();
+ rdt_pseudo_lock_release();
+ rdtgroup_default.mode = RDT_MODE_SHAREABLE;
static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
static_branch_disable_cpuslocked(&rdt_mon_enable_key);
static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -1682,6 +2274,114 @@ out_destroy:
return ret;
}
+/**
+ * cbm_ensure_valid - Enforce validity on provided CBM
+ * @_val: Candidate CBM
+ * @r: RDT resource to which the CBM belongs
+ *
+ * The provided CBM represents all cache portions available for use. This
+ * may be represented by a bitmap that does not consist of contiguous ones
+ * and would thus be an invalid CBM.
+ * Here the provided CBM is forced to be a valid CBM by keeping only the
+ * first set of contiguous bits and clearing all remaining bits.
+ * The intention here is to provide a valid default CBM with which a new
+ * resource group is initialized. The user can follow this with a
+ * modification to the CBM if the default does not satisfy the
+ * requirements.
+ */
+static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
+{
+ /*
+ * Convert the u32 _val to an unsigned long required by all the bit
+ * operations within this function. No more than 32 bits of this
+ * converted value can be accessed because all bit operations are
+ * additionally provided with cbm_len that is initialized during
+ * hardware enumeration using five bits from the EAX register and
+ * thus never can exceed 32 bits.
+ */
+ unsigned long *val = (unsigned long *)_val;
+ unsigned int cbm_len = r->cache.cbm_len;
+ unsigned long first_bit, zero_bit;
+
+ if (*val == 0)
+ return;
+
+ first_bit = find_first_bit(val, cbm_len);
+ zero_bit = find_next_zero_bit(val, cbm_len, first_bit);
+
+ /* Clear any remaining bits to ensure contiguous region */
+ bitmap_clear(val, zero_bit, cbm_len - zero_bit);
+}
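
As a worked example: a candidate CBM of 0xb6 (binary 10110110) has its first set bit at position 1 and its next zero bit at position 3, so bits 3 and above are cleared, leaving 0x06 - the first contiguous run only.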
+
+/**
+ * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+ *
+ * A new RDT group is being created on an allocation capable (CAT)
+ * supporting system. Set this group up to start off with all usable
+ * allocations. That is, all shareable and unused bits.
+ *
+ * All-zero CBM is invalid. If there are no more shareable bits available
+ * on any domain then the entire allocation will fail.
+ */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+ u32 used_b = 0, unused_b = 0;
+ u32 closid = rdtgrp->closid;
+ struct rdt_resource *r;
+ enum rdtgrp_mode mode;
+ struct rdt_domain *d;
+ int i, ret;
+ u32 *ctrl;
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d, &r->domains, list) {
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
+ used_b |= *ctrl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl;
+ }
+ }
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+ * Force the initial CBM to be valid, user can
+ * modify the CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ if (bitmap_weight((unsigned long *) &d->new_ctrl,
+ r->cache.cbm_len) <
+ r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("no space on %s:%d\n",
+ r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+ }
+ }
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ ret = update_domains(r, rdtgrp->closid);
+ if (ret < 0) {
+ rdt_last_cmd_puts("failed to initialize allocations\n");
+ return ret;
+ }
+ rdtgrp->mode = RDT_MODE_SHAREABLE;
+ }
+
+ return 0;
+}
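
To illustrate the unused-bit computation with made-up numbers: with cbm_len = 8 and used_b = 0x1f, BIT_MASK(8) - 1 = 0xff, so unused_b = 0x1f ^ 0xff = 0xe0 and the new group's CBM picks up the top three (unused) bits in addition to any shareable ones.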
+
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
struct kernfs_node *prgrp_kn,
const char *name, umode_t mode,
@@ -1700,6 +2400,14 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}
+ if (rtype == RDTMON_GROUP &&
+ (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto out_unlock;
+ }
+
/* allocate the rdtgroup. */
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
@@ -1840,6 +2548,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
ret = 0;
rdtgrp->closid = closid;
+ ret = rdtgroup_init_alloc(rdtgrp);
+ if (ret < 0)
+ goto out_id_free;
+
list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
if (rdt_mon_capable) {
@@ -1850,15 +2562,16 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
if (ret) {
rdt_last_cmd_puts("kernfs subdir error\n");
- goto out_id_free;
+ goto out_del_list;
}
}
goto out_unlock;
+out_del_list:
+ list_del(&rdtgrp->rdtgroup_list);
out_id_free:
closid_free(closid);
- list_del(&rdtgrp->rdtgroup_list);
out_common_fail:
mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
@@ -1945,6 +2658,21 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
return 0;
}
+static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
+ struct rdtgroup *rdtgrp)
+{
+ rdtgrp->flags = RDT_DELETED;
+ list_del(&rdtgrp->rdtgroup_list);
+
+ /*
+ * one extra hold on this, will drop when we kfree(rdtgrp)
+ * in rdtgroup_kn_unlock()
+ */
+ kernfs_get(kn);
+ kernfs_remove(rdtgrp->kn);
+ return 0;
+}
+
static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
cpumask_var_t tmpmask)
{
@@ -1970,7 +2698,6 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
update_closid_rmid(tmpmask, NULL);
- rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
free_rmid(rdtgrp->mon.rmid);
@@ -1979,14 +2706,7 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
*/
free_all_child_rdtgrp(rdtgrp);
- list_del(&rdtgrp->rdtgroup_list);
-
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
- kernfs_remove(rdtgrp->kn);
+ rdtgroup_ctrl_remove(kn, rdtgrp);
return 0;
}
@@ -2014,13 +2734,19 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
* If the rdtgroup is a mon group and parent directory
* is a valid "mon_groups" directory, remove the mon group.
*/
- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
- ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
- else if (rdtgrp->type == RDTMON_GROUP &&
- is_mon_groups(parent_kn, kn->name))
+ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ ret = rdtgroup_ctrl_remove(kn, rdtgrp);
+ } else {
+ ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
+ }
+ } else if (rdtgrp->type == RDTMON_GROUP &&
+ is_mon_groups(parent_kn, kn->name)) {
ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
- else
+ } else {
ret = -EPERM;
+ }
out:
rdtgroup_kn_unlock(kn);
@@ -2046,7 +2772,8 @@ static int __init rdtgroup_setup_root(void)
int ret;
rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
- KERNFS_ROOT_CREATE_DEACTIVATED,
+ KERNFS_ROOT_CREATE_DEACTIVATED |
+ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
&rdtgroup_default);
if (IS_ERR(rdt_root))
return PTR_ERR(rdt_root);
@@ -2102,6 +2829,29 @@ int __init rdtgroup_init(void)
if (ret)
goto cleanup_mountpoint;
+ /*
+ * Adding the resctrl debugfs directory here may not be ideal since
+ * it would let the resctrl debugfs directory appear on the debugfs
+ * filesystem before the resctrl filesystem is mounted. That may also
+ * be acceptable, since it enables debugging of RDT before resctrl
+ * is mounted.
+ * The debugfs directory is created here and not in rdt_mount()
+ * because rdt_mount() takes rdtgroup_mutex and, during the debugfs
+ * directory creation, also &sb->s_type->i_mutex_key (the lockdep
+ * class of inode->i_rwsem). Other filesystem interactions
+ * (e.g. SyS_getdents) have the lock ordering:
+ * &sb->s_type->i_mutex_key --> &mm->mmap_sem
+ * During mmap(), called with &mm->mmap_sem held, the rdtgroup_mutex
+ * is taken, creating the dependency:
+ * &mm->mmap_sem --> rdtgroup_mutex
+ * which, combined with the other two dependencies, could form a
+ * deadlock cycle.
+ * By creating the debugfs directory here we avoid that dependency
+ * (even though file operations cannot occur until the filesystem is
+ * mounted, there is no obvious way to tell lockdep that).
+ */
+ debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
+
return 0;
cleanup_mountpoint:
@@ -2111,3 +2861,11 @@ cleanup_root:
return ret;
}
+
+void __exit rdtgroup_exit(void)
+{
+ debugfs_remove_recursive(debugfs_resctrl);
+ unregister_filesystem(&rdt_fs_type);
+ sysfs_remove_mount_point(fs_kobj, "resctrl");
+ kernfs_destroy_root(rdt_root);
+}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c50754c09c1..4b767284b7f5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
- /* We hope get_seconds stays lockless */
- m->time = get_seconds();
+ /* need the internal __ version to avoid deadlocks */
+ m->time = __ktime_get_real_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
m->cpuid = cpuid_eax(1);
m->socketid = cpu_data(m->extcpu).phys_proc_id;
@@ -1104,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn)
}
#endif
+
+/*
+ * Cases where we avoid rendezvous handler timeout:
+ * 1) If this CPU is offline.
+ *
+ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
+ * skip those CPUs which remain looping in the 1st kernel - see
+ * crash_nmi_callback().
+ *
+ * Note: there still is a small window between kexec-ing and the new,
+ * kdump kernel establishing a new #MC handler where a broadcasted MCE
+ * might not get handled properly.
+ */
+static bool __mc_check_crashing_cpu(int cpu)
+{
+ if (cpu_is_offline(cpu) ||
+ (crashing_cpu != -1 && crashing_cpu != cpu)) {
+ u64 mcgstatus;
+
+ mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+ if (mcgstatus & MCG_STATUS_RIPV) {
+ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void __mc_scan_banks(struct mce *m, struct mce *final,
+ unsigned long *toclear, unsigned long *valid_banks,
+ int no_way_out, int *worst)
+{
+ struct mca_config *cfg = &mca_cfg;
+ int severity, i;
+
+ for (i = 0; i < cfg->banks; i++) {
+ __clear_bit(i, toclear);
+ if (!test_bit(i, valid_banks))
+ continue;
+
+ if (!mce_banks[i].ctl)
+ continue;
+
+ m->misc = 0;
+ m->addr = 0;
+ m->bank = i;
+
+ m->status = mce_rdmsrl(msr_ops.status(i));
+ if (!(m->status & MCI_STATUS_VAL))
+ continue;
+
+ /*
+ * Corrected or non-signaled errors are handled by
+ * machine_check_poll(). Leave them alone, unless this panics.
+ */
+ if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+ !no_way_out)
+ continue;
+
+ /* Set taint even when machine check was not enabled. */
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+ severity = mce_severity(m, cfg->tolerant, NULL, true);
+
+ /*
+ * When machine check was for corrected/deferred handler don't
+ * touch, unless we're panicking.
+ */
+ if ((severity == MCE_KEEP_SEVERITY ||
+ severity == MCE_UCNA_SEVERITY) && !no_way_out)
+ continue;
+
+ __set_bit(i, toclear);
+
+ /* Machine check event was not enabled. Clear, but ignore. */
+ if (severity == MCE_NO_SEVERITY)
+ continue;
+
+ mce_read_aux(m, i);
+
+ /* assuming valid severity level != 0 */
+ m->severity = severity;
+
+ mce_log(m);
+
+ if (severity > *worst) {
+ *final = *m;
+ *worst = severity;
+ }
+ }
+
+ /* mce_clear_state will clear *final, save locally for use later */
+ *m = *final;
+}
+
/*
* The actual machine check handler. This only handles real
* exceptions when something got corrupted coming in through int 18.
@@ -1118,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn)
*/
void do_machine_check(struct pt_regs *regs, long error_code)
{
+ DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
+ DECLARE_BITMAP(toclear, MAX_NR_BANKS);
struct mca_config *cfg = &mca_cfg;
+ int cpu = smp_processor_id();
+ char *msg = "Unknown";
struct mce m, *final;
- int i;
int worst = 0;
- int severity;
/*
* Establish sequential order between the CPUs entering the machine
* check handler.
*/
int order = -1;
+
/*
* If no_way_out gets set, there is no safe way to recover from this
* MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
*/
int no_way_out = 0;
+
/*
* If kill_it gets set, there might be a way to recover from this
* error.
*/
int kill_it = 0;
- DECLARE_BITMAP(toclear, MAX_NR_BANKS);
- DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
- char *msg = "Unknown";
/*
* MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
* on Intel.
*/
int lmce = 1;
- int cpu = smp_processor_id();
- /*
- * Cases where we avoid rendezvous handler timeout:
- * 1) If this CPU is offline.
- *
- * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
- * skip those CPUs which remain looping in the 1st kernel - see
- * crash_nmi_callback().
- *
- * Note: there still is a small window between kexec-ing and the new,
- * kdump kernel establishing a new #MC handler where a broadcasted MCE
- * might not get handled properly.
- */
- if (cpu_is_offline(cpu) ||
- (crashing_cpu != -1 && crashing_cpu != cpu)) {
- u64 mcgstatus;
-
- mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
- if (mcgstatus & MCG_STATUS_RIPV) {
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
- return;
- }
- }
+ if (__mc_check_crashing_cpu(cpu))
+ return;
ist_enter(regs);
this_cpu_inc(mce_exception_count);
- if (!cfg->banks)
- goto out;
-
mce_gather_info(&m, regs);
m.tsc = rdtsc();
@@ -1220,67 +1292,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
order = mce_start(&no_way_out);
}
- for (i = 0; i < cfg->banks; i++) {
- __clear_bit(i, toclear);
- if (!test_bit(i, valid_banks))
- continue;
- if (!mce_banks[i].ctl)
- continue;
-
- m.misc = 0;
- m.addr = 0;
- m.bank = i;
-
- m.status = mce_rdmsrl(msr_ops.status(i));
- if ((m.status & MCI_STATUS_VAL) == 0)
- continue;
-
- /*
- * Non uncorrected or non signaled errors are handled by
- * machine_check_poll. Leave them alone, unless this panics.
- */
- if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
- !no_way_out)
- continue;
-
- /*
- * Set taint even when machine check was not enabled.
- */
- add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
- severity = mce_severity(&m, cfg->tolerant, NULL, true);
-
- /*
- * When machine check was for corrected/deferred handler don't
- * touch, unless we're panicing.
- */
- if ((severity == MCE_KEEP_SEVERITY ||
- severity == MCE_UCNA_SEVERITY) && !no_way_out)
- continue;
- __set_bit(i, toclear);
- if (severity == MCE_NO_SEVERITY) {
- /*
- * Machine check event was not enabled. Clear, but
- * ignore.
- */
- continue;
- }
-
- mce_read_aux(&m, i);
-
- /* assuming valid severity level != 0 */
- m.severity = severity;
-
- mce_log(&m);
-
- if (severity > worst) {
- *final = m;
- worst = severity;
- }
- }
-
- /* mce_clear_state will clear *final, save locally for use later */
- m = *final;
+ __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
if (!no_way_out)
mce_clear_state(toclear);
@@ -1319,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (worst > 0)
mce_report_event(regs);
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-out:
+
sync_core();
if (worst != MCE_AR_SEVERITY && !kill_it)
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 666a284116ac..9c8652974f8e 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,8 +22,6 @@
#include <asm/stacktrace.h>
#include <asm/unwind.h>
-#define OPCODE_BUFSIZE 64
-
int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;
@@ -93,26 +91,18 @@ static void printk_stack_address(unsigned long address, int reliable,
*/
void show_opcodes(u8 *rip, const char *loglvl)
{
- unsigned int code_prologue = OPCODE_BUFSIZE * 2 / 3;
+#define PROLOGUE_SIZE 42
+#define EPILOGUE_SIZE 21
+#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
- u8 *ip;
- int i;
-
- printk("%sCode: ", loglvl);
-
- ip = (u8 *)rip - code_prologue;
- if (probe_kernel_read(opcodes, ip, OPCODE_BUFSIZE)) {
- pr_cont("Bad RIP value.\n");
- return;
- }
- for (i = 0; i < OPCODE_BUFSIZE; i++, ip++) {
- if (ip == rip)
- pr_cont("<%02x> ", opcodes[i]);
- else
- pr_cont("%02x ", opcodes[i]);
+ if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+ printk("%sCode: Bad RIP value.\n", loglvl);
+ } else {
+ printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
+ __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
+ opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
}
- pr_cont("\n");
}
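
The rewrite above leans on the kernel's %*ph printk extension, which dumps a small buffer (up to 64 bytes) as space-separated hex in a single printk() call, rather than a pr_cont() loop whose output can interleave with concurrent printing. A minimal sketch with hypothetical buffer contents:

	u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	pr_info("Code: %4ph\n", buf);	/* prints "Code: de ad be ef" */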
void show_ip(struct pt_regs *regs, const char *loglvl)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index abe6df15a8fb..30f9cb2c0b55 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -512,11 +512,18 @@ ENTRY(initial_code)
ENTRY(setup_once_ref)
.long setup_once
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define PGD_ALIGN (2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL 1024
+#else
+#define PGD_ALIGN (PAGE_SIZE)
+#define PTI_USER_PGD_FILL 0
+#endif
/*
* BSS section
*/
__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
+ .align PGD_ALIGN
#ifdef CONFIG_X86_PAE
.globl initial_pg_pmd
initial_pg_pmd:
@@ -526,14 +533,17 @@ initial_pg_pmd:
initial_page_table:
.fill 1024,4,0
#endif
+ .align PGD_ALIGN
initial_pg_fixmap:
.fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
- .fill 4096,1,0
.globl swapper_pg_dir
+ .align PGD_ALIGN
swapper_pg_dir:
.fill 1024,4,0
+ .fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+ .fill 4096,1,0
EXPORT_SYMBOL(empty_zero_page)
/*
@@ -542,7 +552,7 @@ EXPORT_SYMBOL(empty_zero_page)
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
- .align PAGE_SIZE
+ .align PGD_ALIGN
ENTRY(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 8344dd2f310a..15ebc2fc166e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -235,7 +235,7 @@ ENTRY(secondary_startup_64)
* address given in m16:64.
*/
pushq $.Lafter_lret # put return address on stack for unwinder
- xorq %rbp, %rbp # clear frame pointer
+ xorl %ebp, %ebp # clear frame pointer
movq initial_code(%rip), %rax
pushq $__KERNEL_CS # set correct cs
pushq %rax # target address in negative space
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8771766d46b6..34a5c1715148 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -169,28 +169,29 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
set_dr_addr_mask(0, i);
}
-/*
- * Check for virtual address in kernel space.
- */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+static int arch_bp_generic_len(int x86_len)
{
- unsigned int len;
- unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- va = info->address;
- len = bp->attr.bp_len;
-
- /*
- * We don't need to worry about va + len - 1 overflowing:
- * we already require that va is aligned to a multiple of len.
- */
- return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+ switch (x86_len) {
+ case X86_BREAKPOINT_LEN_1:
+ return HW_BREAKPOINT_LEN_1;
+ case X86_BREAKPOINT_LEN_2:
+ return HW_BREAKPOINT_LEN_2;
+ case X86_BREAKPOINT_LEN_4:
+ return HW_BREAKPOINT_LEN_4;
+#ifdef CONFIG_X86_64
+ case X86_BREAKPOINT_LEN_8:
+ return HW_BREAKPOINT_LEN_8;
+#endif
+ default:
+ return -EINVAL;
+ }
}
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
+ int len;
+
/* Type */
switch (x86_type) {
case X86_BREAKPOINT_EXECUTE:
@@ -211,42 +212,47 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
}
/* Len */
- switch (x86_len) {
- case X86_BREAKPOINT_LEN_1:
- *gen_len = HW_BREAKPOINT_LEN_1;
- break;
- case X86_BREAKPOINT_LEN_2:
- *gen_len = HW_BREAKPOINT_LEN_2;
- break;
- case X86_BREAKPOINT_LEN_4:
- *gen_len = HW_BREAKPOINT_LEN_4;
- break;
-#ifdef CONFIG_X86_64
- case X86_BREAKPOINT_LEN_8:
- *gen_len = HW_BREAKPOINT_LEN_8;
- break;
-#endif
- default:
+ len = arch_bp_generic_len(x86_len);
+ if (len < 0)
return -EINVAL;
- }
+ *gen_len = len;
return 0;
}
-
-static int arch_build_bp_info(struct perf_event *bp)
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned long va;
+ int len;
- info->address = bp->attr.bp_addr;
+ va = hw->address;
+ len = arch_bp_generic_len(hw->len);
+ WARN_ON_ONCE(len < 0);
+
+ /*
+ * We don't need to worry about va + len - 1 overflowing:
+ * we already require that va is aligned to a multiple of len.
+ */
+ return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+}
+
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ hw->address = attr->bp_addr;
+ hw->mask = 0;
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_W:
- info->type = X86_BREAKPOINT_WRITE;
+ hw->type = X86_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- info->type = X86_BREAKPOINT_RW;
+ hw->type = X86_BREAKPOINT_RW;
break;
case HW_BREAKPOINT_X:
/*
@@ -254,23 +260,23 @@ static int arch_build_bp_info(struct perf_event *bp)
* acceptable for kprobes. On non-kprobes kernels, we don't
* allow kernel breakpoints at all.
*/
- if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
+ if (attr->bp_addr >= TASK_SIZE_MAX) {
#ifdef CONFIG_KPROBES
- if (within_kprobe_blacklist(bp->attr.bp_addr))
+ if (within_kprobe_blacklist(attr->bp_addr))
return -EINVAL;
#else
return -EINVAL;
#endif
}
- info->type = X86_BREAKPOINT_EXECUTE;
+ hw->type = X86_BREAKPOINT_EXECUTE;
/*
* x86 inst breakpoints need to have a specific undefined len.
* But we still need to check userspace is not trying to setup
* an unsupported length, to get a range breakpoint for example.
*/
- if (bp->attr.bp_len == sizeof(long)) {
- info->len = X86_BREAKPOINT_LEN_X;
+ if (attr->bp_len == sizeof(long)) {
+ hw->len = X86_BREAKPOINT_LEN_X;
return 0;
}
default:
@@ -278,28 +284,26 @@ static int arch_build_bp_info(struct perf_event *bp)
}
/* Len */
- info->mask = 0;
-
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->len = X86_BREAKPOINT_LEN_1;
+ hw->len = X86_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->len = X86_BREAKPOINT_LEN_2;
+ hw->len = X86_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->len = X86_BREAKPOINT_LEN_4;
+ hw->len = X86_BREAKPOINT_LEN_4;
break;
#ifdef CONFIG_X86_64
case HW_BREAKPOINT_LEN_8:
- info->len = X86_BREAKPOINT_LEN_8;
+ hw->len = X86_BREAKPOINT_LEN_8;
break;
#endif
default:
/* AMD range breakpoint */
- if (!is_power_of_2(bp->attr.bp_len))
+ if (!is_power_of_2(attr->bp_len))
return -EINVAL;
- if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+ if (attr->bp_addr & (attr->bp_len - 1))
return -EINVAL;
if (!boot_cpu_has(X86_FEATURE_BPEXT))
@@ -312,8 +316,8 @@ static int arch_build_bp_info(struct perf_event *bp)
* breakpoints, then we'll have to check for kprobe-blacklisted
* addresses anywhere in the range.
*/
- info->mask = bp->attr.bp_len - 1;
- info->len = X86_BREAKPOINT_LEN_1;
+ hw->mask = attr->bp_len - 1;
+ hw->len = X86_BREAKPOINT_LEN_1;
}
return 0;
@@ -322,22 +326,23 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
- switch (info->len) {
+ switch (hw->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
- if (info->mask)
- align = info->mask;
+ if (hw->mask)
+ align = hw->mask;
break;
case X86_BREAKPOINT_LEN_2:
align = 1;
@@ -358,7 +363,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
- if (info->address & align)
+ if (hw->address & align)
return -EINVAL;
return 0;
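
For context, a sketch of the user-space side that ends up in this validation path; the watched variable is hypothetical. A 4-byte write watchpoint requires bp_addr to be 4-byte aligned, which is exactly what the alignment check above enforces.

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long watched_var;

static int open_write_watchpoint(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (unsigned long)&watched_var;
	attr.bp_len = HW_BREAKPOINT_LEN_4;

	/* pid 0 = current task, cpu -1 = any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}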
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index e56c95be2808..eeea935e9bb5 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -37,15 +37,18 @@ static void bug_at(unsigned char *ip, int line)
BUG();
}
-static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- void *(*poker)(void *, const void *, size_t),
- int init)
+static void __ref __jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ void *(*poker)(void *, const void *, size_t),
+ int init)
{
union jump_code_union code;
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+ if (early_boot_irqs_disabled)
+ poker = text_poke_early;
+
if (type == JUMP_LABEL_JMP) {
if (init) {
/*
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index ae38dccf0c8f..2b949f4fd4d8 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -105,14 +105,4 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
}
#endif
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- return 0;
-}
-#endif
#endif
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6f4d42377fe5..b0d1e81c96bb 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -66,8 +66,6 @@
#include "common.h"
-void jprobe_return_end(void);
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -395,8 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
- (u8 *) real;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
- pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
- src, real, insn->displacement.value);
return 0;
}
disp = (u8 *) dest + insn_offset_displacement(insn);
@@ -596,7 +592,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
* stepping.
*/
regs->ip = (unsigned long)p->ainsn.insn;
- preempt_enable_no_resched();
return;
}
#endif
@@ -640,8 +635,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
* Raise a BUG or we'll continue in an endless reentering loop
* and eventually a stack overflow.
*/
- printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_err("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
default:
@@ -669,12 +663,10 @@ int kprobe_int3_handler(struct pt_regs *regs)
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
- * We don't want to be preempted for the entire
- * duration of kprobe processing. We conditionally
- * re-enable preemption at the end of this function,
- * and also in reenter_kprobe() and setup_singlestep().
+ * We don't want to be preempted for the entire duration of kprobe
+ * processing. Since the int3 and debug traps disable irqs, and we clear
+ * IF while single-stepping, this context is not preemptible.
*/
- preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe(addr);
@@ -690,13 +682,14 @@ int kprobe_int3_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry
- * for jprobe processing, so get out doing nothing
- * more here.
+ * pre-handler and it returned non-zero, that means the
+ * user handler set up the registers to exit to another
+ * instruction, so we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
return 1;
}
} else if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -710,18 +703,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
* the original instruction.
*/
regs->ip = (unsigned long)addr;
- preempt_enable_no_resched();
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- if (!skip_singlestep(p, regs, kcb))
- setup_singlestep(p, regs, kcb, 0);
- return 1;
- }
} /* else: not a kprobe fault; let the kernel handle it */
- preempt_enable_no_resched();
return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
@@ -972,8 +956,6 @@ int kprobe_debug_handler(struct pt_regs *regs)
}
reset_current_kprobe();
out:
- preempt_enable_no_resched();
-
/*
* if somebody else is singlestepping across a probe point, flags
* will have TF set, in which case, continue the remaining processing
@@ -1020,7 +1002,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
- preempt_enable_no_resched();
} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
kcb->kprobe_status == KPROBE_HIT_SSDONE) {
/*
@@ -1083,93 +1064,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = stack_addr(regs);
- addr = (unsigned long)(kcb->jprobe_saved_sp);
-
- /*
- * As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
- * raw stack chunk with redzones:
- */
- __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
- regs->ip = (unsigned long)(jp->entry);
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer to get messed up.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- /* Unpoison stack redzones in the frames we are going to jump over. */
- kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
-
- asm volatile (
-#ifdef CONFIG_X86_64
- " xchg %%rbx,%%rsp \n"
-#else
- " xchgl %%ebx,%%esp \n"
-#endif
- " int3 \n"
- " .globl jprobe_return_end\n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
- (kcb->jprobe_saved_sp):"memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-NOKPROBE_SYMBOL(jprobe_return_end);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- u8 *addr = (u8 *) (regs->ip - 1);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- void *saved_sp = kcb->jprobe_saved_sp;
-
- if ((addr > (u8 *) jprobe_return) &&
- (addr < (u8 *) jprobe_return_end)) {
- if (stack_addr(regs) != saved_sp) {
- struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
- printk(KERN_ERR
- "current sp %p does not match saved sp %p\n",
- stack_addr(regs), saved_sp);
- printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
- show_regs(saved_regs);
- printk(KERN_ERR "Current registers\n");
- show_regs(regs);
- BUG();
- }
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
- *regs = kcb->jprobe_saved_regs;
- __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
bool arch_within_kprobe_blacklist(unsigned long addr)
{
bool is_in_entry_trampoline_section = false;
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 8dc0161cec8f..ef819e19650b 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -25,36 +25,6 @@
#include "common.h"
-static nokprobe_inline
-void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb, unsigned long orig_ip)
-{
- /*
- * Emulate singlestep (and also recover regs->ip)
- * as if there is a 5byte nop
- */
- regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- __this_cpu_write(current_kprobe, NULL);
- if (orig_ip)
- regs->ip = orig_ip;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- if (kprobe_ftrace(p)) {
- __skip_singlestep(p, regs, kcb, 0);
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
@@ -75,18 +45,25 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
- /* To emulate trap based kprobes, preempt_disable here */
- preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
- __skip_singlestep(p, regs, kcb, orig_ip);
- preempt_enable_no_resched();
+ /*
+ * Emulate singlestep (and also recover regs->ip)
+ * as if there is a 5byte nop
+ */
+ regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ regs->ip = orig_ip;
}
/*
- * If pre_handler returns !0, it sets regs->ip and
- * resets current kprobe, and keep preempt count +1.
+ * If pre_handler returns !0, it changes regs->ip. We have to
+ * skip emulating post_handler.
*/
+ __this_cpu_write(current_kprobe, NULL);
}
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 203d398802a3..eaf02f2e7300 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -491,7 +491,6 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
if (!reenter)
reset_current_kprobe();
- preempt_enable_no_resched();
return 1;
}
return 0;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5b2300b818af..09aaabb2bbf1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -45,7 +45,6 @@
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
-#include <asm/kvm_guest.h>
static int kvmapf = 1;
@@ -66,15 +65,6 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
-static int kvmclock_vsyscall = 1;
-static int __init parse_no_kvmclock_vsyscall(char *arg)
-{
- kvmclock_vsyscall = 0;
- return 0;
-}
-
-early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
-
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
@@ -154,7 +144,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
for (;;) {
if (!n.halted)
- prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
if (hlist_unhashed(&n.link))
break;
@@ -188,7 +178,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
if (n->halted)
smp_send_reschedule(n->cpu);
else if (swq_has_sleeper(&n->wq))
- swake_up(&n->wq);
+ swake_up_one(&n->wq);
}
static void apf_task_wake_all(void)
@@ -560,9 +550,6 @@ static void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
apic_set_eoi_write(kvm_guest_apic_eoi_write);
- if (kvmclock_vsyscall)
- kvm_setup_vsyscall_timeinfo();
-
#ifdef CONFIG_SMP
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
@@ -628,6 +615,7 @@ const __initconst struct hypervisor_x86 x86_hyper_kvm = {
.name = "KVM",
.detect = kvm_detect,
.type = X86_HYPER_KVM,
+ .init.init_platform = kvmclock_init,
.init.guest_late_init = kvm_guest_init,
.init.x2apic_available = kvm_para_available,
};
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 3b8e7c13c614..d2edd7e6c294 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -23,30 +23,56 @@
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
-#include <linux/memblock.h>
+#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/mm.h>
+#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/kvmclock.h>
-static int kvmclock __ro_after_init = 1;
-static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static u64 kvm_sched_clock_offset;
+static int kvmclock __initdata = 1;
+static int kvmclock_vsyscall __initdata = 1;
+static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static u64 kvm_sched_clock_offset __ro_after_init;
-static int parse_no_kvmclock(char *arg)
+static int __init parse_no_kvmclock(char *arg)
{
kvmclock = 0;
return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);
-/* The hypervisor will put information about time periodically here */
-static struct pvclock_vsyscall_time_info *hv_clock;
-static struct pvclock_wall_clock *wall_clock;
+static int __init parse_no_kvmclock_vsyscall(char *arg)
+{
+ kvmclock_vsyscall = 0;
+ return 0;
+}
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
+/* Aligned to page sizes to match what's mapped via vsyscalls to userspace */
+#define HV_CLOCK_SIZE (sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS)
+#define HVC_BOOT_ARRAY_SIZE \
+ (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
+
+static struct pvclock_vsyscall_time_info
+ hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock;
+static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+ return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+ return this_cpu_read(hv_clock_per_cpu);
+}
/*
* The wallclock is the time of day when we booted. Since then, some time may
@@ -55,21 +81,10 @@ static struct pvclock_wall_clock *wall_clock;
*/
static void kvm_get_wallclock(struct timespec64 *now)
{
- struct pvclock_vcpu_time_info *vcpu_time;
- int low, high;
- int cpu;
-
- low = (int)slow_virt_to_phys(wall_clock);
- high = ((u64)slow_virt_to_phys(wall_clock) >> 32);
-
- native_write_msr(msr_kvm_wall_clock, low, high);
-
- cpu = get_cpu();
-
- vcpu_time = &hv_clock[cpu].pvti;
- pvclock_read_wallclock(wall_clock, vcpu_time, now);
-
- put_cpu();
+ wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
+ preempt_disable();
+ pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
+ preempt_enable();
}
static int kvm_set_wallclock(const struct timespec64 *now)
@@ -79,14 +94,10 @@ static int kvm_set_wallclock(const struct timespec64 *now)
static u64 kvm_clock_read(void)
{
- struct pvclock_vcpu_time_info *src;
u64 ret;
- int cpu;
preempt_disable_notrace();
- cpu = smp_processor_id();
- src = &hv_clock[cpu].pvti;
- ret = pvclock_clocksource_read(src);
+ ret = pvclock_clocksource_read(this_cpu_pvti());
preempt_enable_notrace();
return ret;
}
@@ -112,11 +123,11 @@ static inline void kvm_sched_clock_init(bool stable)
kvm_sched_clock_offset = kvm_clock_read();
pv_time_ops.sched_clock = kvm_sched_clock_read;
- printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
- kvm_sched_clock_offset);
+ pr_info("kvm-clock: using sched offset of %llu cycles",
+ kvm_sched_clock_offset);
BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
- sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
+ sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}
/*
@@ -130,19 +141,11 @@ static inline void kvm_sched_clock_init(bool stable)
*/
static unsigned long kvm_get_tsc_khz(void)
{
- struct pvclock_vcpu_time_info *src;
- int cpu;
- unsigned long tsc_khz;
-
- cpu = get_cpu();
- src = &hv_clock[cpu].pvti;
- tsc_khz = pvclock_tsc_khz(src);
- put_cpu();
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- return tsc_khz;
+ return pvclock_tsc_khz(this_cpu_pvti());
}
-static void kvm_get_preset_lpj(void)
+static void __init kvm_get_preset_lpj(void)
{
unsigned long khz;
u64 lpj;
@@ -156,49 +159,40 @@ static void kvm_get_preset_lpj(void)
bool kvm_check_and_clear_guest_paused(void)
{
+ struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
bool ret = false;
- struct pvclock_vcpu_time_info *src;
- int cpu = smp_processor_id();
- if (!hv_clock)
+ if (!src)
return ret;
- src = &hv_clock[cpu].pvti;
- if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
- src->flags &= ~PVCLOCK_GUEST_STOPPED;
+ if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
+ src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
pvclock_touch_watchdogs();
ret = true;
}
-
return ret;
}
struct clocksource kvm_clock = {
- .name = "kvm-clock",
- .read = kvm_clock_get_cycles,
- .rating = 400,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .name = "kvm-clock",
+ .read = kvm_clock_get_cycles,
+ .rating = 400,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
EXPORT_SYMBOL_GPL(kvm_clock);
-int kvm_register_clock(char *txt)
+static void kvm_register_clock(char *txt)
{
- int cpu = smp_processor_id();
- int low, high, ret;
- struct pvclock_vcpu_time_info *src;
-
- if (!hv_clock)
- return 0;
+ struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
+ u64 pa;
- src = &hv_clock[cpu].pvti;
- low = (int)slow_virt_to_phys(src) | 1;
- high = ((u64)slow_virt_to_phys(src) >> 32);
- ret = native_write_msr_safe(msr_kvm_system_time, low, high);
- printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
- cpu, high, low, txt);
+ if (!src)
+ return;
- return ret;
+ pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
+ wrmsrl(msr_kvm_system_time, pa);
+ pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}
static void kvm_save_sched_clock_state(void)
@@ -213,11 +207,7 @@ static void kvm_restore_sched_clock_state(void)
#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
- /*
- * Now that the first cpu already had this clocksource initialized,
- * we shouldn't fail.
- */
- WARN_ON(kvm_register_clock("secondary cpu clock"));
+ kvm_register_clock("secondary cpu clock");
}
#endif
@@ -245,100 +235,84 @@ static void kvm_shutdown(void)
native_machine_shutdown();
}
-static phys_addr_t __init kvm_memblock_alloc(phys_addr_t size,
- phys_addr_t align)
+static int __init kvm_setup_vsyscall_timeinfo(void)
{
- phys_addr_t mem;
+#ifdef CONFIG_X86_64
+ u8 flags;
- mem = memblock_alloc(size, align);
- if (!mem)
+ if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
return 0;
- if (sev_active()) {
- if (early_set_memory_decrypted((unsigned long)__va(mem), size))
- goto e_free;
- }
+ flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+ if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+ return 0;
- return mem;
-e_free:
- memblock_free(mem, size);
+ kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
return 0;
}
+early_initcall(kvm_setup_vsyscall_timeinfo);
-static void __init kvm_memblock_free(phys_addr_t addr, phys_addr_t size)
+static int kvmclock_setup_percpu(unsigned int cpu)
{
- if (sev_active())
- early_set_memory_encrypted((unsigned long)__va(addr), size);
+ struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
- memblock_free(addr, size);
+ /*
+ * The per-cpu area setup replicates CPU0's data to all CPU
+ * pointers, so check carefully: CPU0 has already been set up
+ * in kvmclock_init().
+ */
+ if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
+ return 0;
+
+ /* Use the static page for the first CPUs, allocate otherwise */
+ if (cpu < HVC_BOOT_ARRAY_SIZE)
+ p = &hv_clock_boot[cpu];
+ else
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ per_cpu(hv_clock_per_cpu, cpu) = p;
+ return p ? 0 : -ENOMEM;
}
void __init kvmclock_init(void)
{
- struct pvclock_vcpu_time_info *vcpu_time;
- unsigned long mem, mem_wall_clock;
- int size, cpu, wall_clock_size;
u8 flags;
- size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
- if (!kvm_para_available())
+ if (!kvm_para_available() || !kvmclock)
return;
- if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
- } else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
- return;
-
- wall_clock_size = PAGE_ALIGN(sizeof(struct pvclock_wall_clock));
- mem_wall_clock = kvm_memblock_alloc(wall_clock_size, PAGE_SIZE);
- if (!mem_wall_clock)
- return;
-
- wall_clock = __va(mem_wall_clock);
- memset(wall_clock, 0, wall_clock_size);
-
- mem = kvm_memblock_alloc(size, PAGE_SIZE);
- if (!mem) {
- kvm_memblock_free(mem_wall_clock, wall_clock_size);
- wall_clock = NULL;
+ } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
return;
}
- hv_clock = __va(mem);
- memset(hv_clock, 0, size);
-
- if (kvm_register_clock("primary cpu clock")) {
- hv_clock = NULL;
- kvm_memblock_free(mem, size);
- kvm_memblock_free(mem_wall_clock, wall_clock_size);
- wall_clock = NULL;
+ if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
+ kvmclock_setup_percpu, NULL) < 0) {
return;
}
- printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+ pr_info("kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
- pvclock_set_pvti_cpu0_va(hv_clock);
+ this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
+ kvm_register_clock("primary cpu clock");
+ pvclock_set_pvti_cpu0_va(hv_clock_boot);
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
- cpu = get_cpu();
- vcpu_time = &hv_clock[cpu].pvti;
- flags = pvclock_read_flags(vcpu_time);
-
+ flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
- put_cpu();
x86_platform.calibrate_tsc = kvm_get_tsc_khz;
x86_platform.calibrate_cpu = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
- x86_cpuinit.early_percpu_clock_init =
- kvm_setup_secondary_clock;
+ x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
#endif
x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
@@ -350,31 +324,3 @@ void __init kvmclock_init(void)
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
pv_info.name = "KVM";
}
-
-int __init kvm_setup_vsyscall_timeinfo(void)
-{
-#ifdef CONFIG_X86_64
- int cpu;
- u8 flags;
- struct pvclock_vcpu_time_info *vcpu_time;
- unsigned int size;
-
- if (!hv_clock)
- return 0;
-
- size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
- cpu = get_cpu();
-
- vcpu_time = &hv_clock[cpu].pvti;
- flags = pvclock_read_flags(vcpu_time);
-
- put_cpu();
-
- if (!(flags & PVCLOCK_TSC_STABLE_BIT))
- return 1;
-
- kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
-#endif
- return 0;
-}
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c9b14020f4dd..733e6ace0fa4 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -100,6 +100,102 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
return new_ldt;
}
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+static void do_sanity_check(struct mm_struct *mm,
+ bool had_kernel_mapping,
+ bool had_user_mapping)
+{
+ if (mm->context.ldt) {
+ /*
+ * We already had an LDT. The top-level entry should already
+ * have been allocated and synchronized with the usermode
+ * tables.
+ */
+ WARN_ON(!had_kernel_mapping);
+ if (static_cpu_has(X86_FEATURE_PTI))
+ WARN_ON(!had_user_mapping);
+ } else {
+ /*
+ * This is the first time we're mapping an LDT for this process.
+ * Sync the pgd to the usermode tables.
+ */
+ WARN_ON(had_kernel_mapping);
+ if (static_cpu_has(X86_FEATURE_PTI))
+ WARN_ON(had_user_mapping);
+ }
+}
+
+#ifdef CONFIG_X86_PAE
+
+static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+
+ if (pgd->pgd == 0)
+ return NULL;
+
+ p4d = p4d_offset(pgd, va);
+ if (p4d_none(*p4d))
+ return NULL;
+
+ pud = pud_offset(p4d, va);
+ if (pud_none(*pud))
+ return NULL;
+
+ return pmd_offset(pud, va);
+}
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+ pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ pmd_t *k_pmd, *u_pmd;
+
+ k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+ u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+
+ if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ set_pmd(u_pmd, *k_pmd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+ pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ bool had_kernel, had_user;
+ pmd_t *k_pmd, *u_pmd;
+
+ k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+ u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+ had_kernel = (k_pmd->pmd != 0);
+ had_user = (u_pmd->pmd != 0);
+
+ do_sanity_check(mm, had_kernel, had_user);
+}
+
+#else /* !CONFIG_X86_PAE */
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+ pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+
+ if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+ pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ bool had_kernel = (pgd->pgd != 0);
+ bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);
+
+ do_sanity_check(mm, had_kernel, had_user);
+}
+
+#endif /* CONFIG_X86_PAE */
+
/*
* If PTI is enabled, this maps the LDT into the kernelmode and
* usermode tables for the given mm.
@@ -115,9 +211,8 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- bool is_vmalloc, had_top_level_entry;
unsigned long va;
+ bool is_vmalloc;
spinlock_t *ptl;
pgd_t *pgd;
int i;
@@ -131,13 +226,15 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
*/
WARN_ON(ldt->slot != -1);
+ /* Check if the current mappings are sane */
+ sanity_check_ldt_mapping(mm);
+
/*
* Did we already have the top level entry allocated? We can't
* use pgd_none() for this because it doesn't do anything on
* 4-level page table kernels.
*/
pgd = pgd_offset(mm, LDT_BASE_ADDR);
- had_top_level_entry = (pgd->pgd != 0);
is_vmalloc = is_vmalloc_addr(ldt->entries);
@@ -172,41 +269,31 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
pte_unmap_unlock(ptep, ptl);
}
- if (mm->context.ldt) {
- /*
- * We already had an LDT. The top-level entry should already
- * have been allocated and synchronized with the usermode
- * tables.
- */
- WARN_ON(!had_top_level_entry);
- if (static_cpu_has(X86_FEATURE_PTI))
- WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
- } else {
- /*
- * This is the first time we're mapping an LDT for this process.
- * Sync the pgd to the usermode tables.
- */
- WARN_ON(had_top_level_entry);
- if (static_cpu_has(X86_FEATURE_PTI)) {
- WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
- set_pgd(kernel_to_user_pgdp(pgd), *pgd);
- }
- }
+ /* Propagate LDT mapping to the user page-table */
+ map_ldt_struct_to_user(mm);
va = (unsigned long)ldt_slot_va(slot);
flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
ldt->slot = slot;
-#endif
return 0;
}
+#else /* !CONFIG_PAGE_TABLE_ISOLATION */
+
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+ return 0;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
struct mmu_gather tlb;
unsigned long start = LDT_BASE_ADDR;
- unsigned long end = start + (1UL << PGDIR_SHIFT);
+ unsigned long end = LDT_END_ADDR;
if (!static_cpu_has(X86_FEATURE_PTI))
return;
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index d1ab07ec8c9a..5409c2800ab5 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -56,7 +56,7 @@ static void load_segments(void)
static void machine_kexec_free_page_tables(struct kimage *image)
{
- free_page((unsigned long)image->arch.pgd);
+ free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
image->arch.pgd = NULL;
#ifdef CONFIG_X86_PAE
free_page((unsigned long)image->arch.pmd0);
@@ -72,7 +72,8 @@ static void machine_kexec_free_page_tables(struct kimage *image)
static int machine_kexec_alloc_page_tables(struct kimage *image)
{
- image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PGD_ALLOCATION_ORDER);
#ifdef CONFIG_X86_PAE
image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 99dc79e76bdc..930c88341e4e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnbuf,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (tgt_clobbers & ~site_clobbers)
- return len; /* target would clobber too much for this site */
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+ WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe8; /* call */
b->delta = delta;
@@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+ WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe9; /* jmp */
b->delta = delta;
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 9edadabf04f6..9cb98f7b07c9 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -20,7 +20,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
index 4dfd90a75e63..2e9006c1e240 100644
--- a/arch/x86/kernel/pci-iommu_table.c
+++ b/arch/x86/kernel/pci-iommu_table.c
@@ -60,7 +60,7 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
p->detect, q->detect);
/* Heavy-handed way... */
- x->depend = 0;
+ x->depend = NULL;
}
}
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c
index da5190a1ea16..4a710ffffd9a 100644
--- a/arch/x86/kernel/pcspeaker.c
+++ b/arch/x86/kernel/pcspeaker.c
@@ -9,6 +9,6 @@ static __init int add_pcspkr(void)
pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
- return IS_ERR(pd) ? PTR_ERR(pd) : 0;
+ return PTR_ERR_OR_ZERO(pd);
}
device_initcall(add_pcspkr);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 30ca2d1a9231..c93fcfdf1673 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,14 +57,12 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
*/
.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
-#ifdef CONFIG_X86_64
/*
* .sp1 is cpu_current_top_of_stack. The init task never
* runs user code, but cpu_current_top_of_stack should still
* be well defined before the first context switch.
*/
.sp1 = TOP_OF_INIT_STACK,
-#endif
#ifdef CONFIG_X86_32
.ss0 = __KERNEL_DS,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0ae659de21eb..2924fd447e61 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -285,7 +285,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* current_thread_info(). Refresh the SYSENTER configuration in
* case prev or next is vm86.
*/
- update_sp0(next_p);
+ update_task_stack(next_p);
refresh_sysenter_cs(next);
this_cpu_write(cpu_current_top_of_stack,
(unsigned long)task_stack_page(next_p) +
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 12bb445fb98d..476e3ddf8890 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -478,7 +478,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
/* Reload sp0. */
- update_sp0(next_p);
+ update_task_stack(next_p);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2f86d883dd95..5d32c55aeb8b 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -866,6 +866,8 @@ void __init setup_arch(char **cmdline_p)
idt_setup_early_traps();
early_cpu_init();
+ arch_init_ideal_nops();
+ jump_label_init();
early_ioremap_init();
setup_olpc_ofw_pgd();
@@ -1012,6 +1014,7 @@ void __init setup_arch(char **cmdline_p)
*/
init_hypervisor_platform();
+ tsc_early_init();
x86_init.resources.probe_roms();
/* after parse_early_param, so could debug it */
@@ -1197,11 +1200,6 @@ void __init setup_arch(char **cmdline_p)
memblock_find_dma_reserve();
-#ifdef CONFIG_KVM_GUEST
- kvmclock_init();
-#endif
-
- tsc_early_delay_calibrate();
if (!early_xdbc_setup_hardware())
early_xdbc_register_console();
@@ -1272,8 +1270,6 @@ void __init setup_arch(char **cmdline_p)
mcheck_init();
- arch_init_ideal_nops();
-
register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 093f2ea5dd56..7627455047c2 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -81,16 +81,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-#define STACKTRACE_DUMP_ONCE(task) ({ \
- static bool __section(.data.unlikely) __dumped; \
- \
- if (!__dumped) { \
- __dumped = true; \
- WARN_ON(1); \
- show_stack(task, NULL); \
- } \
-})
-
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
struct task_struct *task)
@@ -99,30 +89,25 @@ __save_stack_trace_reliable(struct stack_trace *trace,
struct pt_regs *regs;
unsigned long addr;
- for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
+ for (unwind_start(&state, task, NULL, NULL);
+ !unwind_done(&state) && !unwind_error(&state);
unwind_next_frame(&state)) {
regs = unwind_get_entry_regs(&state, NULL);
if (regs) {
+ /* Success path for user tasks */
+ if (user_mode(regs))
+ goto success;
+
/*
* Kernel mode registers on the stack indicate an
* in-kernel interrupt or exception (e.g., preemption
* or a page fault), which can make frame pointers
* unreliable.
*/
- if (!user_mode(regs))
- return -EINVAL;
- /*
- * The last frame contains the user mode syscall
- * pt_regs. Skip it and finish the unwind.
- */
- unwind_next_frame(&state);
- if (!unwind_done(&state)) {
- STACKTRACE_DUMP_ONCE(task);
+ if (IS_ENABLED(CONFIG_FRAME_POINTER))
return -EINVAL;
- }
- break;
}
addr = unwind_get_return_address(&state);
@@ -132,21 +117,22 @@ __save_stack_trace_reliable(struct stack_trace *trace,
* generated code which __kernel_text_address() doesn't know
* about.
*/
- if (!addr) {
- STACKTRACE_DUMP_ONCE(task);
+ if (!addr)
return -EINVAL;
- }
if (save_stack_address(trace, addr, false))
return -EINVAL;
}
/* Check for stack corruption */
- if (unwind_error(&state)) {
- STACKTRACE_DUMP_ONCE(task);
+ if (unwind_error(&state))
+ return -EINVAL;
+
+ /* Success path for non-user tasks, i.e. kthreads and idle tasks */
+ if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
- }
+success:
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 74392d9d51e0..1463468ba9a0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -33,16 +33,13 @@ EXPORT_SYMBOL(cpu_khz);
unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);
+#define KHZ 1000
+
/*
* TSC can be unstable due to cpufreq or due to unsynced TSCs
*/
static int __read_mostly tsc_unstable;
-/* native_sched_clock() is called before tsc_init(), so
- we must start with the TSC soft disabled to prevent
- erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
-static int __read_mostly tsc_disabled = -1;
-
static DEFINE_STATIC_KEY_FALSE(__use_tsc);
int tsc_clocksource_reliable;
@@ -106,23 +103,6 @@ void cyc2ns_read_end(void)
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static void cyc2ns_data_init(struct cyc2ns_data *data)
-{
- data->cyc2ns_mul = 0;
- data->cyc2ns_shift = 0;
- data->cyc2ns_offset = 0;
-}
-
-static void __init cyc2ns_init(int cpu)
-{
- struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
- cyc2ns_data_init(&c2n->data[0]);
- cyc2ns_data_init(&c2n->data[1]);
-
- seqcount_init(&c2n->seq);
-}
-
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
struct cyc2ns_data data;
@@ -138,18 +118,11 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
return ns;
}
-static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
unsigned long long ns_now;
struct cyc2ns_data data;
struct cyc2ns *c2n;
- unsigned long flags;
-
- local_irq_save(flags);
- sched_clock_idle_sleep_event();
-
- if (!khz)
- goto done;
ns_now = cycles_2_ns(tsc_now);
@@ -181,13 +154,56 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
c2n->data[0] = data;
raw_write_seqcount_latch(&c2n->seq);
c2n->data[1] = data;
+}
+
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sched_clock_idle_sleep_event();
+
+ if (khz)
+ __set_cyc2ns_scale(khz, cpu, tsc_now);
-done:
sched_clock_idle_wakeup_event();
local_irq_restore(flags);
}
/*
+ * Initialize cyc2ns for boot cpu
+ */
+static void __init cyc2ns_init_boot_cpu(void)
+{
+ struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+
+ seqcount_init(&c2n->seq);
+ __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
+}
+
+/*
+ * Secondary CPUs do not run through tsc_init(), so set up
+ * all the scale factors for all CPUs, assuming the same
+ * speed as the bootup CPU. (cpufreq notifiers will fix this
+ * up if their speed diverges)
+ */
+static void __init cyc2ns_init_secondary_cpus(void)
+{
+ unsigned int cpu, this_cpu = smp_processor_id();
+ struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+ struct cyc2ns_data *data = c2n->data;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu != this_cpu) {
+ seqcount_init(&c2n->seq);
+ c2n = per_cpu_ptr(&cyc2ns, cpu);
+ c2n->data[0] = data[0];
+ c2n->data[1] = data[1];
+ }
+ }
+}
+
+/*
* Scheduler clock - returns current time in nanosec units.
*/
u64 native_sched_clock(void)
@@ -248,8 +264,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
- pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
- tsc_disabled = 1;
+ mark_tsc_unstable("boot parameter notsc");
return 1;
}
#else
@@ -665,30 +680,17 @@ static unsigned long cpu_khz_from_cpuid(void)
return eax_base_mhz * 1000;
}
-/**
- * native_calibrate_cpu - calibrate the cpu on boot
+/*
+ * Calibrate the CPU using the PIT, HPET and ACPI PM timer methods. They
+ * are available later in boot, after ACPI has been initialized.
*/
-unsigned long native_calibrate_cpu(void)
+static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
- unsigned long flags, latch, ms, fast_calibrate;
+ unsigned long flags, latch, ms;
int hpet = is_hpet_enabled(), i, loopmin;
- fast_calibrate = cpu_khz_from_cpuid();
- if (fast_calibrate)
- return fast_calibrate;
-
- fast_calibrate = cpu_khz_from_msr();
- if (fast_calibrate)
- return fast_calibrate;
-
- local_irq_save(flags);
- fast_calibrate = quick_pit_calibrate();
- local_irq_restore(flags);
- if (fast_calibrate)
- return fast_calibrate;
-
/*
* Run 5 calibration loops to get the lowest frequency value
* (the best estimate). We use two different calibration modes
@@ -831,6 +833,37 @@ unsigned long native_calibrate_cpu(void)
return tsc_pit_min;
}
+/**
+ * native_calibrate_cpu_early - can calibrate the cpu early in boot
+ */
+unsigned long native_calibrate_cpu_early(void)
+{
+ unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
+
+ if (!fast_calibrate)
+ fast_calibrate = cpu_khz_from_msr();
+ if (!fast_calibrate) {
+ local_irq_save(flags);
+ fast_calibrate = quick_pit_calibrate();
+ local_irq_restore(flags);
+ }
+ return fast_calibrate;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu
+ */
+static unsigned long native_calibrate_cpu(void)
+{
+ unsigned long tsc_freq = native_calibrate_cpu_early();
+
+ if (!tsc_freq)
+ tsc_freq = pit_hpet_ptimer_calibrate_cpu();
+
+ return tsc_freq;
+}
+
void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
@@ -1307,7 +1340,7 @@ unreg:
static int __init init_tsc_clocksource(void)
{
- if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
+ if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
return 0;
if (tsc_unstable)
@@ -1341,40 +1374,22 @@ unreg:
*/
device_initcall(init_tsc_clocksource);
-void __init tsc_early_delay_calibrate(void)
+static bool __init determine_cpu_tsc_frequencies(bool early)
{
- unsigned long lpj;
-
- if (!boot_cpu_has(X86_FEATURE_TSC))
- return;
-
- cpu_khz = x86_platform.calibrate_cpu();
- tsc_khz = x86_platform.calibrate_tsc();
-
- tsc_khz = tsc_khz ? : cpu_khz;
- if (!tsc_khz)
- return;
-
- lpj = tsc_khz * 1000;
- do_div(lpj, HZ);
- loops_per_jiffy = lpj;
-}
-
-void __init tsc_init(void)
-{
- u64 lpj, cyc;
- int cpu;
-
- if (!boot_cpu_has(X86_FEATURE_TSC)) {
- setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
- return;
+ /* Make sure that cpu and tsc are not already calibrated */
+ WARN_ON(cpu_khz || tsc_khz);
+
+ if (early) {
+ cpu_khz = x86_platform.calibrate_cpu();
+ tsc_khz = x86_platform.calibrate_tsc();
+ } else {
+ /* We should not be here with non-native cpu calibration */
+ WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
+ cpu_khz = pit_hpet_ptimer_calibrate_cpu();
}
- cpu_khz = x86_platform.calibrate_cpu();
- tsc_khz = x86_platform.calibrate_tsc();
-
/*
- * Trust non-zero tsc_khz as authorative,
+ * Trust non-zero tsc_khz as authoritative,
* and use it to sanity check cpu_khz,
* which will be off if system timer is off.
*/
@@ -1383,52 +1398,78 @@ void __init tsc_init(void)
else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
cpu_khz = tsc_khz;
- if (!tsc_khz) {
- mark_tsc_unstable("could not calculate TSC khz");
- setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
- return;
- }
+ if (tsc_khz == 0)
+ return false;
pr_info("Detected %lu.%03lu MHz processor\n",
- (unsigned long)cpu_khz / 1000,
- (unsigned long)cpu_khz % 1000);
+ (unsigned long)cpu_khz / KHZ,
+ (unsigned long)cpu_khz % KHZ);
if (cpu_khz != tsc_khz) {
pr_info("Detected %lu.%03lu MHz TSC",
- (unsigned long)tsc_khz / 1000,
- (unsigned long)tsc_khz % 1000);
+ (unsigned long)tsc_khz / KHZ,
+ (unsigned long)tsc_khz % KHZ);
}
+ return true;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+ unsigned long lpj = tsc_khz * KHZ;
+ do_div(lpj, HZ);
+ return lpj;
+}
+
+static void __init tsc_enable_sched_clock(void)
+{
/* Sanitize TSC ADJUST before cyc2ns gets initialized */
tsc_store_and_check_tsc_adjust(true);
+ cyc2ns_init_boot_cpu();
+ static_branch_enable(&__use_tsc);
+}
+
+void __init tsc_early_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_TSC))
+ return;
+ if (!determine_cpu_tsc_frequencies(true))
+ return;
+ loops_per_jiffy = get_loops_per_jiffy();
+ tsc_enable_sched_clock();
+}
+
+void __init tsc_init(void)
+{
/*
- * Secondary CPUs do not run through tsc_init(), so set up
- * all the scale factors for all CPUs, assuming the same
- * speed as the bootup CPU. (cpufreq notifiers will fix this
- * up if their speed diverges)
+ * native_calibrate_cpu_early can only calibrate using methods that are
+ * available early in boot.
*/
- cyc = rdtsc();
- for_each_possible_cpu(cpu) {
- cyc2ns_init(cpu);
- set_cyc2ns_scale(tsc_khz, cpu, cyc);
- }
+ if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
+ x86_platform.calibrate_cpu = native_calibrate_cpu;
- if (tsc_disabled > 0)
+ if (!boot_cpu_has(X86_FEATURE_TSC)) {
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return;
+ }
- /* now allow native_sched_clock() to use rdtsc */
+ if (!tsc_khz) {
+ /* We failed to determine frequencies earlier, try again */
+ if (!determine_cpu_tsc_frequencies(false)) {
+ mark_tsc_unstable("could not calculate TSC khz");
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
+ }
+ tsc_enable_sched_clock();
+ }
- tsc_disabled = 0;
- static_branch_enable(&__use_tsc);
+ cyc2ns_init_secondary_cpus();
if (!no_sched_irq_time)
enable_sched_clock_irqtime();
- lpj = ((u64)tsc_khz * 1000);
- do_div(lpj, HZ);
- lpj_fine = lpj;
-
+ lpj_fine = get_loops_per_jiffy();
use_tsc_delay();
check_system_tsc_reliable();
@@ -1455,7 +1496,7 @@ unsigned long calibrate_delay_is_known(void)
int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
const struct cpumask *mask = topology_core_cpumask(cpu);
- if (tsc_disabled || !constant_tsc || !mask)
+ if (!constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 19afdbd7d0a7..27ef714d886c 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -1,17 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * tsc_msr.c - TSC frequency enumeration via MSR
+ * TSC frequency enumeration via MSR
*
- * Copyright (C) 2013 Intel Corporation
+ * Copyright (C) 2013, 2018 Intel Corporation
* Author: Bin Gao <bin.gao@intel.com>
- *
- * This file is released under the GPLv2.
*/
#include <linux/kernel.h>
-#include <asm/processor.h>
-#include <asm/setup.h>
+
#include <asm/apic.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
#include <asm/param.h>
+#include <asm/tsc.h>
#define MAX_NUM_FREQS 9
@@ -23,44 +25,48 @@
* field msr_plat does.
*/
struct freq_desc {
- u8 x86_family; /* CPU family */
- u8 x86_model; /* model */
u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
u32 freqs[MAX_NUM_FREQS];
};
-static struct freq_desc freq_desc_tables[] = {
- /* PNW */
- { 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
- /* CLV+ */
- { 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
- /* TNG - Intel Atom processor Z3400 series */
- { 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
- /* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
- { 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
- /* ANN - Intel Atom processor Z3500 series */
- { 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
- /* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
- { 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
- 80000, 93300, 90000, 88900, 87500 } },
+/*
+ * Penwell and Clovertrail use a spread-spectrum clock, so the
+ * frequency numbers are not exactly the same as those reported
+ * via the MSR according to the SDM.
+ */
+static const struct freq_desc freq_desc_pnw = {
+ 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 }
};
-static int match_cpu(u8 family, u8 model)
-{
- int i;
+static const struct freq_desc freq_desc_clv = {
+ 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 }
+};
- for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
- if ((family == freq_desc_tables[i].x86_family) &&
- (model == freq_desc_tables[i].x86_model))
- return i;
- }
+static const struct freq_desc freq_desc_byt = {
+ 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 }
+};
- return -1;
-}
+static const struct freq_desc freq_desc_cht = {
+ 1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 }
+};
-/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
-#define id_to_freq(cpu_index, freq_id) \
- (freq_desc_tables[cpu_index].freqs[freq_id])
+static const struct freq_desc freq_desc_tng = {
+ 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 }
+};
+
+static const struct freq_desc freq_desc_ann = {
+ 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 }
+};
+
+static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
+ INTEL_CPU_FAM6(ATOM_PENWELL, freq_desc_pnw),
+ INTEL_CPU_FAM6(ATOM_CLOVERVIEW, freq_desc_clv),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT1, freq_desc_byt),
+ INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht),
+ INTEL_CPU_FAM6(ATOM_MERRIFIELD, freq_desc_tng),
+ INTEL_CPU_FAM6(ATOM_MOOREFIELD, freq_desc_ann),
+ {}
+};
/*
* MSR-based CPU/TSC frequency discovery for certain CPUs.
@@ -70,18 +76,17 @@ static int match_cpu(u8 family, u8 model)
*/
unsigned long cpu_khz_from_msr(void)
{
- u32 lo, hi, ratio, freq_id, freq;
+ u32 lo, hi, ratio, freq;
+ const struct freq_desc *freq_desc;
+ const struct x86_cpu_id *id;
unsigned long res;
- int cpu_index;
-
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
- cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
- if (cpu_index < 0)
+ id = x86_match_cpu(tsc_msr_cpu_ids);
+ if (!id)
return 0;
- if (freq_desc_tables[cpu_index].msr_plat) {
+ freq_desc = (struct freq_desc *)id->driver_data;
+ if (freq_desc->msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
ratio = (lo >> 8) & 0xff;
} else {
@@ -91,8 +96,9 @@ unsigned long cpu_khz_from_msr(void)
/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
- freq_id = lo & 0x7;
- freq = id_to_freq(cpu_index, freq_id);
+
+ /* Map CPU reference clock freq ID (0-7) to CPU reference clock freq (KHz) */
+ freq = freq_desc->freqs[lo & 0x7];
/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
res = freq * ratio;
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index feb28fee6cea..26038eacf74a 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -198,7 +198,7 @@ static int orc_sort_cmp(const void *_a, const void *_b)
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = cur_orc_table + (a - cur_orc_ip_table);
- return orc_a->sp_reg == ORC_REG_UNDEFINED ? -1 : 1;
+ return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
#ifdef CONFIG_MODULES
@@ -352,7 +352,7 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
bool unwind_next_frame(struct unwind_state *state)
{
- unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
+ unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
enum stack_type prev_type = state->stack_info.type;
struct orc_entry *orc;
bool indirect = false;
@@ -363,9 +363,9 @@ bool unwind_next_frame(struct unwind_state *state)
/* Don't let modules unload while we're reading their ORC data. */
preempt_disable();
- /* Have we reached the end? */
+ /* End-of-stack check for user tasks: */
if (state->regs && user_mode(state->regs))
- goto done;
+ goto the_end;
/*
* Find the orc_entry associated with the text address.
@@ -374,9 +374,16 @@ bool unwind_next_frame(struct unwind_state *state)
* calls and calls to noreturn functions.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
- if (!orc || orc->sp_reg == ORC_REG_UNDEFINED)
- goto done;
- orig_ip = state->ip;
+ if (!orc)
+ goto err;
+
+ /* End-of-stack check for kernel threads: */
+ if (orc->sp_reg == ORC_REG_UNDEFINED) {
+ if (!orc->end)
+ goto err;
+
+ goto the_end;
+ }
/* Find the previous frame's stack: */
switch (orc->sp_reg) {
@@ -402,7 +409,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R10 at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->r10;
break;
@@ -411,7 +418,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R13 at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->r13;
break;
@@ -420,7 +427,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DI at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->di;
break;
@@ -429,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DX at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->dx;
break;
@@ -437,12 +444,12 @@ bool unwind_next_frame(struct unwind_state *state)
default:
orc_warn("unknown SP base reg %d for ip %pB\n",
orc->sp_reg, (void *)state->ip);
- goto done;
+ goto err;
}
if (indirect) {
if (!deref_stack_reg(state, sp, &sp))
- goto done;
+ goto err;
}
/* Find IP, SP and possibly regs: */
@@ -451,7 +458,7 @@ bool unwind_next_frame(struct unwind_state *state)
ip_p = sp - sizeof(long);
if (!deref_stack_reg(state, ip_p, &state->ip))
- goto done;
+ goto err;
state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
state->ip, (void *)ip_p);
@@ -465,7 +472,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
orc_warn("can't dereference registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
- goto done;
+ goto err;
}
state->regs = (struct pt_regs *)sp;
@@ -477,7 +484,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
orc_warn("can't dereference iret registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
- goto done;
+ goto err;
}
state->regs = (void *)sp - IRET_FRAME_OFFSET;
@@ -500,18 +507,18 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_PREV_SP:
if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
- goto done;
+ goto err;
break;
case ORC_REG_BP:
if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
- goto done;
+ goto err;
break;
default:
orc_warn("unknown BP base reg %d for ip %pB\n",
orc->bp_reg, (void *)orig_ip);
- goto done;
+ goto err;
}
/* Prevent a recursive loop due to bad ORC data: */
@@ -520,13 +527,16 @@ bool unwind_next_frame(struct unwind_state *state)
state->sp <= prev_sp) {
orc_warn("stack going in the wrong direction? ip=%pB\n",
(void *)orig_ip);
- goto done;
+ goto err;
}
preempt_enable();
return true;
-done:
+err:
+ state->error = true;
+
+the_end:
preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 9d0b5af7db91..1c03e4aa6474 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -149,7 +149,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
preempt_disable();
tsk->thread.sp0 = vm86->saved_sp0;
tsk->thread.sysenter_cs = __KERNEL_CS;
- update_sp0(tsk);
+ update_task_stack(tsk);
refresh_sysenter_cs(&tsk->thread);
vm86->saved_sp0 = 0;
preempt_enable();
@@ -374,7 +374,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
refresh_sysenter_cs(&tsk->thread);
}
- update_sp0(tsk);
+ update_task_stack(tsk);
preempt_enable();
if (vm86->flags & VM86_SCREEN_BITMAP)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 5e1458f609a1..8bde0a419f86 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -55,19 +55,22 @@ jiffies_64 = jiffies;
* so we can enable protection checks as well as retain 2MB large page
* mappings for kernel text.
*/
-#define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
+#define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
-#define X64_ALIGN_RODATA_END \
+#define X86_ALIGN_RODATA_END \
. = ALIGN(HPAGE_SIZE); \
- __end_rodata_hpage_align = .;
+ __end_rodata_hpage_align = .; \
+ __end_rodata_aligned = .;
#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
#else
-#define X64_ALIGN_RODATA_BEGIN
-#define X64_ALIGN_RODATA_END
+#define X86_ALIGN_RODATA_BEGIN
+#define X86_ALIGN_RODATA_END \
+ . = ALIGN(PAGE_SIZE); \
+ __end_rodata_aligned = .;
#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
@@ -141,9 +144,9 @@ SECTIONS
/* .text should occupy whole number of pages */
. = ALIGN(PAGE_SIZE);
- X64_ALIGN_RODATA_BEGIN
+ X86_ALIGN_RODATA_BEGIN
RO_DATA(PAGE_SIZE)
- X64_ALIGN_RODATA_END
+ X86_ALIGN_RODATA_END
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 3ab867603e81..2792b5573818 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -109,7 +109,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
static void default_nmi_init(void) { };
struct x86_platform_ops x86_platform __ro_after_init = {
- .calibrate_cpu = native_calibrate_cpu,
+ .calibrate_cpu = native_calibrate_cpu_early,
.calibrate_tsc = native_calibrate_tsc,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b5cd8465d44f..d536d457517b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1379,7 +1379,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
* using swait_active() is safe.
*/
if (swait_active(q))
- swake_up(q);
+ swake_up_one(q);
if (apic_lvtt_tscdeadline(apic))
ktimer->expired_tscdeadline = ktimer->tscdeadline;
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 298ef1479240..3b24dc05251c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -256,7 +256,7 @@ ENTRY(__memcpy_mcsafe)
/* Copy successful. Return zero */
.L_done_memcpy_trap:
- xorq %rax, %rax
+ xorl %eax, %eax
ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 2f3c9196b834..a12afff146d1 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -111,6 +111,8 @@ static struct addr_marker address_markers[] = {
[END_OF_SPACE_NR] = { -1, NULL }
};
+#define INIT_PGD ((pgd_t *) &init_top_pgt)
+
#else /* CONFIG_X86_64 */
enum address_markers_idx {
@@ -121,6 +123,9 @@ enum address_markers_idx {
#ifdef CONFIG_HIGHMEM
PKMAP_BASE_NR,
#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ LDT_NR,
+#endif
CPU_ENTRY_AREA_NR,
FIXADDR_START_NR,
END_OF_SPACE_NR,
@@ -134,11 +139,16 @@ static struct addr_marker address_markers[] = {
#ifdef CONFIG_HIGHMEM
[PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" },
#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ [LDT_NR] = { 0UL, "LDT remap" },
+#endif
[CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" },
[FIXADDR_START_NR] = { 0UL, "Fixmap area" },
[END_OF_SPACE_NR] = { -1, NULL }
};
+#define INIT_PGD (swapper_pg_dir)
+
#endif /* !CONFIG_X86_64 */
/* Multipliers for offsets within the PTEs */
@@ -496,11 +506,7 @@ static inline bool is_hypervisor_range(int idx)
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
bool checkwx, bool dmesg)
{
-#ifdef CONFIG_X86_64
- pgd_t *start = (pgd_t *) &init_top_pgt;
-#else
- pgd_t *start = swapper_pg_dir;
-#endif
+ pgd_t *start = INIT_PGD;
pgprotval_t prot, eff;
int i;
struct pg_state st = {};
@@ -563,12 +569,13 @@ void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
-static void ptdump_walk_user_pgd_level_checkwx(void)
+void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pgd_t *pgd = (pgd_t *) &init_top_pgt;
+ pgd_t *pgd = INIT_PGD;
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!(__supported_pte_mask & _PAGE_NX) ||
+ !static_cpu_has(X86_FEATURE_PTI))
return;
pr_info("x86/mm: Checking user space page tables\n");
@@ -580,7 +587,6 @@ static void ptdump_walk_user_pgd_level_checkwx(void)
void ptdump_walk_pgd_level_checkwx(void)
{
ptdump_walk_pgd_level_core(NULL, NULL, true, false);
- ptdump_walk_user_pgd_level_checkwx();
}
static int __init pt_dump_init(void)
@@ -609,6 +615,9 @@ static int __init pt_dump_init(void)
# endif
address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
+# ifdef CONFIG_MODIFY_LDT_SYSCALL
+ address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
+# endif
#endif
return 0;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2aafa6ab6103..db1c042e9853 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
- WARN_ON_ONCE(in_nmi());
-
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cee58a972cb2..74b157ac078d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -773,13 +773,44 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
}
}
+/*
+ * begin/end can be in the direct map or the "high kernel mapping"
+ * used for the kernel image only. free_init_pages() will do the
+ * right thing for either kind of address.
+ */
+void free_kernel_image_pages(void *begin, void *end)
+{
+ unsigned long begin_ul = (unsigned long)begin;
+ unsigned long end_ul = (unsigned long)end;
+ unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
+
+ free_init_pages("unused kernel image", begin_ul, end_ul);
+
+ /*
+ * PTI maps some of the kernel into userspace. For performance,
+ * this includes some kernel areas that do not contain secrets.
+ * Those areas might be adjacent to the parts of the kernel image
+ * being freed, which may contain secrets. Remove the "high kernel
+ * image mapping" for these freed areas, ensuring they are not even
+ * potentially vulnerable to Meltdown regardless of the specific
+ * optimizations PTI is currently using.
+ *
+ * The "noalias" prevents unmapping the direct map alias which is
+ * needed to access the freed pages.
+ *
+ * This is only valid for 64bit kernels. 32bit has only one mapping
+ * which can't be treated in this way for obvious reasons.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
+ set_memory_np_noalias(begin_ul, len_pages);
+}
+
void __ref free_initmem(void)
{
e820__reallocate_tables();
- free_init_pages("unused kernel",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
+ free_kernel_image_pages(&__init_begin, &__init_end);
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a688617c727e..dd519f372169 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1283,20 +1283,10 @@ void mark_rodata_ro(void)
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
- free_init_pages("unused kernel",
- (unsigned long) __va(__pa_symbol(text_end)),
- (unsigned long) __va(__pa_symbol(rodata_start)));
- free_init_pages("unused kernel",
- (unsigned long) __va(__pa_symbol(rodata_end)),
- (unsigned long) __va(__pa_symbol(_sdata)));
+ free_kernel_image_pages((void *)text_end, (void *)rodata_start);
+ free_kernel_image_pages((void *)rodata_end, (void *)_sdata);
debug_checkwx();
-
- /*
- * Do this after all of the manipulation of the
- * kernel text page tables are complete.
- */
- pti_clone_kernel_text();
}
int kern_addr_valid(unsigned long addr)
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 34a2a3bfde9c..b54d52a2d00a 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -61,7 +61,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
eb->nid = nid;
if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
- emu_nid_to_phys[nid] = nid;
+ emu_nid_to_phys[nid] = pb->nid;
pb->start += size;
if (pb->start >= pb->end) {
@@ -198,40 +198,73 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
return end;
}
+static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)
+{
+ unsigned long max_pfn = PHYS_PFN(max_addr);
+ unsigned long base_pfn = PHYS_PFN(base);
+ unsigned long hole_pfns = PHYS_PFN(hole);
+
+ return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
+}
+
/*
* Sets up fake nodes of `size' interleaved over physical nodes ranging from
* `addr' to `max_addr'.
*
* Returns zero on success or negative on error.
*/
-static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
struct numa_meminfo *pi,
- u64 addr, u64 max_addr, u64 size)
+ u64 addr, u64 max_addr, u64 size,
+ int nr_nodes, struct numa_memblk *pblk,
+ int nid)
{
nodemask_t physnode_mask = numa_nodes_parsed;
+ int i, ret, uniform = 0;
u64 min_size;
- int nid = 0;
- int i, ret;
- if (!size)
+ if ((!size && !nr_nodes) || (nr_nodes && !pblk))
return -1;
+
/*
- * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
- * increased accordingly if the requested size is too small. This
- * creates a uniform distribution of node sizes across the entire
- * machine (but not necessarily over physical nodes).
+ * In the 'uniform' case, split the passed-in physical node by
+ * nr_nodes; in the non-uniform case, ignore the passed-in
+ * physical block and try to create nodes of at least size
+ * @size.
+ *
+ * In the uniform case, split the nodes strictly by physical
+ * capacity, i.e. ignore holes. In the non-uniform case, account
+ * for holes and treat @size as a minimum floor.
*/
- min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
- min_size = max(min_size, FAKE_NODE_MIN_SIZE);
- if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
- min_size = (min_size + FAKE_NODE_MIN_SIZE) &
- FAKE_NODE_MIN_HASH_MASK;
+ if (!nr_nodes)
+ nr_nodes = MAX_NUMNODES;
+ else {
+ nodes_clear(physnode_mask);
+ node_set(pblk->nid, physnode_mask);
+ uniform = 1;
+ }
+
+ if (uniform) {
+ min_size = uniform_size(max_addr, addr, 0, nr_nodes);
+ size = min_size;
+ } else {
+ /*
+ * The limit on emulated nodes is MAX_NUMNODES, so the
+ * size per node is increased accordingly if the
+ * requested size is too small. This creates a uniform
+ * distribution of node sizes across the entire machine
+ * (but not necessarily over physical nodes).
+ */
+ min_size = uniform_size(max_addr, addr,
+ mem_hole_size(addr, max_addr), nr_nodes);
+ }
+ min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
if (size < min_size) {
pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
size >> 20, min_size >> 20);
size = min_size;
}
- size &= FAKE_NODE_MIN_HASH_MASK;
+ size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
/*
* Fill physical nodes with fake nodes of size until there is no memory
@@ -248,10 +281,14 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
node_clear(i, physnode_mask);
continue;
}
+
start = pi->blk[phys_blk].start;
limit = pi->blk[phys_blk].end;
- end = find_end_of_node(start, limit, size);
+ if (uniform)
+ end = start + size;
+ else
+ end = find_end_of_node(start, limit, size);
/*
* If there won't be at least FAKE_NODE_MIN_SIZE of
* non-reserved memory in ZONE_DMA32 for the next node,
@@ -266,7 +303,8 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* next node, this one must extend to the end of the
* physical node.
*/
- if (limit - end - mem_hole_size(end, limit) < size)
+ if ((limit - end - mem_hole_size(end, limit) < size)
+ && !uniform)
end = limit;
ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -276,7 +314,15 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
return ret;
}
}
- return 0;
+ return nid;
+}
+
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+ struct numa_meminfo *pi,
+ u64 addr, u64 max_addr, u64 size)
+{
+ return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
+ 0, NULL, NUMA_NO_NODE);
}
int __init setup_emu2phys_nid(int *dfl_phys_nid)
@@ -346,7 +392,28 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
* the fixed node size. Otherwise, if it is just a single number N,
* split the system RAM into N fake nodes.
*/
- if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
+ if (strchr(emu_cmdline, 'U')) {
+ nodemask_t physnode_mask = numa_nodes_parsed;
+ unsigned long n;
+ int nid = 0;
+
+ n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
+ ret = -1;
+ for_each_node_mask(i, physnode_mask) {
+ ret = split_nodes_size_interleave_uniform(&ei, &pi,
+ pi.blk[i].start, pi.blk[i].end, 0,
+ n, &pi.blk[i], nid);
+ if (ret < 0)
+ break;
+ if (ret < n) {
+ pr_info("%s: phys: %d only got %d of %ld nodes, failing\n",
+ __func__, i, ret, n);
+ ret = -1;
+ break;
+ }
+ nid = ret;
+ }
+ } else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
u64 size;
size = memparse(emu_cmdline, &emu_cmdline);
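For reference, the arithmetic in uniform_size() above divides in page-frame units so that each fake node's span stays page aligned. A minimal user-space sketch of the same computation, assuming a 4 KiB page size (the macro names mirror the kernel's, but nothing here is kernel API):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PHYS_PFN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)

/* Divide in PFN units so the per-node size is page aligned. */
static uint64_t uniform_size(uint64_t max_addr, uint64_t base,
			     uint64_t hole, int nr_nodes)
{
	return PFN_PHYS((PHYS_PFN(max_addr) - PHYS_PFN(base) -
			 PHYS_PFN(hole)) / nr_nodes);
}

int main(void)
{
	/* Split a 16 GiB physical node into 4 uniform fake nodes. */
	uint64_t size = uniform_size(16ULL << 30, 0, 0, 4);

	printf("per-node size: %llu MiB\n",
	       (unsigned long long)(size >> 20));
	return 0;
}

The 'U' suffix path in numa_emulation() above feeds each parsed physical node through split_nodes_size_interleave_uniform() with a zero hole argument, so the uniform split is strictly by capacity.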
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3bded76e8d5c..0a74996a1149 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -53,6 +53,7 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
+#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -1486,6 +1487,9 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/* No alias checking for _NX bit modifications */
checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
+ /* Has caller explicitly disabled alias checking? */
+ if (in_flag & CPA_NO_CHECK_ALIAS)
+ checkalias = 0;
ret = __change_page_attr_set_clr(&cpa, checkalias);
@@ -1772,6 +1776,15 @@ int set_memory_np(unsigned long addr, int numpages)
return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}
+int set_memory_np_noalias(unsigned long addr, int numpages)
+{
+ int cpa_flags = CPA_NO_CHECK_ALIAS;
+
+ return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
+ __pgprot(_PAGE_PRESENT), 0,
+ cpa_flags, NULL);
+}
+
int set_memory_4k(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
@@ -1784,6 +1797,12 @@ int set_memory_nonglobal(unsigned long addr, int numpages)
__pgprot(_PAGE_GLOBAL), 0);
}
+int set_memory_global(unsigned long addr, int numpages)
+{
+ return change_page_attr_set(&addr, numpages,
+ __pgprot(_PAGE_GLOBAL), 0);
+}
+
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
struct cpa_data cpa;
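The new CPA_NO_CHECK_ALIAS bit above extends the existing in_flag convention: each CPA_* value is a distinct power of two, and this one lets a caller opt out of the default alias search. A toy sketch of that opt-out flag pattern (all names here are illustrative, not the kernel's):

#include <stdio.h>

#define FLAG_FLUSHTLB		1
#define FLAG_ARRAY		2
#define FLAG_PAGES_ARRAY	4
#define FLAG_NO_CHECK_ALIAS	8	/* caller opts out of alias search */

static void change_attrs(unsigned long addr, int in_flag)
{
	int checkalias = 1;		/* default: search for aliases */

	if (in_flag & FLAG_NO_CHECK_ALIAS)
		checkalias = 0;

	printf("addr=%#lx checkalias=%d\n", addr, checkalias);
}

int main(void)
{
	change_attrs(0x1000, 0);
	change_attrs(0x2000, FLAG_NO_CHECK_ALIAS);
	return 0;
}

set_memory_np_noalias() is exactly this shape: the same set/clear masks as set_memory_np(), plus the flag that suppresses the alias walk.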
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 47b5951e592b..3ef095c70ae3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -182,6 +182,14 @@ static void pgd_dtor(pgd_t *pgd)
*/
#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
+/*
+ * We allocate separate PMDs for the kernel part of the user page-table
+ * when PTI is enabled. We need them to map the per-process LDT into the
+ * user-space page-table.
+ */
+#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
+ KERNEL_PGD_PTRS : 0)
+
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
@@ -202,14 +210,14 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
-
+#define PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
- for(i = 0; i < PREALLOCATED_PMDS; i++)
+ for (i = 0; i < count; i++)
if (pmds[i]) {
pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
free_page((unsigned long)pmds[i]);
@@ -217,7 +225,7 @@ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
}
}
-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
bool failed = false;
@@ -226,7 +234,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
if (mm == &init_mm)
gfp &= ~__GFP_ACCOUNT;
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
+ for (i = 0; i < count; i++) {
pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
if (!pmd)
failed = true;
@@ -241,7 +249,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
}
if (failed) {
- free_pmds(mm, pmds);
+ free_pmds(mm, pmds, count);
return -ENOMEM;
}
@@ -254,23 +262,38 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
+static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
+{
+ pgd_t pgd = *pgdp;
+
+ if (pgd_val(pgd) != 0) {
+ pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+ *pgdp = native_make_pgd(0);
+
+ paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+ pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
+ }
+}
+
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
int i;
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
- pgd_t pgd = pgdp[i];
+ for (i = 0; i < PREALLOCATED_PMDS; i++)
+ mop_up_one_pmd(mm, &pgdp[i]);
- if (pgd_val(pgd) != 0) {
- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pgdp[i] = native_make_pgd(0);
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
- pmd_free(mm, pmd);
- mm_dec_nr_pmds(mm);
- }
- }
+ pgdp = kernel_to_user_pgdp(pgdp);
+
+ for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
+ mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
+#endif
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
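Threading the count through preallocate_pmds()/free_pmds() lets one pair of helpers manage both the kernel PMD array and the new user PMD array. The allocate-everything-or-unwind shape, sketched in user-space C (hypothetical names; malloc() stands in for __get_free_page()):

#include <stdlib.h>

static void free_all(void **pages, int count)
{
	int i;

	for (i = 0; i < count; i++)
		free(pages[i]);	/* free(NULL) is a no-op */
}

static int preallocate(void **pages, int count)
{
	int i, failed = 0;

	for (i = 0; i < count; i++) {
		pages[i] = malloc(4096);
		if (!pages[i])
			failed = 1;
	}
	if (failed) {
		free_all(pages, count);
		return -1;
	}
	return 0;
}

int main(void)
{
	void *pmds[4] = { 0 };

	if (preallocate(pmds, 4))
		return 1;
	free_all(pmds, 4);
	return 0;
}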
@@ -296,6 +319,38 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
}
}
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+ pgd_t *k_pgd, pmd_t *pmds[])
+{
+ pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ p4d_t *u_p4d;
+ pud_t *u_pud;
+ int i;
+
+ u_p4d = p4d_offset(u_pgd, 0);
+ u_pud = pud_offset(u_p4d, 0);
+
+ s_pgd += KERNEL_PGD_BOUNDARY;
+ u_pud += KERNEL_PGD_BOUNDARY;
+
+ for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
+ pmd_t *pmd = pmds[i];
+
+ memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
+ sizeof(pmd_t) * PTRS_PER_PMD);
+
+ pud_populate(mm, u_pud, pmd);
+ }
+
+}
+#else
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+ pgd_t *k_pgd, pmd_t *pmds[])
+{
+}
+#endif
/*
* Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
* assumes that pgd should be in one page.
@@ -329,9 +384,6 @@ static int __init pgd_cache_init(void)
*/
pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
SLAB_PANIC, NULL);
- if (!pgd_cache)
- return -ENOMEM;
-
return 0;
}
core_initcall(pgd_cache_init);
@@ -343,7 +395,8 @@ static inline pgd_t *_pgd_alloc(void)
* We allocate one page for pgd.
*/
if (!SHARED_KERNEL_PMD)
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+ return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+ PGD_ALLOCATION_ORDER);
/*
* Now PAE kernel is not running as a Xen domain. We can allocate
@@ -355,7 +408,7 @@ static inline pgd_t *_pgd_alloc(void)
static inline void _pgd_free(pgd_t *pgd)
{
if (!SHARED_KERNEL_PMD)
- free_page((unsigned long)pgd);
+ free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
else
kmem_cache_free(pgd_cache, pgd);
}
@@ -375,6 +428,7 @@ static inline void _pgd_free(pgd_t *pgd)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
+ pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
pmd_t *pmds[PREALLOCATED_PMDS];
pgd = _pgd_alloc();
@@ -384,12 +438,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
mm->pgd = pgd;
- if (preallocate_pmds(mm, pmds) != 0)
+ if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
goto out_free_pgd;
- if (paravirt_pgd_alloc(mm) != 0)
+ if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
goto out_free_pmds;
+ if (paravirt_pgd_alloc(mm) != 0)
+ goto out_free_user_pmds;
+
/*
* Make sure that pre-populating the pmds is atomic with
* respect to anything walking the pgd_list, so that they
@@ -399,13 +456,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_ctor(mm, pgd);
pgd_prepopulate_pmd(mm, pgd, pmds);
+ pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
spin_unlock(&pgd_lock);
return pgd;
+out_free_user_pmds:
+ free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
- free_pmds(mm, pmds);
+ free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
_pgd_free(pgd);
out:
@@ -719,28 +779,50 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
+#ifdef CONFIG_X86_64
/**
* pud_free_pmd_page - Clear pud entry and free pmd page.
* @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
*
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
*/
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd;
+ pmd_t *pmd, *pmd_sv;
+ pte_t *pte;
int i;
if (pud_none(*pud))
return 1;
pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+ if (!pmd_sv)
+ return 0;
- for (i = 0; i < PTRS_PER_PMD; i++)
- if (!pmd_free_pte_page(&pmd[i]))
- return 0;
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd_sv[i] = pmd[i];
+ if (!pmd_none(pmd[i]))
+ pmd_clear(&pmd[i]);
+ }
pud_clear(pud);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd_sv[i])) {
+ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+ free_page((unsigned long)pte);
+ }
+ }
+
+ free_page((unsigned long)pmd_sv);
free_page((unsigned long)pmd);
return 1;
@@ -749,11 +831,12 @@ int pud_free_pmd_page(pud_t *pud)
/**
* pmd_free_pte_page - Clear pmd entry and free pte page.
* @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
*
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
@@ -762,8 +845,30 @@ int pmd_free_pte_page(pmd_t *pmd)
pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
free_page((unsigned long)pte);
return 1;
}
+
+#else /* !CONFIG_X86_64 */
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This ensures that ioremap()
+ * does not update sync'd pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return pmd_none(*pmd);
+}
+
+#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
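The ordering in the reworked pud_free_pmd_page() is the point of the change: snapshot the PMD entries, clear the live table, flush so that no paging-structure cache can still reach the old pages, and only then free them. A user-space analog of that snapshot-clear-flush-free sequence (illustrative only; flush_caches() stands in for the flush_tlb_kernel_range() call):

#include <stdlib.h>
#include <string.h>

#define NENTRIES 512

static void flush_caches(void) { /* models the INVLPG-based flush */ }

static int free_table(void **table)
{
	void **save = malloc(sizeof(void *) * NENTRIES);
	int i;

	if (!save)
		return 0;	/* mirrors the "allow a single page allocation" note */

	/* 1. Snapshot the entries, 2. clear the live table. */
	memcpy(save, table, sizeof(void *) * NENTRIES);
	memset(table, 0, sizeof(void *) * NENTRIES);

	/* 3. Nothing visible points at the pages any more; flush... */
	flush_caches();

	/* 4. ...then free the now-unreachable pages. */
	for (i = 0; i < NENTRIES; i++)
		free(save[i]);	/* free(NULL) is a no-op */

	free(save);
	return 1;
}

int main(void)
{
	void **table = calloc(NENTRIES, sizeof(void *));

	if (!table)
		return 1;
	table[0] = malloc(64);
	if (!free_table(table))
		return 1;
	free(table);
	return 0;
}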
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 4d418e705878..d58b4aba9510 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -54,6 +54,16 @@
#define __GFP_NOTRACK 0
#endif
+/*
+ * Define the page-table levels we clone for user-space on 32
+ * and 64 bit.
+ */
+#ifdef CONFIG_X86_64
+#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PMD
+#else
+#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PTE
+#endif
+
static void __init pti_print_if_insecure(const char *reason)
{
if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
@@ -117,7 +127,7 @@ enable:
setup_force_cpu_cap(X86_FEATURE_PTI);
}
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
/*
* Changes to the high (kernel) portion of the kernelmode page
@@ -176,7 +186,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
if (pgd_none(*pgd)) {
unsigned long new_p4d_page = __get_free_page(gfp);
- if (!new_p4d_page)
+ if (WARN_ON_ONCE(!new_p4d_page))
return NULL;
set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
@@ -195,13 +205,17 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+ p4d_t *p4d;
pud_t *pud;
+ p4d = pti_user_pagetable_walk_p4d(address);
+ if (!p4d)
+ return NULL;
+
BUILD_BUG_ON(p4d_large(*p4d) != 0);
if (p4d_none(*p4d)) {
unsigned long new_pud_page = __get_free_page(gfp);
- if (!new_pud_page)
+ if (WARN_ON_ONCE(!new_pud_page))
return NULL;
set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
@@ -215,7 +229,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
}
if (pud_none(*pud)) {
unsigned long new_pmd_page = __get_free_page(gfp);
- if (!new_pmd_page)
+ if (WARN_ON_ONCE(!new_pmd_page))
return NULL;
set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
@@ -224,7 +238,6 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
return pmd_offset(pud, address);
}
-#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
* Walk the shadow copy of the page tables (optionally) trying to allocate
* page table pages on the way down. Does not support large pages.
@@ -237,9 +250,13 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+ pmd_t *pmd;
pte_t *pte;
+ pmd = pti_user_pagetable_walk_pmd(address);
+ if (!pmd)
+ return NULL;
+
/* We can't do anything sensible if we hit a large mapping. */
if (pmd_large(*pmd)) {
WARN_ON(1);
@@ -262,6 +279,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
return pte;
}
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
pte_t *pte, *target_pte;
@@ -282,8 +300,14 @@ static void __init pti_setup_vsyscall(void)
static void __init pti_setup_vsyscall(void) { }
#endif
+enum pti_clone_level {
+ PTI_CLONE_PMD,
+ PTI_CLONE_PTE,
+};
+
static void
-pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
+pti_clone_pgtable(unsigned long start, unsigned long end,
+ enum pti_clone_level level)
{
unsigned long addr;
@@ -291,59 +315,105 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
* Clone the populated PMDs which cover start to end. These PMD areas
* can have holes.
*/
- for (addr = start; addr < end; addr += PMD_SIZE) {
+ for (addr = start; addr < end;) {
+ pte_t *pte, *target_pte;
pmd_t *pmd, *target_pmd;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
+ /* Overflow check */
+ if (addr < start)
+ break;
+
pgd = pgd_offset_k(addr);
if (WARN_ON(pgd_none(*pgd)))
return;
p4d = p4d_offset(pgd, addr);
if (WARN_ON(p4d_none(*p4d)))
return;
+
pud = pud_offset(p4d, addr);
- if (pud_none(*pud))
+ if (pud_none(*pud)) {
+ addr += PUD_SIZE;
continue;
+ }
+
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ if (pmd_none(*pmd)) {
+ addr += PMD_SIZE;
continue;
+ }
- target_pmd = pti_user_pagetable_walk_pmd(addr);
- if (WARN_ON(!target_pmd))
- return;
-
- /*
- * Only clone present PMDs. This ensures only setting
- * _PAGE_GLOBAL on present PMDs. This should only be
- * called on well-known addresses anyway, so a non-
- * present PMD would be a surprise.
- */
- if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
- return;
-
- /*
- * Setting 'target_pmd' below creates a mapping in both
- * the user and kernel page tables. It is effectively
- * global, so set it as global in both copies. Note:
- * the X86_FEATURE_PGE check is not _required_ because
- * the CPU ignores _PAGE_GLOBAL when PGE is not
- * supported. The check keeps consistency with
- * code that only sets this bit when supported.
- */
- if (boot_cpu_has(X86_FEATURE_PGE))
- *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
-
- /*
- * Copy the PMD. That is, the kernelmode and usermode
- * tables will share the last-level page tables of this
- * address range
- */
- *target_pmd = pmd_clear_flags(*pmd, clear);
+ if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+ target_pmd = pti_user_pagetable_walk_pmd(addr);
+ if (WARN_ON(!target_pmd))
+ return;
+
+ /*
+ * Only clone present PMDs. This ensures only setting
+ * _PAGE_GLOBAL on present PMDs. This should only be
+ * called on well-known addresses anyway, so a non-
+ * present PMD would be a surprise.
+ */
+ if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
+ return;
+
+ /*
+ * Setting 'target_pmd' below creates a mapping in both
+ * the user and kernel page tables. It is effectively
+ * global, so set it as global in both copies. Note:
+ * the X86_FEATURE_PGE check is not _required_ because
+ * the CPU ignores _PAGE_GLOBAL when PGE is not
+ * supported. The check keeps consistentency with
+ * code that only set this bit when supported.
+ */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
+
+ /*
+ * Copy the PMD. That is, the kernelmode and usermode
+ * tables will share the last-level page tables of this
+ * address range
+ */
+ *target_pmd = *pmd;
+
+ addr += PMD_SIZE;
+
+ } else if (level == PTI_CLONE_PTE) {
+
+ /* Walk the page-table down to the pte level */
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte)) {
+ addr += PAGE_SIZE;
+ continue;
+ }
+
+ /* Only clone present PTEs */
+ if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
+ return;
+
+ /* Allocate PTE in the user page-table */
+ target_pte = pti_user_pagetable_walk_pte(addr);
+ if (WARN_ON(!target_pte))
+ return;
+
+ /* Set GLOBAL bit in both PTEs */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pte = pte_set_flags(*pte, _PAGE_GLOBAL);
+
+ /* Clone the PTE */
+ *target_pte = *pte;
+
+ addr += PAGE_SIZE;
+
+ } else {
+ BUG();
+ }
}
}
+#ifdef CONFIG_X86_64
/*
* Clone a single p4d (i.e. a top-level entry on 4-level systems and a
* next-level entry on 5-level systems).
@@ -354,6 +424,9 @@ static void __init pti_clone_p4d(unsigned long addr)
pgd_t *kernel_pgd;
user_p4d = pti_user_pagetable_walk_p4d(addr);
+ if (!user_p4d)
+ return;
+
kernel_pgd = pgd_offset_k(addr);
kernel_p4d = p4d_offset(kernel_pgd, addr);
*user_p4d = *kernel_p4d;
@@ -367,6 +440,25 @@ static void __init pti_clone_user_shared(void)
pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}
+#else /* CONFIG_X86_64 */
+
+/*
+ * On 32 bit PAE systems with 1GB of kernel address space there is only
+ * one pgd/p4d for the whole kernel. Cloning that would map the whole
+ * address space into the user page-tables, making PTI useless. So clone
+ * the page-table on the PMD level to prevent that.
+ */
+static void __init pti_clone_user_shared(void)
+{
+ unsigned long start, end;
+
+ start = CPU_ENTRY_AREA_BASE;
+ end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+
+ pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+}
+#endif /* CONFIG_X86_64 */
+
/*
* Clone the ESPFIX P4D into the user space visible page table
*/
@@ -380,11 +472,11 @@ static void __init pti_setup_espfix64(void)
/*
* Clone the populated PMDs of the entry and irqentry text and force it RO.
*/
-static void __init pti_clone_entry_text(void)
+static void pti_clone_entry_text(void)
{
- pti_clone_pmds((unsigned long) __entry_text_start,
- (unsigned long) __irqentry_text_end,
- _PAGE_RW);
+ pti_clone_pgtable((unsigned long) __entry_text_start,
+ (unsigned long) __irqentry_text_end,
+ PTI_CLONE_PMD);
}
/*
@@ -435,10 +527,17 @@ static inline bool pti_kernel_image_global_ok(void)
}
/*
+ * This file is the only user of these functions and they are not
+ * arch-generic like the other set_memory.h functions. Just extern them.
+ */
+extern int set_memory_nonglobal(unsigned long addr, int numpages);
+extern int set_memory_global(unsigned long addr, int numpages);
+
+/*
* For some configurations, map all of kernel text into the user page
* tables. This reduces TLB misses, especially on non-PCID systems.
*/
-void pti_clone_kernel_text(void)
+static void pti_clone_kernel_text(void)
{
/*
* rodata is part of the kernel image and is normally
@@ -446,7 +545,8 @@ void pti_clone_kernel_text(void)
* clone the areas past rodata, they might contain secrets.
*/
unsigned long start = PFN_ALIGN(_text);
- unsigned long end = (unsigned long)__end_rodata_hpage_align;
+ unsigned long end_clone = (unsigned long)__end_rodata_aligned;
+ unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
if (!pti_kernel_image_global_ok())
return;
@@ -458,14 +558,18 @@ void pti_clone_kernel_text(void)
* pti_set_kernel_image_nonglobal() did to clear the
* global bit.
*/
- pti_clone_pmds(start, end, _PAGE_RW);
+ pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+
+ /*
+ * pti_clone_pgtable() will set the global bit in any PMDs
+ * that it clones, but we also need to get any PTEs in
+ * the last level for areas that are not huge-page-aligned.
+ */
+
+ /* Set the global bit for normal non-__init kernel text: */
+ set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
-/*
- * This is the only user for it and it is not arch-generic like
- * the other set_memory.h functions. Just extern it.
- */
-extern int set_memory_nonglobal(unsigned long addr, int numpages);
void pti_set_kernel_image_nonglobal(void)
{
/*
@@ -477,9 +581,11 @@ void pti_set_kernel_image_nonglobal(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
- if (pti_kernel_image_global_ok())
- return;
-
+ /*
+ * This clears _PAGE_GLOBAL from the entire kernel image.
+ * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
+ * areas that are mapped to userspace.
+ */
set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
@@ -493,6 +599,28 @@ void __init pti_init(void)
pr_info("enabled\n");
+#ifdef CONFIG_X86_32
+ /*
+ * We check for X86_FEATURE_PCID here. But the init-code will
+ * clear the feature flag on 32 bit because the feature is not
+ * supported on 32 bit anyway. To print the warning we need to
+ * check with cpuid directly again.
+ */
+ if (cpuid_ecx(0x1) & BIT(17)) {
+ /* Use printk to work around pr_fmt() */
+ printk(KERN_WARNING "\n");
+ printk(KERN_WARNING "************************************************************\n");
+ printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
+ printk(KERN_WARNING "** **\n");
+ printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
+ printk(KERN_WARNING "** Your performance will increase dramatically if you **\n");
+ printk(KERN_WARNING "** switch to a 64-bit kernel! **\n");
+ printk(KERN_WARNING "** **\n");
+ printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
+ printk(KERN_WARNING "************************************************************\n");
+ }
+#endif
+
pti_clone_user_shared();
/* Undo all global bits from the init pagetables in head_64.S: */
@@ -502,3 +630,22 @@ void __init pti_init(void)
pti_setup_espfix64();
pti_setup_vsyscall();
}
+
+/*
+ * Finalize the kernel mappings in the userspace page-table. Some of the
+ * mappings for the kernel image might have changed since pti_init()
+ * cloned them. This is because parts of the kernel image have been
+ * mapped RO and/or NX. These changes need to be cloned again to the
+ * userspace page-table.
+ */
+void pti_finalize(void)
+{
+ /*
+ * We need to clone everything (again) that maps parts of the
+ * kernel image.
+ */
+ pti_clone_entry_text();
+ pti_clone_kernel_text();
+
+ debug_checkwx_user();
+}
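pti_clone_pgtable() above advances by whatever granularity it just handled: PUD_SIZE past an empty PUD, PMD_SIZE after cloning a whole PMD, PAGE_SIZE after cloning a single PTE, bailing out if the address wraps. A compact sketch of that variable-stride walk (the predicates are stubs; the real code consults the live page tables):

#include <stdio.h>

#define PAGE_SIZE (1UL << 12)
#define PMD_SIZE  (1UL << 21)
#define PUD_SIZE  (1UL << 30)

/* Stub predicates standing in for the pgd/p4d/pud/pmd walk. */
static int pud_present(unsigned long addr) { (void)addr; return 1; }
static int pmd_is_large(unsigned long addr) { return addr < PMD_SIZE; }

static void clone_range(unsigned long start, unsigned long end)
{
	unsigned long addr, pmds = 0, ptes = 0;

	for (addr = start; addr < end;) {
		if (addr < start)	/* overflow check */
			break;

		if (!pud_present(addr)) {
			addr += PUD_SIZE;
			continue;
		}
		if (pmd_is_large(addr)) {
			pmds++;			/* clone the whole PMD */
			addr += PMD_SIZE;
		} else {
			ptes++;			/* clone one PTE */
			addr += PAGE_SIZE;
		}
	}
	printf("cloned %lu PMDs and %lu PTEs\n", pmds, ptes);
}

int main(void)
{
	clone_range(0, 2 * PMD_SIZE);	/* prints: cloned 1 PMDs and 512 PTEs */
	return 0;
}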
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6eb1f34c3c85..752dbf4e0e50 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
+#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -35,7 +36,7 @@
* necessary invalidation by clearing out the 'ctx_id' which
* forces a TLB flush when the context is loaded.
*/
-void clear_asid_other(void)
+static void clear_asid_other(void)
{
u16 asid;
@@ -185,8 +186,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
{
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
+ bool need_flush;
+ u16 new_asid;
/*
* NB: The scheduler will call us with prev == next when switching
@@ -240,20 +244,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
next->context.ctx_id);
/*
- * We don't currently support having a real mm loaded without
- * our cpu set in mm_cpumask(). We have all the bookkeeping
- * in place to figure out whether we would need to flush
- * if our cpu were cleared in mm_cpumask(), but we don't
- * currently use it.
+ * Even in lazy TLB mode, the CPU should stay set in the
+ * mm_cpumask. The TLB shootdown code can figure out from
+ * cpu_tlbstate.is_lazy whether or not to send an IPI.
*/
if (WARN_ON_ONCE(real_prev != &init_mm &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
- return;
+ /*
+ * If the CPU is not in lazy TLB mode, we are just switching
+ * from one thread in a process to another thread in the same
+ * process. No TLB flush required.
+ */
+ if (!was_lazy)
+ return;
+
+ /*
+ * Read the tlb_gen to check whether a flush is needed.
+ * If the TLB is up to date, just use it.
+ * The barrier synchronizes with the tlb_gen increment in
+ * the TLB shootdown code.
+ */
+ smp_mb();
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+ if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
+ next_tlb_gen)
+ return;
+
+ /*
+ * TLB contents went out of date while we were in lazy
+ * mode. Fall through to the TLB switching code below.
+ */
+ new_asid = prev_asid;
+ need_flush = true;
} else {
- u16 new_asid;
- bool need_flush;
u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
/*
@@ -285,53 +310,60 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
sync_current_stack_to_mm(next);
}
- /* Stop remote flushes for the previous mm */
- VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
- real_prev != &init_mm);
- cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ /*
+ * Stop remote flushes for the previous mm.
+ * Skip kernel threads; we never send init_mm TLB flushing IPIs,
+ * but the bitmap manipulation can cause cache line contention.
+ */
+ if (real_prev != &init_mm) {
+ VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
+ mm_cpumask(real_prev)));
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ }
/*
* Start remote flushes and then read tlb_gen.
*/
- cpumask_set_cpu(cpu, mm_cpumask(next));
+ if (next != &init_mm)
+ cpumask_set_cpu(cpu, mm_cpumask(next));
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ }
- if (need_flush) {
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
- load_new_mm_cr3(next->pgd, new_asid, true);
-
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
- } else {
- /* The new ASID is already up to date. */
- load_new_mm_cr3(next->pgd, new_asid, false);
-
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
- }
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+ load_new_mm_cr3(next->pgd, new_asid, true);
/*
- * Record last user mm's context id, so we can avoid
- * flushing branch buffer with IBPB if we switch back
- * to the same user.
+ * NB: This gets called via leave_mm() in the idle path
+ * where RCU functions differently. Tracing normally
+ * uses RCU, so we need to use the _rcuidle variant.
+ *
+ * (There is no good reason for this. The idle code should
+ * be rearranged to call this before rcu_idle_enter().)
*/
- if (next != &init_mm)
- this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ } else {
+ /* The new ASID is already up to date. */
+ load_new_mm_cr3(next->pgd, new_asid, false);
- this_cpu_write(cpu_tlbstate.loaded_mm, next);
- this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ /* See above wrt _rcuidle. */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
+ /*
+ * Record last user mm's context id, so we can avoid
+ * flushing branch buffer with IBPB if we switch back
+ * to the same user.
+ */
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+
load_mm_cr4(next);
switch_ldt(real_prev, next);
}
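The was_lazy fast path above skips the flush entirely when the mm's tlb_gen has not moved since this CPU went lazy. The idea in miniature, with one generation counter and the flush reduced to a message (a single-threaded, illustrative sketch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong tlb_gen = 1;	/* bumped by remote flush events */
static unsigned long local_gen = 1;	/* generation this CPU last saw */

static void remote_unmap_event(void)
{
	atomic_fetch_add(&tlb_gen, 1);
}

static void leave_lazy_mode(void)
{
	unsigned long next_gen = atomic_load(&tlb_gen);

	if (local_gen == next_gen) {
		printf("TLB still valid, no flush needed\n");
		return;
	}
	printf("TLB stale (gen %lu < %lu), flushing\n", local_gen, next_gen);
	local_gen = next_gen;
}

int main(void)
{
	leave_lazy_mode();	/* up to date: no flush */
	remote_unmap_event();
	leave_lazy_mode();	/* fell behind: flush */
	return 0;
}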
@@ -354,20 +386,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
- if (tlb_defer_switch_to_init_mm()) {
- /*
- * There's a significant optimization that may be possible
- * here. We have accurate enough TLB flush tracking that we
- * don't need to maintain coherence of TLB per se when we're
- * lazy. We do, however, need to maintain coherence of
- * paging-structure caches. We could, in principle, leave our
- * old mm loaded and only switch to init_mm when
- * tlb_remove_page() happens.
- */
- this_cpu_write(cpu_tlbstate.is_lazy, true);
- } else {
- switch_mm(NULL, &init_mm, NULL);
- }
+ this_cpu_write(cpu_tlbstate.is_lazy, true);
}
/*
@@ -454,6 +473,9 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* paging-structure cache to avoid speculatively reading
* garbage into our TLB. Since switching to init_mm is barely
* slower than a minimal flush, just switch to init_mm.
+ *
+ * This should be rare, with native_flush_tlb_others skipping
+ * IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
return;
@@ -560,6 +582,9 @@ static void flush_tlb_func_remote(void *info)
void native_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
+ cpumask_var_t lazymask;
+ unsigned int cpu;
+
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (info->end == TLB_FLUSH_ALL)
trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
@@ -583,8 +608,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
* that UV should be updated so that smp_call_function_many(),
* etc, are optimal on UV.
*/
- unsigned int cpu;
-
cpu = smp_processor_id();
cpumask = uv_flush_tlb_others(cpumask, info);
if (cpumask)
@@ -592,8 +615,29 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(void *)info, 1);
return;
}
- smp_call_function_many(cpumask, flush_tlb_func_remote,
+
+ /*
+ * A temporary cpumask is used in order to skip sending IPIs
+ * to CPUs in lazy TLB state, while keeping them in mm_cpumask(mm).
+ * If the allocation fails, simply IPI every CPU in mm_cpumask.
+ */
+ if (!alloc_cpumask_var(&lazymask, GFP_ATOMIC)) {
+ smp_call_function_many(cpumask, flush_tlb_func_remote,
(void *)info, 1);
+ return;
+ }
+
+ cpumask_copy(lazymask, cpumask);
+
+ for_each_cpu(cpu, lazymask) {
+ if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+ cpumask_clear_cpu(cpu, lazymask);
+ }
+
+ smp_call_function_many(lazymask, flush_tlb_func_remote,
+ (void *)info, 1);
+
+ free_cpumask_var(lazymask);
}
/*
@@ -646,6 +690,68 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
put_cpu();
}
+void tlb_flush_remove_tables_local(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm &&
+ this_cpu_read(cpu_tlbstate.is_lazy)) {
+ /*
+ * We're in lazy mode. We need to at least flush our
+ * paging-structure cache to avoid speculatively reading
+ * garbage into our TLB. Since switching to init_mm is barely
+ * slower than a minimal flush, just switch to init_mm.
+ */
+ switch_mm_irqs_off(NULL, &init_mm, NULL);
+ }
+}
+
+static void mm_fill_lazy_tlb_cpu_mask(struct mm_struct *mm,
+ struct cpumask *lazy_cpus)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+ cpumask_set_cpu(cpu, lazy_cpus);
+ }
+}
+
+void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+ int cpu = get_cpu();
+ cpumask_var_t lazy_cpus;
+
+ if (cpumask_any_but(mm_cpumask(mm), cpu) >= nr_cpu_ids) {
+ put_cpu();
+ return;
+ }
+
+ if (!zalloc_cpumask_var(&lazy_cpus, GFP_ATOMIC)) {
+ /*
+ * If the cpumask allocation fails, do a brute force flush
+ * on all the CPUs that have this mm loaded.
+ */
+ smp_call_function_many(mm_cpumask(mm),
+ tlb_flush_remove_tables_local, (void *)mm, 1);
+ put_cpu();
+ return;
+ }
+
+ /*
+ * CPUs with !is_lazy either received a TLB flush IPI while the user
+ * pages in this address range were unmapped, or have context switched
+ * and reloaded %CR3 since then.
+ *
+ * Shootdown IPIs at page table freeing time only need to be sent to
+ * CPUs that may have out of date TLB contents.
+ */
+ mm_fill_lazy_tlb_cpu_mask(mm, lazy_cpus);
+ smp_call_function_many(lazy_cpus,
+ tlb_flush_remove_tables_local, (void *)mm, 1);
+ free_cpumask_var(lazy_cpus);
+ put_cpu();
+}
static void do_flush_tlb_all(void *info)
{
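native_flush_tlb_others() above now copies the target mask and drops the lazy CPUs before sending IPIs, falling back to IPI-everyone if the temporary mask cannot be allocated. The filtering step, sketched with plain arrays standing in for cpumasks and a print standing in for the IPI (illustrative values):

#include <stdio.h>
#include <string.h>

#define NR_CPUS 8

static const int mm_mask[NR_CPUS] = { 1, 1, 0, 1, 1, 0, 1, 1 };
static const int is_lazy[NR_CPUS] = { 0, 1, 0, 0, 1, 0, 1, 0 };

int main(void)
{
	int target[NR_CPUS];
	int cpu;

	/* Copy the mm's CPU mask, then drop the CPUs in lazy TLB mode. */
	memcpy(target, mm_mask, sizeof(target));
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (is_lazy[cpu])
			target[cpu] = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (target[cpu])
			printf("flush IPI -> cpu %d\n", cpu);
	return 0;
}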
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 5f2eb3231607..ee5d08f25ce4 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm)
#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);
+static DEFINE_SPINLOCK(efi_runtime_lock);
+
#define runtime_service32(func) \
({ \
u32 table = (u32)(unsigned long)efi.systab; \
@@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...);
#define efi_thunk(f, ...) \
({ \
efi_status_t __s; \
- unsigned long __flags; \
u32 __func; \
\
- local_irq_save(__flags); \
arch_efi_call_virt_setup(); \
\
__func = runtime_service32(f); \
__s = efi64_thunk(__func, __VA_ARGS__); \
\
arch_efi_call_virt_teardown(); \
- local_irq_restore(__flags); \
\
__s; \
})
@@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
u32 phys_tm, phys_tc;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
phys_tc = virt_to_phys_or_null(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_time, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
{
efi_status_t status;
u32 phys_enabled, phys_pending, phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_enabled = virt_to_phys_or_null(enabled);
phys_pending = virt_to_phys_or_null(pending);
@@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
status = efi_thunk(get_wakeup_time, phys_enabled,
phys_pending, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_wakeup_time, enabled, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
efi_status_t status;
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_data_size = virt_to_phys_or_null(data_size);
phys_vendor = virt_to_phys_or_null(vendor);
@@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
status = efi_thunk(get_variable, phys_name, phys_vendor,
phys_attr, phys_data_size, phys_data);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
{
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+ /* If data_size is > sizeof(u32) we've got problems */
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+{
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
phys_vendor = virt_to_phys_or_null(vendor);
@@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
status = efi_thunk(set_variable, phys_name, phys_vendor,
attr, data_size, phys_data);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
{
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_name_size = virt_to_phys_or_null(name_size);
phys_vendor = virt_to_phys_or_null(vendor);
@@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size,
status = efi_thunk(get_next_variable, phys_name_size,
phys_name, phys_vendor);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count)
{
efi_status_t status;
u32 phys_count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_count = virt_to_phys_or_null(count);
status = efi_thunk(get_next_high_mono_count, phys_count);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data)
{
u32 phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_data = virt_to_phys_or_null_size(data, data_size);
efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
}
static efi_status_t
@@ -872,10 +933,13 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
{
efi_status_t status;
u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
phys_storage = virt_to_phys_or_null(storage_space);
phys_remaining = virt_to_phys_or_null(remaining_space);
phys_max = virt_to_phys_or_null(max_variable_size);
@@ -883,6 +947,35 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+ u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
+ phys_storage = virt_to_phys_or_null(storage_space);
+ phys_remaining = virt_to_phys_or_null(remaining_space);
+ phys_max = virt_to_phys_or_null(max_variable_size);
+
+ status = efi_thunk(query_variable_info, attr, phys_storage,
+ phys_remaining, phys_max);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void)
efi.get_variable = efi_thunk_get_variable;
efi.get_next_variable = efi_thunk_get_next_variable;
efi.set_variable = efi_thunk_set_variable;
+ efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
efi.reset_system = efi_thunk_reset_system;
efi.query_variable_info = efi_thunk_query_variable_info;
+ efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
efi.update_capsule = efi_thunk_update_capsule;
efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
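The *_nonblocking variants exist because these paths can now be reached from contexts that must not spin on efi_runtime_lock; they try the lock once and report EFI_NOT_READY instead of blocking. The same shape with pthreads (illustrative names; build with -pthread):

#include <pthread.h>
#include <stdio.h>

#define NOT_READY 1

static pthread_mutex_t runtime_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_variable(const char *name)
{
	pthread_mutex_lock(&runtime_lock);	/* may block */
	printf("set %s\n", name);
	pthread_mutex_unlock(&runtime_lock);
	return 0;
}

static int set_variable_nonblocking(const char *name)
{
	if (pthread_mutex_trylock(&runtime_lock))
		return NOT_READY;		/* caller retries later */
	printf("set %s\n", name);
	pthread_mutex_unlock(&runtime_lock);
	return 0;
}

int main(void)
{
	set_variable("Dummy");
	return set_variable_nonblocking("Dummy");
}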
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 36c1f8b9f7e0..844d31cb8a0c 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia);
*/
void efi_delete_dummy_variable(void)
{
- efi.set_variable((efi_char16_t *)efi_dummy_name,
- &EFI_DUMMY_GUID,
- EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS,
- 0, NULL);
+ efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
+ &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
}
/*
@@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
int num_entries;
void *new;
- if (efi_mem_desc_lookup(addr, &md)) {
+ if (efi_mem_desc_lookup(addr, &md) ||
+ md.type != EFI_BOOT_SERVICES_DATA) {
pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
return;
}
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index fa021dfab088..5cf886c867c2 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfld.o pwr.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o pwr.o
# SFI specific code
ifdef CONFIG_X86_INTEL_MID
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 2ebdf31d9996..56f66eafb94f 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -36,8 +36,6 @@
#include <asm/apb_timer.h>
#include <asm/reboot.h>
-#include "intel_mid_weak_decls.h"
-
/*
* the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
* cmdline option x86_intel_mid_timer can be used to override the configuration
@@ -61,10 +59,6 @@
enum intel_mid_timer_options intel_mid_timer_options;
-/* intel_mid_ops to store sub arch ops */
-static struct intel_mid_ops *intel_mid_ops;
-/* getter function for sub arch ops*/
-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
enum intel_mid_cpu_type __intel_mid_cpu_chip;
EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
@@ -82,11 +76,6 @@ static void intel_mid_reboot(void)
intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
}
-static unsigned long __init intel_mid_calibrate_tsc(void)
-{
- return 0;
-}
-
static void __init intel_mid_setup_bp_timer(void)
{
apbt_time_init();
@@ -133,6 +122,7 @@ static void intel_mid_arch_setup(void)
case 0x3C:
case 0x4A:
__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+ x86_platform.legacy.rtc = 1;
break;
case 0x27:
default:
@@ -140,17 +130,7 @@ static void intel_mid_arch_setup(void)
break;
}
- if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
- intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
- else {
- intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
- pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
- }
-
out:
- if (intel_mid_ops->arch_setup)
- intel_mid_ops->arch_setup();
-
/*
* Intel MID platforms are using explicitly defined regulators.
*
@@ -191,7 +171,6 @@ void __init x86_intel_mid_early_setup(void)
x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
- x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
x86_init.pci.arch_init = intel_mid_pci_init;
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
deleted file mode 100644
index 3c1c3866d82b..000000000000
--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-
-/* For every CPU addition a new get_<cpuname>_ops interface needs
- * to be added.
- */
-extern void *get_penwell_ops(void);
-extern void *get_cloverview_ops(void);
-extern void *get_tangier_ops(void);
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
deleted file mode 100644
index e42978d4deaf..000000000000
--- a/arch/x86/platform/intel-mid/mfld.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * mfld.c: Intel Medfield platform setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init mfld_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb;
-
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
- ratio = (hi >> 8) & 0x1f;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("read a zero ratio, should be incorrect!\n");
- pr_err("force tsc ratio to 16 ...\n");
- ratio = 16;
- }
- rdmsr(MSR_FSB_FREQ, lo, hi);
- if ((lo & 0x7) == 0x7)
- fsb = FSB_FREQ_83SKU;
- else
- fsb = FSB_FREQ_100SKU;
- fast_calibrate = ratio * fsb;
- pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
- lapic_timer_frequency = fsb * 1000 / HZ;
-
- /*
- * TSC on Intel Atom SoCs is reliable and of known frequency.
- * See tsc_msr.c for details.
- */
- setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
- return fast_calibrate;
-}
-
-static void __init penwell_arch_setup(void)
-{
- x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-}
-
-static struct intel_mid_ops penwell_ops = {
- .arch_setup = penwell_arch_setup,
-};
-
-void *get_penwell_ops(void)
-{
- return &penwell_ops;
-}
-
-void *get_cloverview_ops(void)
-{
- return &penwell_ops;
-}
diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
deleted file mode 100644
index ae7bdeb0e507..000000000000
--- a/arch/x86/platform/intel-mid/mrfld.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Intel Merrifield platform specific setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init tangier_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb, bus_freq;
-
- /* *********************** */
- /* Compute TSC:Ratio * FSB */
- /* *********************** */
-
- /* Compute Ratio */
- rdmsr(MSR_PLATFORM_INFO, lo, hi);
- pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
-
- ratio = (lo >> 8) & 0xFF;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
- ratio = 4;
- }
-
- /* Compute FSB */
- rdmsr(MSR_FSB_FREQ, lo, hi);
- pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
- hi, lo);
-
- bus_freq = lo & 0x7;
- pr_debug("bus_freq = 0x%x\n", bus_freq);
-
- if (bus_freq == 0)
- fsb = FSB_FREQ_100SKU;
- else if (bus_freq == 1)
- fsb = FSB_FREQ_100SKU;
- else if (bus_freq == 2)
- fsb = FSB_FREQ_133SKU;
- else if (bus_freq == 3)
- fsb = FSB_FREQ_167SKU;
- else if (bus_freq == 4)
- fsb = FSB_FREQ_83SKU;
- else if (bus_freq == 5)
- fsb = FSB_FREQ_400SKU;
- else if (bus_freq == 6)
- fsb = FSB_FREQ_267SKU;
- else if (bus_freq == 7)
- fsb = FSB_FREQ_333SKU;
- else {
- BUG();
- pr_err("Invalid bus_freq! Setting to minimal value!\n");
- fsb = FSB_FREQ_100SKU;
- }
-
- /* TSC = FSB Freq * Resolved HFM Ratio */
- fast_calibrate = ratio * fsb;
- pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
-
- /* ************************************ */
- /* Calculate Local APIC Timer Frequency */
- /* ************************************ */
- lapic_timer_frequency = (fsb * 1000) / HZ;
-
- pr_debug("Setting lapic_timer_frequency = %d\n",
- lapic_timer_frequency);
-
- /*
- * TSC on Intel Atom SoCs is reliable and of known frequency.
- * See tsc_msr.c for details.
- */
- setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
- return fast_calibrate;
-}
-
-static void __init tangier_arch_setup(void)
-{
- x86_platform.calibrate_tsc = tangier_calibrate_tsc;
- x86_platform.legacy.rtc = 1;
-}
-
-/* tangier arch ops */
-static struct intel_mid_ops tangier_ops = {
- .arch_setup = tangier_arch_setup,
-};
-
-void *get_tangier_ops(void)
-{
- return &tangier_ops;
-}
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 7c3077e58fa0..f0e920fb98ad 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -311,10 +311,8 @@ static int __init add_xo1_platform_devices(void)
return PTR_ERR(pdev);
pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
static int olpc_xo1_ec_probe(struct platform_device *pdev)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index ca446da48fd2..e26dfad507c8 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1607,8 +1607,6 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
*tunables[cnt].tunp = val;
continue;
}
- if (q == p)
- break;
}
return 0;
}
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index ce8da3a0412c..fd369a6e9ff8 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -137,7 +137,7 @@ ENTRY(restore_registers)
/* Saved in save_processor_state. */
lgdt saved_context_gdt_desc(%rax)
- xorq %rax, %rax
+ xorl %eax, %eax
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)
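The xorq -> xorl change leans on an x86-64 rule: a write to a 32-bit register zero-extends into the full 64-bit register, so "xorl %eax, %eax" clears all of %rax with a one-byte-shorter encoding (no REX.W prefix). A quick demonstration with GCC/Clang inline asm (x86-64 only, illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rax = 0xdeadbeefcafef00dULL;

	/* %k0 selects the 32-bit form of the register holding the value. */
	__asm__ volatile("xorl %k0, %k0" : "+r"(rax));
	printf("value after 32-bit xor: %llu\n", (unsigned long long)rax);
	return 0;
}

The printed value is 0: the upper 32 bits were cleared by the 32-bit operation, which is why the narrower encoding is safe here.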
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 220e97841e49..3a6c8ebc8032 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -67,6 +67,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__tracedata_(start|end)|"
"__(start|stop)_notes|"
"__end_rodata|"
+ "__end_rodata_aligned|"
"__initramfs_start|"
"(jiffies|jiffies_64)|"
#if ELF_BITS == 64
diff --git a/arch/x86/um/vdso/.gitignore b/arch/x86/um/vdso/.gitignore
index 9cac6d072199..f8b69d84238e 100644
--- a/arch/x86/um/vdso/.gitignore
+++ b/arch/x86/um/vdso/.gitignore
@@ -1,2 +1 @@
-vdso-syms.lds
vdso.lds
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index b2d6967262b2..822ccdba93ad 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -53,22 +53,6 @@ $(vobjs): KBUILD_CFLAGS += $(CFL)
CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage
CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage
-targets += vdso-syms.lds
-extra-$(VDSO64-y) += vdso-syms.lds
-
-#
-# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-#
-sed-vdsosym := -e 's/^00*/0/' \
- -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
-endef
-
-$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
- $(call if_changed,vdsosym)
-
#
# The DSO images are built using a special linker script.
#
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 439a94bf89ad..105a57d73701 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -119,6 +119,27 @@ static void __init xen_banner(void)
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
+
+static void __init xen_pv_init_platform(void)
+{
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+ HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+
+ /* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */
+ xen_vcpu_info_reset(0);
+
+ /* pvclock is in shared info area */
+ xen_init_time_ops();
+}
+
+static void __init xen_pv_guest_late_init(void)
+{
+#ifndef CONFIG_SMP
+ /* Setup shared vcpu info for non-smp configurations */
+ xen_setup_vcpu_info_placement();
+#endif
+}
+
/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
@@ -947,34 +968,8 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
xen_write_msr_safe(msr, low, high);
}
-void xen_setup_shared_info(void)
-{
- set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
-
- HYPERVISOR_shared_info =
- (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-
- xen_setup_mfn_list_list();
-
- if (system_state == SYSTEM_BOOTING) {
-#ifndef CONFIG_SMP
- /*
- * In UP this is as good a place as any to set up shared info.
- * Limit this to boot only, at restore vcpu setup is done via
- * xen_vcpu_restore().
- */
- xen_setup_vcpu_info_placement();
-#endif
- /*
- * Now that shared info is set up we can start using routines
- * that point to pvclock area.
- */
- xen_init_time_ops();
- }
-}
-
/* This is called once we have the cpu_possible_mask */
-void __ref xen_setup_vcpu_info_placement(void)
+void __init xen_setup_vcpu_info_placement(void)
{
int cpu;
@@ -1228,6 +1223,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_init.irqs.intr_mode_init = x86_init_noop;
x86_init.oem.arch_setup = xen_arch_setup;
x86_init.oem.banner = xen_banner;
+ x86_init.hyper.init_platform = xen_pv_init_platform;
+ x86_init.hyper.guest_late_init = xen_pv_guest_late_init;
/*
* Set up some pagetable state before starting to set any ptes.
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2c30cabfda90..52206ad81e4b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1230,8 +1230,7 @@ static void __init xen_pagetable_p2m_free(void)
 * We round up to the PMD, which means that if anybody at this stage is
 * using the __ka address of xen_start_info or
 * xen_start_info->shared_info they are going to crash. Fortunately
- * we have already revectored in xen_setup_kernel_pagetable and in
- * xen_setup_shared_info.
+ * we have already revectored in xen_setup_kernel_pagetable.
*/
size = roundup(size, PMD_SIZE);
@@ -1292,8 +1291,7 @@ static void __init xen_pagetable_init(void)
/* Remap memory freed due to conflicts with E820 map */
xen_remap_memory();
-
- xen_setup_shared_info();
+ xen_setup_mfn_list_list();
}
static void xen_write_cr2(unsigned long cr2)
{
diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c
index a2e0f110af56..8303b58c79a9 100644
--- a/arch/x86/xen/suspend_pv.c
+++ b/arch/x86/xen/suspend_pv.c
@@ -27,8 +27,9 @@ void xen_pv_pre_suspend(void)
void xen_pv_post_suspend(int suspend_cancelled)
{
xen_build_mfn_list_list();
-
- xen_setup_shared_info();
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+ HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+ xen_setup_mfn_list_list();
if (suspend_cancelled) {
xen_start_info->store_mfn =
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index e0f1bcf01d63..c84f1e039d84 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,6 +31,8 @@
/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP 100000
+static u64 xen_sched_clock_offset __read_mostly;
+
/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
@@ -40,7 +42,7 @@ static unsigned long xen_tsc_khz(void)
return pvclock_tsc_khz(info);
}
-u64 xen_clocksource_read(void)
+static u64 xen_clocksource_read(void)
{
struct pvclock_vcpu_time_info *src;
u64 ret;
@@ -57,6 +59,11 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
return xen_clocksource_read();
}
+static u64 xen_sched_clock(void)
+{
+ return xen_clocksource_read() - xen_sched_clock_offset;
+}
+
static void xen_read_wallclock(struct timespec64 *ts)
{
struct shared_info *s = HYPERVISOR_shared_info;
@@ -367,7 +374,7 @@ void xen_timer_resume(void)
}
static const struct pv_time_ops xen_time_ops __initconst = {
- .sched_clock = xen_clocksource_read,
+ .sched_clock = xen_sched_clock,
.steal_clock = xen_steal_clock,
};
@@ -503,8 +510,9 @@ static void __init xen_time_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
-void __ref xen_init_time_ops(void)
+void __init xen_init_time_ops(void)
{
+ xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.timer_init = xen_time_init;
@@ -542,11 +550,11 @@ void __init xen_hvm_init_time_ops(void)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
- printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
- "disable pv timer\n");
+ pr_info("Xen doesn't support pvclock on HVM, disable pv timer");
return;
}
+ xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
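
The hunks above make Xen's sched_clock start at zero by sampling the clocksource once in the init path and subtracting that offset on every read. A minimal user-space sketch of the same pattern, assuming CLOCK_MONOTONIC as a stand-in for the Xen clocksource (all names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t sched_clock_offset;

static uint64_t raw_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Derived clock: reads near zero right after initialization. */
static uint64_t sched_clock_ns(void)
{
	return raw_clock_ns() - sched_clock_offset;
}

int main(void)
{
	sched_clock_offset = raw_clock_ns();	/* like xen_init_time_ops() */
	printf("%llu\n", (unsigned long long)sched_clock_ns());
	return 0;
}
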
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 3b34745d0a52..e78684597f57 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -31,7 +31,6 @@ extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;
void xen_setup_mfn_list_list(void);
-void xen_setup_shared_info(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
@@ -68,12 +67,11 @@ void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
void xen_setup_runstate_info(int cpu);
void xen_teardown_timer(int cpu);
-u64 xen_clocksource_read(void);
void xen_setup_cpu_clockevents(void);
void xen_save_time_memory_area(void);
void xen_restore_time_memory_area(void);
-void __ref xen_init_time_ops(void);
-void __init xen_hvm_init_time_ops(void);
+void xen_init_time_ops(void);
+void xen_hvm_init_time_ops(void);
irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e7a23f2a519a..7de0149e1cf7 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -197,107 +197,9 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1,(v))
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1,(v))
-
-/**
- * atomic_dec_return - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
-
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
index dbe3053b284a..9f119c1ca0b5 100644
--- a/arch/xtensa/include/asm/hw_breakpoint.h
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -30,13 +30,16 @@ struct arch_hw_breakpoint {
u16 type;
};
+struct perf_event_attr;
struct perf_event;
struct pt_regs;
struct task_struct;
int hw_breakpoint_slots(int type);
-int arch_check_bp_in_kernelspace(struct perf_event *bp);
-int arch_validate_hwbkpt_settings(struct perf_event *bp);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
index b35656ab7dbd..c2e387c19cda 100644
--- a/arch/xtensa/kernel/hw_breakpoint.c
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -33,14 +33,13 @@ int hw_breakpoint_slots(int type)
}
}
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = bp->attr.bp_len;
+ va = hw->address;
+ len = hw->len;
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -48,50 +47,41 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->type = XTENSA_BREAKPOINT_EXECUTE;
+ hw->type = XTENSA_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->type = XTENSA_BREAKPOINT_LOAD;
+ hw->type = XTENSA_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->type = XTENSA_BREAKPOINT_STORE;
+ hw->type = XTENSA_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+ hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- info->len = bp->attr.bp_len;
- if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
+ hw->len = attr->bp_len;
+ if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
- if (info->address & (info->len - 1))
+ hw->address = attr->bp_addr;
+ if (hw->address & (hw->len - 1))
return -EINVAL;
return 0;
}
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
-{
- int ret;
-
- /* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
- return ret;
-}
-
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data)
{
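
The parse routine above accepts only power-of-two lengths between 1 and 64 with a naturally aligned address. A small stand-alone sketch of those two checks, with is_power_of_2() open-coded (names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* len must be a power of two in [1, 64] and the address aligned to it,
 * so addr & (len - 1) == 0 describes a naturally aligned watch range. */
static bool bp_params_valid(unsigned long addr, unsigned int len)
{
	if (len < 1 || len > 64 || (len & (len - 1)) != 0)
		return false;	/* !is_power_of_2(len) */
	return (addr & (len - 1)) == 0;
}

int main(void)
{
	printf("%d\n", bp_params_valid(0x1000, 8));	/* 1 */
	printf("%d\n", bp_params_valid(0x1004, 8));	/* 0: misaligned */
	printf("%d\n", bp_params_valid(0x1000, 48));	/* 0: not a power of 2 */
	return 0;
}
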
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index fa0729c1e776..d81c653b9bf6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v)
{
unsigned int counter;
- counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+ counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
if (counter <= (unsigned int)INT_MAX)
return (int)counter;
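
As the deleted xtensa helper documents, atomic_fetch_add_unless() adds @a to @v unless @v equals @u, returning the old value either way; that is what lets atomic_inc_return_safe() refuse to move the counter past the saturation point. A user-space sketch of those semantics using C11 atomics (the function name here is illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* Add @a to @v unless @v == @u; return the old value either way. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	do {
		if (c == u)
			break;	/* saturated: skip the add */
	} while (!atomic_compare_exchange_weak(v, &c, c + a));

	return c;
}

int main(void)
{
	atomic_int v = 0;

	/* 0 is the "unless" value, so the increment is refused. */
	printf("%d\n", fetch_add_unless(&v, 1, 0));	/* 0, v stays 0 */
	atomic_store(&v, 5);
	printf("%d\n", fetch_add_unless(&v, 1, 0));	/* 5, v is now 6 */
	return 0;
}
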
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7436b2d27fa3..a390c6d4f72d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
zram->backing_dev = NULL;
zram->old_block_size = 0;
zram->bdev = NULL;
-
+ zram->disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_SYNCHRONOUS_IO;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
+ /*
+	 * With the writeback feature, zram does asynchronous IO, so it is no
+	 * longer a synchronous device; remove the synchronous io flag. Otherwise,
+	 * the upper layer (e.g., swap) could wait for IO completion rather than
+	 * submit-and-return, which would make the system sluggish.
+	 * Furthermore, when the IO function returns (e.g., swap_readpage), the
+	 * upper layer assumes the IO is done and may free the page while the IO
+	 * is still in flight, which can eventually cause a use-after-free once
+	 * the IO really completes.
+ */
+ zram->disk->queue->backing_dev_info->capabilities &=
+ ~BDI_CAP_SYNCHRONOUS_IO;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 7513411140b6..9ab3db8b3988 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -35,9 +35,6 @@ static struct clk *__of_clk_get(struct device_node *np, int index,
struct clk *clk;
int rc;
- if (index < 0)
- return ERR_PTR(-EINVAL);
-
rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
&clkspec);
if (rc)
@@ -199,7 +196,7 @@ struct clk *clk_get(struct device *dev, const char *con_id)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct clk *clk;
- if (dev) {
+ if (dev && dev->of_node) {
clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 00caf37e52f9..c070cc7992e9 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
-obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
+obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
deleted file mode 100644
index f9b724fd9950..000000000000
--- a/drivers/clocksource/mtk_timer.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Mediatek SoCs General-Purpose Timer handling.
- *
- * Copyright (C) 2014 Matthias Brugger
- *
- * Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-#include <linux/slab.h>
-
-#define GPT_IRQ_EN_REG 0x00
-#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
-#define GPT_IRQ_ACK_REG 0x08
-#define GPT_IRQ_ACK(val) BIT((val) - 1)
-
-#define TIMER_CTRL_REG(val) (0x10 * (val))
-#define TIMER_CTRL_OP(val) (((val) & 0x3) << 4)
-#define TIMER_CTRL_OP_ONESHOT (0)
-#define TIMER_CTRL_OP_REPEAT (1)
-#define TIMER_CTRL_OP_FREERUN (3)
-#define TIMER_CTRL_CLEAR (2)
-#define TIMER_CTRL_ENABLE (1)
-#define TIMER_CTRL_DISABLE (0)
-
-#define TIMER_CLK_REG(val) (0x04 + (0x10 * (val)))
-#define TIMER_CLK_SRC(val) (((val) & 0x1) << 4)
-#define TIMER_CLK_SRC_SYS13M (0)
-#define TIMER_CLK_SRC_RTC32K (1)
-#define TIMER_CLK_DIV1 (0x0)
-#define TIMER_CLK_DIV2 (0x1)
-
-#define TIMER_CNT_REG(val) (0x08 + (0x10 * (val)))
-#define TIMER_CMP_REG(val) (0x0C + (0x10 * (val)))
-
-#define GPT_CLK_EVT 1
-#define GPT_CLK_SRC 2
-
-struct mtk_clock_event_device {
- void __iomem *gpt_base;
- u32 ticks_per_jiffy;
- struct clock_event_device dev;
-};
-
-static void __iomem *gpt_sched_reg __read_mostly;
-
-static u64 notrace mtk_read_sched_clock(void)
-{
- return readl_relaxed(gpt_sched_reg);
-}
-
-static inline struct mtk_clock_event_device *to_mtk_clk(
- struct clock_event_device *c)
-{
- return container_of(c, struct mtk_clock_event_device, dev);
-}
-
-static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
- writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
- TIMER_CTRL_REG(timer));
-}
-
-static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
- unsigned long delay, u8 timer)
-{
- writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
-}
-
-static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
- bool periodic, u8 timer)
-{
- u32 val;
-
- /* Acknowledge interrupt */
- writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-
- /* Clear 2 bit timer operation mode field */
- val &= ~TIMER_CTRL_OP(0x3);
-
- if (periodic)
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
- else
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
-
- writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static int mtk_clkevt_shutdown(struct clock_event_device *clk)
-{
- mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_next_event(unsigned long event,
- struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
-
- return 0;
-}
-
-static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
-{
- struct mtk_clock_event_device *evt = dev_id;
-
- /* Acknowledge timer0 irq */
- writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
- evt->dev.event_handler(&evt->dev);
-
- return IRQ_HANDLED;
-}
-
-static void
-__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
-{
- writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-
- writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
- evt->gpt_base + TIMER_CLK_REG(timer));
-
- writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
-
- writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- /* Disable all interrupts */
- writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-
- /* Acknowledge all spurious pending interrupts */
- writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
- writel(val | GPT_IRQ_ENABLE(timer),
- evt->gpt_base + GPT_IRQ_EN_REG);
-}
-
-static int __init mtk_timer_init(struct device_node *node)
-{
- struct mtk_clock_event_device *evt;
- struct resource res;
- unsigned long rate = 0;
- struct clk *clk;
-
- evt = kzalloc(sizeof(*evt), GFP_KERNEL);
- if (!evt)
- return -ENOMEM;
-
- evt->dev.name = "mtk_tick";
- evt->dev.rating = 300;
- evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
- evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
- evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
- evt->dev.tick_resume = mtk_clkevt_shutdown;
- evt->dev.set_next_event = mtk_clkevt_next_event;
- evt->dev.cpumask = cpu_possible_mask;
-
- evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
- if (IS_ERR(evt->gpt_base)) {
- pr_err("Can't get resource\n");
- goto err_kzalloc;
- }
-
- evt->dev.irq = irq_of_parse_and_map(node, 0);
- if (evt->dev.irq <= 0) {
- pr_err("Can't parse IRQ\n");
- goto err_mem;
- }
-
- clk = of_clk_get(node, 0);
- if (IS_ERR(clk)) {
- pr_err("Can't get timer clock\n");
- goto err_irq;
- }
-
- if (clk_prepare_enable(clk)) {
- pr_err("Can't prepare clock\n");
- goto err_clk_put;
- }
- rate = clk_get_rate(clk);
-
- if (request_irq(evt->dev.irq, mtk_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
- pr_err("failed to setup irq %d\n", evt->dev.irq);
- goto err_clk_disable;
- }
-
- evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
-
- /* Configure clock source */
- mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
- clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
- node->name, rate, 300, 32, clocksource_mmio_readl_up);
- gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
- sched_clock_register(mtk_read_sched_clock, 32, rate);
-
- /* Configure clock event */
- mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
- clockevents_config_and_register(&evt->dev, rate, 0x3,
- 0xffffffff);
-
- mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
- return 0;
-
-err_clk_disable:
- clk_disable_unprepare(clk);
-err_clk_put:
- clk_put(clk);
-err_irq:
- irq_dispose_mapping(evt->dev.irq);
-err_mem:
- iounmap(evt->gpt_base);
- of_address_to_resource(node, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-err_kzalloc:
- kfree(evt);
-
- return -EINVAL;
-}
-TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index c337a8100a7b..aa624885e0e2 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -230,7 +230,7 @@ static int __init tegra20_init_timer(struct device_node *np)
return ret;
}
- tegra_clockevent.cpumask = cpu_all_mask;
+ tegra_clockevent.cpumask = cpu_possible_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
@@ -259,6 +259,6 @@ static int __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- return register_persistent_clock(NULL, tegra_read_persistent_clock64);
+ return register_persistent_clock(tegra_read_persistent_clock64);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c
index 5e23d7b4a722..b4bd2f5b801d 100644
--- a/drivers/clocksource/timer-atcpit100.c
+++ b/drivers/clocksource/timer-atcpit100.c
@@ -185,7 +185,7 @@ static struct timer_of to = {
.set_state_oneshot = atcpit100_clkevt_set_oneshot,
.tick_resume = atcpit100_clkevt_shutdown,
.set_next_event = atcpit100_clkevt_next_event,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
},
.of_irq = {
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 0eee03250cfc..f5b2eda30bf3 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -211,7 +211,7 @@ static int __init keystone_timer_init(struct device_node *np)
event_dev->set_state_shutdown = keystone_shutdown;
event_dev->set_state_periodic = keystone_set_periodic;
event_dev->set_state_oneshot = keystone_shutdown;
- event_dev->cpumask = cpu_all_mask;
+ event_dev->cpumask = cpu_possible_mask;
event_dev->owner = THIS_MODULE;
event_dev->name = TIMER_NAME;
event_dev->irq = irq;
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
new file mode 100644
index 000000000000..eb10321f8517
--- /dev/null
+++ b/drivers/clocksource/timer-mediatek.c
@@ -0,0 +1,328 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include "timer-of.h"
+
+#define TIMER_CLK_EVT (1)
+#define TIMER_CLK_SRC (2)
+
+#define TIMER_SYNC_TICKS (3)
+
+/* gpt */
+#define GPT_IRQ_EN_REG 0x00
+#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
+#define GPT_IRQ_ACK_REG 0x08
+#define GPT_IRQ_ACK(val) BIT((val) - 1)
+
+#define GPT_CTRL_REG(val) (0x10 * (val))
+#define GPT_CTRL_OP(val) (((val) & 0x3) << 4)
+#define GPT_CTRL_OP_ONESHOT (0)
+#define GPT_CTRL_OP_REPEAT (1)
+#define GPT_CTRL_OP_FREERUN (3)
+#define GPT_CTRL_CLEAR (2)
+#define GPT_CTRL_ENABLE (1)
+#define GPT_CTRL_DISABLE (0)
+
+#define GPT_CLK_REG(val) (0x04 + (0x10 * (val)))
+#define GPT_CLK_SRC(val) (((val) & 0x1) << 4)
+#define GPT_CLK_SRC_SYS13M (0)
+#define GPT_CLK_SRC_RTC32K (1)
+#define GPT_CLK_DIV1 (0x0)
+#define GPT_CLK_DIV2 (0x1)
+
+#define GPT_CNT_REG(val) (0x08 + (0x10 * (val)))
+#define GPT_CMP_REG(val) (0x0C + (0x10 * (val)))
+
+/* system timer */
+#define SYST_BASE (0x40)
+
+#define SYST_CON (SYST_BASE + 0x0)
+#define SYST_VAL (SYST_BASE + 0x4)
+
+#define SYST_CON_REG(to) (timer_of_base(to) + SYST_CON)
+#define SYST_VAL_REG(to) (timer_of_base(to) + SYST_VAL)
+
+/*
+ * SYST_CON_EN: Clock enable. Shall be set to:
+ * - Start the timer countdown.
+ * - Allow the timeout ticks to be updated.
+ * - Allow the interrupt functions to be changed.
+ *
+ * SYST_CON_IRQ_EN: Set to allow interrupt.
+ *
+ * SYST_CON_IRQ_CLR: Set to clear interrupt.
+ */
+#define SYST_CON_EN BIT(0)
+#define SYST_CON_IRQ_EN BIT(1)
+#define SYST_CON_IRQ_CLR BIT(4)
+
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static void mtk_syst_ack_irq(struct timer_of *to)
+{
+ /* Clear and disable interrupt */
+ writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
+}
+
+static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ mtk_syst_ack_irq(to);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_syst_clkevt_next_event(unsigned long ticks,
+ struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Enable clock to allow timeout tick update later */
+ writel(SYST_CON_EN, SYST_CON_REG(to));
+
+ /*
+ * Write new timeout ticks. Timer shall start countdown
+ * after timeout ticks are updated.
+ */
+ writel(ticks, SYST_VAL_REG(to));
+
+ /* Enable interrupt */
+ writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+ /* Disable timer */
+ writel(0, SYST_CON_REG(to_timer_of(clkevt)));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
+{
+ return mtk_syst_clkevt_shutdown(clkevt);
+}
+
+static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
+{
+ return 0;
+}
+
+static u64 notrace mtk_gpt_read_sched_clock(void)
+{
+ return readl_relaxed(gpt_sched_reg);
+}
+
+static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+ writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
+ GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
+ unsigned long delay, u8 timer)
+{
+ writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_start(struct timer_of *to,
+ bool periodic, u8 timer)
+{
+ u32 val;
+
+ /* Acknowledge interrupt */
+ writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ /* Clear 2 bit timer operation mode field */
+ val &= ~GPT_CTRL_OP(0x3);
+
+ if (periodic)
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
+ else
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);
+
+ writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
+{
+ mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_next_event(unsigned long event,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Acknowledge timer0 irq */
+ writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void
+__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
+{
+ writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
+ timer_of_base(to) + GPT_CLK_REG(timer));
+
+ writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
+
+ writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ /* Disable all interrupts */
+ writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+ /* Acknowledge all spurious pending interrupts */
+ writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
+ writel(val | GPT_IRQ_ENABLE(timer),
+ timer_of_base(to) + GPT_IRQ_EN_REG);
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "mtk-clkevt",
+ .rating = 300,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init mtk_syst_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
+ to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
+ to.clkevt.tick_resume = mtk_syst_clkevt_resume;
+ to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
+ to.of_irq.handler = mtk_syst_handler;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+
+static int __init mtk_gpt_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
+ to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
+ to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+ to.of_irq.handler = mtk_gpt_interrupt;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ /* Configure clock source */
+ mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
+ clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
+ node->name, timer_of_rate(&to), 300, 32,
+ clocksource_mmio_readl_up);
+ gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
+ sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
+
+ /* Configure clock event */
+ mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
+TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
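
The GPT_*_REG() macros in the new driver encode a fixed 0x10-byte register window per timer: CTRL at the window base, then CLK, CNT and CMP at offsets 4, 8 and 0xC. A sketch that prints the resulting offsets for the event (1) and source (2) timers:

#include <stdio.h>

#define GPT_CTRL_REG(n)	(0x10 * (n))
#define GPT_CLK_REG(n)	(0x04 + 0x10 * (n))
#define GPT_CNT_REG(n)	(0x08 + 0x10 * (n))
#define GPT_CMP_REG(n)	(0x0C + 0x10 * (n))

int main(void)
{
	for (int n = 1; n <= 2; n++)
		printf("timer %d: ctrl=%#x clk=%#x cnt=%#x cmp=%#x\n",
		       n, GPT_CTRL_REG(n), GPT_CLK_REG(n),
		       GPT_CNT_REG(n), GPT_CMP_REG(n));
	return 0;
}
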
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
index ef9ebeafb3ed..430cb99d8d79 100644
--- a/drivers/clocksource/timer-sprd.c
+++ b/drivers/clocksource/timer-sprd.c
@@ -156,4 +156,54 @@ static int __init sprd_timer_init(struct device_node *np)
return 0;
}
+static struct timer_of suspend_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+static u64 sprd_suspend_timer_read(struct clocksource *cs)
+{
+ return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_LO) & cs->mask;
+}
+
+static int sprd_suspend_timer_enable(struct clocksource *cs)
+{
+ sprd_timer_update_counter(timer_of_base(&suspend_to),
+ TIMER_VALUE_LO_MASK);
+ sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static void sprd_suspend_timer_disable(struct clocksource *cs)
+{
+ sprd_timer_disable(timer_of_base(&suspend_to));
+}
+
+static struct clocksource suspend_clocksource = {
+ .name = "sprd_suspend_timer",
+ .rating = 200,
+ .read = sprd_suspend_timer_read,
+ .enable = sprd_suspend_timer_enable,
+ .disable = sprd_suspend_timer_disable,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static int __init sprd_suspend_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &suspend_to);
+ if (ret)
+ return ret;
+
+ clocksource_register_hz(&suspend_clocksource,
+ timer_of_rate(&suspend_to));
+
+ return 0;
+}
+
TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
+TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer",
+ sprd_suspend_timer_init);
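
sprd_suspend_timer_read() above turns a 32-bit down-counter into the up-counting value the clocksource core expects by complementing the shadow register under the clocksource mask. A minimal sketch of that inversion:

#include <stdint.h>
#include <stdio.h>

#define MASK32 0xffffffffull	/* CLOCKSOURCE_MASK(32) */

/* A counter ticking toward zero becomes a monotonically rising value. */
static uint64_t up_cycles(uint32_t down_counter)
{
	return ~(uint64_t)down_counter & MASK32;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)up_cycles(0xffffffff));	/* 0 */
	printf("%llu\n", (unsigned long long)up_cycles(0xfffffffe));	/* 1 */
	return 0;
}
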
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 880a861ab3c8..29e2e1a78a43 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -78,8 +78,7 @@ static struct ti_32k ti_32k_timer = {
.rating = 250,
.read = ti_32k_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS |
- CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index a6a0338eea77..f74689334f7c 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -162,7 +162,7 @@ static int __init zevio_timer_add(struct device_node *node)
timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
timer->clkevt.tick_resume = zevio_timer_set_oneshot;
timer->clkevt.rating = 200;
- timer->clkevt.cpumask = cpu_all_mask;
+ timer->clkevt.cpumask = cpu_possible_mask;
timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->clkevt.irq = irqnr;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 781a4a337557..d8e159feb573 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
config EFI_ARMSTUB
bool
+config EFI_ARMSTUB_DTB_LOADER
+ bool "Enable the DTB loader"
+ depends on EFI_ARMSTUB
+ help
+ Select this config option to add support for the dtb= command
+ line parameter, allowing a device tree blob to be loaded into
+ memory from the EFI System Partition by the stub.
+
+ The device tree is typically provided by the platform or by
+ the bootloader, so this option is mostly for development
+ purposes only.
+
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
depends on EFI_VARS
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 3bf0dca378a6..a7902fccdcfa 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
{
static atomic64_t seq;
- if (!atomic64_read(&seq))
- atomic64_set(&seq, ((u64)get_seconds()) << 32);
+ if (!atomic64_read(&seq)) {
+ time64_t time = ktime_get_real_seconds();
+
+ /*
+ * This code is unlikely to still be needed in year 2106,
+ * but just in case, let's use a few more bits for timestamps
+ * after y2038 to be sure they keep increasing monotonically
+ * for the next few hundred years...
+ */
+ if (time < 0x80000000)
+			atomic64_set(&seq, ((u64)time) << 32);
+		else
+			atomic64_set(&seq, 0x8000000000000000ull |
+					   ((u64)time) << 24);
+ }
return atomic64_inc_return(&seq);
}
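
The resulting record-id layout puts the boot timestamp in the high bits and leaves the low bits to the monotonically increasing sequence counter: seconds << 32 before the 2038 rollover, and the MSB set with seconds << 24 afterwards. A sketch of just the first-id computation (input values illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t first_record_id(int64_t boot_seconds)
{
	if (boot_seconds < 0x80000000)
		return (uint64_t)boot_seconds << 32;	/* pre-y2038 layout */
	/* post-y2038: MSB flags the wider-timestamp layout */
	return 0x8000000000000000ull | ((uint64_t)boot_seconds << 24);
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)first_record_id(1700000000));
	printf("%016llx\n", (unsigned long long)first_record_id(0x90000000));
	return 0;
}
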
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
- } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: ARM processor error\n", newpfx);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 232f4915223b..2a29dd9c986d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -82,8 +82,11 @@ struct mm_struct efi_mm = {
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};
+struct workqueue_struct *efi_rts_wq;
+
static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
@@ -337,6 +340,18 @@ static int __init efisubsys_init(void)
if (!efi_enabled(EFI_BOOT))
return 0;
+ /*
+ * Since we process only one efi_runtime_service() at a time, an
+ * ordered workqueue (which creates only one execution context)
+ * should suffice for all our needs.
+ */
+ efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+ if (!efi_rts_wq) {
+ pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return 0;
+ }
+
/* We register the efi directory at /sys/firmware/efi */
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
@@ -388,7 +403,7 @@ subsys_initcall(efisubsys_init);
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
-int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
efi_memory_desc_t *md;
@@ -406,12 +421,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
u64 size;
u64 end;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_DATA &&
- md->type != EFI_RUNTIME_SERVICES_DATA) {
- continue;
- }
-
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 1ab80e06e7c5..5d06bd247d07 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
return;
rc = efi_mem_desc_lookup(efi.esrt, &md);
- if (rc < 0) {
+ if (rc < 0 ||
+ (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+ md.type != EFI_BOOT_SERVICES_DATA &&
+ md.type != EFI_RUNTIME_SERVICES_DATA)) {
pr_warn("ESRT header is not in the memory map.\n");
return;
}
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
end = esrt_data + size;
pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
- efi_mem_reserve(esrt_data, esrt_data_size);
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(esrt_data, esrt_data_size);
pr_debug("esrt-init: loaded.\n");
}
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 01a9d78ee415..6920033de6d4 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -40,31 +40,6 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
- void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_t *image = __image;
- efi_file_handle_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
-
- status = sys_table_arg->boottime->handle_protocol(handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
- return status;
- }
-
- status = io->open_volume(io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
void efi_char16_printk(efi_system_table_t *sys_table_arg,
efi_char16_t *str)
{
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
* 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
* boot is enabled if we can't determine its state.
*/
- if (secure_boot != efi_secureboot_mode_disabled &&
- strstr(cmdline_ptr, "dtb=")) {
- pr_efi(sys_table, "Ignoring DTB from command line.\n");
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ secure_boot != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ pr_efi(sys_table, "Ignoring DTB from command line.\n");
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
"dtb=",
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 50a9cab5a834..e94975f4655b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
return efi_call_proto(efi_file_handle, close, handle);
}
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ efi_file_handle_t **__fh)
+{
+ efi_file_io_interface_t *io;
+ efi_file_handle_t *fh;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_status_t status;
+ void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+ device_handle,
+ image);
+
+ status = efi_call_early(handle_protocol, handle,
+ &fs_proto, (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table_arg, "Failed to open volume\n");
+ else
+ *__fh = fh;
+
+ return status;
+}
+
/*
* Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- status = efi_open_volume(sys_table_arg, image,
- (void **)&fh);
+ status = efi_open_volume(sys_table_arg, image, &fh);
if (status != EFI_SUCCESS)
goto free_files;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index f59564b72ddc..32799cf039ef 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
- void **__fh);
-
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index ae54870b2788..aa66cbf23512 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -1,6 +1,15 @@
/*
* runtime-wrappers.c - Runtime Services function call wrappers
*
+ * Implementation summary:
+ * -----------------------
+ * 1. When a user/kernel thread requests execution of an
+ *    efi_runtime_service(), enqueue work to efi_rts_wq.
+ * 2. The caller thread waits until the work is finished, because it
+ *    depends on the return status and side effects of the
+ *    efi_runtime_service(); for instance, get_variable() and
+ *    get_next_variable().
+ *
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
#include <asm/efi.h>
/*
@@ -33,6 +45,76 @@
#define __efi_call_virt(f, args...) \
__efi_call_virt_pointer(efi.systab->runtime, f, args)
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+ GET_TIME,
+ SET_TIME,
+ GET_WAKEUP_TIME,
+ SET_WAKEUP_TIME,
+ GET_VARIABLE,
+ GET_NEXT_VARIABLE,
+ SET_VARIABLE,
+ QUERY_VARIABLE_INFO,
+ GET_NEXT_HIGH_MONO_COUNT,
+ UPDATE_CAPSULE,
+ QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @arg<1-5>: EFI Runtime Service function arguments
+ * @status: Status of executing EFI Runtime Service
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ */
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work: Queue efi_runtime_service() and wait until it's done
+ * @rts: efi_runtime_service() function identifier
+ * @rts_arg<1-5>: efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock); since the caller waits for the work to
+ * finish, _only_ one work item is queued at a time.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
+({ \
+ struct efi_runtime_work efi_rts_work; \
+ efi_rts_work.status = EFI_ABORTED; \
+ \
+ init_completion(&efi_rts_work.efi_rts_comp); \
+ INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
+ efi_rts_work.arg1 = _arg1; \
+ efi_rts_work.arg2 = _arg2; \
+ efi_rts_work.arg3 = _arg3; \
+ efi_rts_work.arg4 = _arg4; \
+ efi_rts_work.arg5 = _arg5; \
+ efi_rts_work.efi_rts_id = _rts; \
+ \
+ /* \
+	 * queue_work() returns 0 if the work was already on the queue; \
+	 * _ideally_ this should never happen. \
+ */ \
+ if (queue_work(efi_rts_wq, &efi_rts_work.work)) \
+ wait_for_completion(&efi_rts_work.efi_rts_comp); \
+ else \
+ pr_err("Failed to queue work to efi_rts_wq.\n"); \
+ \
+ efi_rts_work.status; \
+})
+
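
The macro above implements a hand-off-and-wait protocol: package the arguments, queue the work item, and block on a completion until the worker publishes the status. A user-space sketch of the same shape, with a pthread standing in for the ordered workqueue (all names illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct rts_work {
	int arg;
	int status;
	int done;
	pthread_mutex_t lock;
	pthread_cond_t comp;
};

static void *worker(void *p)
{
	struct rts_work *w = p;

	pthread_mutex_lock(&w->lock);
	w->status = w->arg * 2;		/* stand-in for efi_call_virt() */
	w->done = 1;
	pthread_cond_signal(&w->comp);	/* like complete() */
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	struct rts_work w = { .arg = 21, .lock = PTHREAD_MUTEX_INITIALIZER,
			      .comp = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, worker, &w);	/* like queue_work() */
	pthread_mutex_lock(&w.lock);
	while (!w.done)				/* like wait_for_completion() */
		pthread_cond_wait(&w.comp, &w.lock);
	pthread_mutex_unlock(&w.lock);
	pthread_join(&t, NULL);
	printf("status=%d\n", w.status);
	return 0;
}
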
void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
unsigned long cur_flags, mismatch;
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
*/
static DEFINE_SEMAPHORE(efi_runtime_lock);
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * How efi_call_rts() interprets the arguments in efi_runtime_work:
+ * 1. If the argument was a pointer, cast it from void pointer back
+ *    to its original pointer type.
+ * 2. If the argument was a value, cast it from void pointer to a
+ *    pointer of the original type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+ struct efi_runtime_work *efi_rts_work;
+ void *arg1, *arg2, *arg3, *arg4, *arg5;
+ efi_status_t status = EFI_NOT_FOUND;
+
+ efi_rts_work = container_of(work, struct efi_runtime_work, work);
+ arg1 = efi_rts_work->arg1;
+ arg2 = efi_rts_work->arg2;
+ arg3 = efi_rts_work->arg3;
+ arg4 = efi_rts_work->arg4;
+ arg5 = efi_rts_work->arg5;
+
+ switch (efi_rts_work->efi_rts_id) {
+ case GET_TIME:
+ status = efi_call_virt(get_time, (efi_time_t *)arg1,
+ (efi_time_cap_t *)arg2);
+ break;
+ case SET_TIME:
+ status = efi_call_virt(set_time, (efi_time_t *)arg1);
+ break;
+ case GET_WAKEUP_TIME:
+ status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+ (efi_bool_t *)arg2, (efi_time_t *)arg3);
+ break;
+ case SET_WAKEUP_TIME:
+ status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+ (efi_time_t *)arg2);
+ break;
+ case GET_VARIABLE:
+ status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, (u32 *)arg3,
+ (unsigned long *)arg4, (void *)arg5);
+ break;
+ case GET_NEXT_VARIABLE:
+ status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+ (efi_char16_t *)arg2,
+ (efi_guid_t *)arg3);
+ break;
+ case SET_VARIABLE:
+ status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, *(u32 *)arg3,
+ *(unsigned long *)arg4, (void *)arg5);
+ break;
+ case QUERY_VARIABLE_INFO:
+ status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+ (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+ break;
+ case GET_NEXT_HIGH_MONO_COUNT:
+ status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+ break;
+ case UPDATE_CAPSULE:
+ status = efi_call_virt(update_capsule,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2,
+ *(unsigned long *)arg3);
+ break;
+ case QUERY_CAPSULE_CAPS:
+ status = efi_call_virt(query_capsule_caps,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2, (u64 *)arg3,
+ (int *)arg4);
+ break;
+ default:
+ /*
+ * Ideally, we should never reach here because a caller of this
+ * function should have put the right efi_runtime_service()
+	 * function identifier into efi_rts_work->efi_rts_id.
+ */
+ pr_err("Requested executing invalid EFI Runtime Service.\n");
+ }
+ efi_rts_work->status = status;
+ complete(&efi_rts_work->efi_rts_comp);
+}
+
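
Everything crossing into efi_call_rts() travels as a void pointer: pointer arguments pass through and are cast back, while value arguments (see the &enabled and &attr call sites below) arrive as a pointer to the value and are dereferenced on the far side. A compact sketch of that convention (names illustrative):

#include <stdio.h>

static long do_call(int id, void *arg1, void *arg2)
{
	switch (id) {
	case 0:	/* arg1 was a pointer: cast back to its type */
		return *(long *)arg1 + 1;
	case 1:	/* arg1 was a value: cast to pointer type, dereference */
		return 10 * *(int *)arg1 + *(long *)arg2;
	}
	return -1;
}

int main(void)
{
	long out = 41;
	int flags = 3;	/* a by-value argument, queued as &flags */

	printf("%ld\n", do_call(0, &out, NULL));
	printf("%ld\n", do_call(1, &flags, &out));
	return 0;
}
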
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_time, tm, tc);
+ status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_time, tm);
+ status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+ status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_wakeup_time, enabled, tm);
+ status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_variable, name_size, name, vendor);
+ status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_variable_info, attr, storage_space,
- remaining_space, max_variable_size);
+ status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+ remaining_space, max_variable_size, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_high_mono_count, count);
+ status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(update_capsule, capsules, count, sg_list);
+ status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
- reset_type);
+ status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+ max_size, reset_type, NULL);
up(&efi_runtime_lock);
return status;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index e2232cbcec8b..addd9fecc198 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -25,6 +25,7 @@
struct acpi_gpio_event {
struct list_head node;
+ struct list_head initial_sync_list;
acpi_handle handle;
unsigned int pin;
unsigned int irq;
@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
struct list_head events;
};
+static LIST_HEAD(acpi_gpio_initial_sync_list);
+static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
if (!gc->parent)
@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
return gpiochip_get_desc(chip, pin);
}
+static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+{
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
+static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+{
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ if (!list_empty(&event->initial_sync_list))
+ list_del_init(&event->initial_sync_list);
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
struct acpi_gpio_event *event = data;
@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
irq_handler_t handler = NULL;
struct gpio_desc *desc;
unsigned long irqflags;
- int ret, pin, irq;
+ int ret, pin, irq, value;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
+ value = gpiod_get_value(desc);
+
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
event->irq = irq;
event->pin = pin;
event->desc = desc;
+ INIT_LIST_HEAD(&event->initial_sync_list);
ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event);
@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
enable_irq_wake(irq);
list_add_tail(&event->node, &acpi_gpio->events);
+
+ /*
+ * Make sure we trigger the initial state of the IRQ when using RISING
+	 * or FALLING. Note we run the handlers at late_init; the AML code
+	 * may refer to OperationRegions from other (built-in) drivers which
+	 * may be probed after us.
+ */
+ if (handler == acpi_gpio_irq_handler &&
+ (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+ acpi_gpio_add_to_initial_sync_list(event);
+
return AE_OK;
fail_free_event:
@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
+ acpi_gpio_del_from_initial_sync_list(event);
+
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
disable_irq_wake(event->irq);
@@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL;
}
+
+/* Sync the initial state of handlers after all builtin drivers have probed */
+static int acpi_gpio_initial_sync(void)
+{
+ struct acpi_gpio_event *event, *ep;
+
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+ initial_sync_list) {
+ acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+ list_del_init(&event->initial_sync_list);
+ }
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+ return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_initial_sync);
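
The pattern above is register-now, replay-later: events whose initial pin state already matches their edge trigger are parked on a list at request time, and their AML handlers are run once from a late_initcall_sync hook after everything else has probed. A stripped-down sketch of the list side of that pattern (locking omitted, names illustrative):

#include <stdio.h>

struct event {
	const char *name;
	struct event *next;
};

static struct event *initial_sync_list;

static void add_to_initial_sync(struct event *e)
{
	e->next = initial_sync_list;
	initial_sync_list = e;
}

/* Like the late_initcall_sync hook: replay each parked handler once. */
static void initial_sync(void)
{
	for (struct event *e = initial_sync_list; e; e = e->next)
		printf("replaying handler for %s\n", e->name);
	initial_sync_list = NULL;
}

int main(void)
{
	struct event a = { .name = "pin7" }, b = { .name = "pin9" };

	add_to_initial_sync(&a);
	add_to_initial_sync(&b);
	initial_sync();
	return 0;
}
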
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d638c0fb3418..b54fb78a283c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -553,7 +553,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Clone the lessor file to create a new file for us */
DRM_DEBUG_LEASE("Allocating lease file\n");
- lessee_file = filp_clone_open(lessor_file);
+ lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
goto out_lessee;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f10840ad465c..ccf42663a908 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -937,6 +937,18 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_MLXREG_FAN
+	tristate "Mellanox FAN driver"
+ depends on MELLANOX_PLATFORM
+ imply THERMAL
+ select REGMAP
+ help
+ This option enables support for the FAN control on the Mellanox
+ Ethernet and InfiniBand switches. The driver can be activated by the
+	  platform device add call. Say Y to enable this support. To compile this
+ driver as a module, choose 'M' here: the module will be called
+ mlxreg-fan.
+
config SENSORS_TC654
tristate "Microchip TC654/TC655 and compatibles"
depends on I2C
@@ -1256,6 +1268,16 @@ config SENSORS_NCT7904
This driver can also be built as a module. If so, the module
will be called nct7904.
+config SENSORS_NPCM7XX
+ tristate "Nuvoton NPCM750 and compatible PWM and Fan controllers"
+ imply THERMAL
+ help
+ This driver provides support for Nuvoton NPCM750/730/715/705 PWM
+ and Fan controllers.
+
+ This driver can also be built as a module. If so, the module
+ will be called npcm750-pwm-fan.
+
config SENSORS_NSA320
tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
depends on GPIOLIB && OF
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e7d52a36e6c4..842c92f83ce6 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -129,11 +129,13 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
+obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o
+obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o
obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 9ef84998c7f3..90837f7c7d0f 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -194,8 +194,7 @@ struct adt7475_data {
struct mutex lock;
unsigned long measure_updated;
- unsigned long limits_updated;
- char valid;
+ bool valid;
u8 config4;
u8 config5;
@@ -326,6 +325,9 @@ static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
unsigned short val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case ALARM:
return sprintf(buf, "%d\n",
@@ -381,6 +383,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case HYSTERSIS:
mutex_lock(&data->lock);
@@ -625,6 +630,9 @@ static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out, val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
mutex_lock(&data->lock);
out = (data->range[sattr->index] >> 4) & 0x0F;
val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
@@ -683,6 +691,9 @@ static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (sattr->nr == ALARM)
out = (data->alarms >> (sattr->index + 10)) & 1;
else
@@ -720,6 +731,9 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
}
@@ -729,6 +743,9 @@ static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
}
@@ -738,6 +755,9 @@ static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
}
@@ -945,6 +965,11 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
-	int i = clamp_val(data->range[sattr->index] & 0xf, 0,
-			  ARRAY_SIZE(pwmfreq_table) - 1);
+	int i;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	i = clamp_val(data->range[sattr->index] & 0xf, 0,
+		      ARRAY_SIZE(pwmfreq_table) - 1);
return sprintf(buf, "%d\n", pwmfreq_table[i]);
}
@@ -1035,6 +1058,10 @@ static ssize_t cpu0_vid_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
@@ -1385,6 +1412,121 @@ static void adt7475_remove_files(struct i2c_client *client,
sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
}
+static int adt7475_update_limits(struct i2c_client *client)
+{
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int i;
+ int ret;
+
+ ret = adt7475_read(REG_CONFIG4);
+ if (ret < 0)
+ return ret;
+ data->config4 = ret;
+
+ ret = adt7475_read(REG_CONFIG5);
+ if (ret < 0)
+ return ret;
+ data->config5 = ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(VOLTAGE_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][i] = ret << 2;
+
+ ret = adt7475_read(VOLTAGE_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][i] = ret << 2;
+ }
+
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_VTT_MIN);
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][5] = ret << 2;
+
+ ret = adt7475_read(REG_VTT_MAX);
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][5] = ret << 2;
+ }
+
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(TEMP_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MAX][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_TMIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[AUTOMIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_THERM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[THERM][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_OFFSET_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[OFFSET][i] = ret;
+ }
+ adt7475_read_hystersis(client);
+
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[MIN][i] = ret;
+ }
+
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MAX][i] = ret;
+
+ ret = adt7475_read(PWM_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MIN][i] = ret;
+ /* Set the channel and control information */
+ adt7475_read_pwm(client, i);
+ }
+
+ ret = adt7475_read(TEMP_TRANGE_REG(0));
+ if (ret < 0)
+ return ret;
+ data->range[0] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(1));
+ if (ret < 0)
+ return ret;
+ data->range[1] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(2));
+ if (ret < 0)
+ return ret;
+ data->range[2] = ret;
+
+ return 0;
+}
+
static int adt7475_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1562,6 +1704,11 @@ static int adt7475_probe(struct i2c_client *client,
(data->bypass_attn & (1 << 3)) ? " in3" : "",
(data->bypass_attn & (1 << 4)) ? " in4" : "");
+	/* Limits and settings should never change, so update them only once */
+ ret = adt7475_update_limits(client);
+ if (ret)
+ goto eremove;
+
return 0;
eremove:
@@ -1658,121 +1805,122 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
}
}
-static struct adt7475_data *adt7475_update_device(struct device *dev)
+static int adt7475_update_measure(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7475_data *data = i2c_get_clientdata(client);
u16 ext;
int i;
+ int ret;
- mutex_lock(&data->lock);
+ ret = adt7475_read(REG_STATUS2);
+ if (ret < 0)
+ return ret;
+ data->alarms = ret << 8;
- /* Measurement values update every 2 seconds */
- if (time_after(jiffies, data->measure_updated + HZ * 2) ||
- !data->valid) {
- data->alarms = adt7475_read(REG_STATUS2) << 8;
- data->alarms |= adt7475_read(REG_STATUS1);
-
- ext = (adt7475_read(REG_EXTEND2) << 8) |
- adt7475_read(REG_EXTEND1);
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- data->voltage[INPUT][i] =
- (adt7475_read(VOLTAGE_REG(i)) << 2) |
- ((ext >> (i * 2)) & 3);
- }
+ ret = adt7475_read(REG_STATUS1);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret;
- for (i = 0; i < ADT7475_TEMP_COUNT; i++)
- data->temp[INPUT][i] =
- (adt7475_read(TEMP_REG(i)) << 2) |
- ((ext >> ((i + 5) * 2)) & 3);
+ ret = adt7475_read(REG_EXTEND2);
+ if (ret < 0)
+ return ret;
- if (data->has_voltage & (1 << 5)) {
- data->alarms |= adt7475_read(REG_STATUS4) << 24;
- ext = adt7475_read(REG_EXTEND3);
- data->voltage[INPUT][5] = adt7475_read(REG_VTT) << 2 |
- ((ext >> 4) & 3);
- }
+ ext = (ret << 8);
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[INPUT][i] =
- adt7475_read_word(client, TACH_REG(i));
- }
+ ret = adt7475_read(REG_EXTEND1);
+ if (ret < 0)
+ return ret;
- /* Updated by hw when in auto mode */
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
- }
+ ext |= ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ ret = adt7475_read(VOLTAGE_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][i] =
+ (ret << 2) |
+ ((ext >> (i * 2)) & 3);
+ }
- if (data->has_vid)
- data->vid = adt7475_read(REG_VID) & 0x3f;
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ ret = adt7475_read(TEMP_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[INPUT][i] =
+ (ret << 2) |
+ ((ext >> ((i + 5) * 2)) & 3);
+ }
- data->measure_updated = jiffies;
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_STATUS4);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret << 24;
+
+ ret = adt7475_read(REG_EXTEND3);
+ if (ret < 0)
+ return ret;
+ ext = ret;
+
+ ret = adt7475_read(REG_VTT);
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][5] = ret << 2 |
+ ((ext >> 4) & 3);
}
- /* Limits and settings, should never change update every 60 seconds */
- if (time_after(jiffies, data->limits_updated + HZ * 60) ||
- !data->valid) {
- data->config4 = adt7475_read(REG_CONFIG4);
- data->config5 = adt7475_read(REG_CONFIG5);
-
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- /* Adjust values so they match the input precision */
- data->voltage[MIN][i] =
- adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
- data->voltage[MAX][i] =
- adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
- }
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[INPUT][i] = ret;
+ }
- if (data->has_voltage & (1 << 5)) {
- data->voltage[MIN][5] = adt7475_read(REG_VTT_MIN) << 2;
- data->voltage[MAX][5] = adt7475_read(REG_VTT_MAX) << 2;
- }
+ /* Updated by hw when in auto mode */
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[INPUT][i] = ret;
+ }
- for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
- /* Adjust values so they match the input precision */
- data->temp[MIN][i] =
- adt7475_read(TEMP_MIN_REG(i)) << 2;
- data->temp[MAX][i] =
- adt7475_read(TEMP_MAX_REG(i)) << 2;
- data->temp[AUTOMIN][i] =
- adt7475_read(TEMP_TMIN_REG(i)) << 2;
- data->temp[THERM][i] =
- adt7475_read(TEMP_THERM_REG(i)) << 2;
- data->temp[OFFSET][i] =
- adt7475_read(TEMP_OFFSET_REG(i));
- }
- adt7475_read_hystersis(client);
+ if (data->has_vid) {
+ ret = adt7475_read(REG_VID);
+ if (ret < 0)
+ return ret;
+ data->vid = ret & 0x3f;
+ }
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[MIN][i] =
- adt7475_read_word(client, TACH_MIN_REG(i));
- }
+ return 0;
+}
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
- data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
- /* Set the channel and control information */
- adt7475_read_pwm(client, i);
- }
+static struct adt7475_data *adt7475_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int ret;
- data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
- data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
- data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));
+ mutex_lock(&data->lock);
- data->limits_updated = jiffies;
- data->valid = 1;
+ /* Measurement values update every 2 seconds */
+ if (time_after(jiffies, data->measure_updated + HZ * 2) ||
+ !data->valid) {
+ ret = adt7475_update_measure(dev);
+ if (ret) {
+ data->valid = false;
+ mutex_unlock(&data->lock);
+ return ERR_PTR(ret);
+ }
+ data->measure_updated = jiffies;
+ data->valid = true;
}
mutex_unlock(&data->lock);
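
The adt7475 conversion above makes adt7475_update_device() return an ERR_PTR on I2C failure, which every sysfs show handler now checks. A minimal, self-contained sketch of that pattern (userspace stand-ins and hypothetical names, not the driver's API):

#include <stdio.h>

/* Userspace stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

struct demo_data { int value; };

/* Returns valid data, or an ERR_PTR when the (simulated) bus read fails. */
static struct demo_data *demo_update(int fail)
{
	static struct demo_data d = { .value = 42 };

	if (fail)
		return ERR_PTR(-5);	/* -EIO */
	return &d;
}

static int demo_show(int fail)
{
	struct demo_data *data = demo_update(fail);

	if (IS_ERR(data))
		return PTR_ERR(data);	/* propagate -EIO to the caller */
	printf("value: %d\n", data->value);
	return 0;
}

int main(void)
{
	printf("ok:   %d\n", demo_show(0));	/* prints value 42, then 0 */
	printf("fail: %d\n", demo_show(1));	/* prints -5 */
	return 0;
}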
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 1ea7ca510f84..aaebeb726d6a 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -443,8 +443,10 @@ static int emc1403_probe(struct i2c_client *client,
switch (id->driver_data) {
case emc1404:
data->groups[2] = &emc1404_group;
+ /* fall through */
case emc1403:
data->groups[1] = &emc1403_group;
+ /* fall through */
case emc1402:
data->groups[0] = &emc1402_group;
}
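
The /* fall through */ annotations added above mark the cascade as intentional for compilers and static checkers (e.g. gcc's -Wimplicit-fallthrough): each newer chip registers its own attribute group and then falls through to pick up every older group as well. A standalone sketch of the same shape (group names are placeholders):

#include <stdio.h>

enum chip { emc1402, emc1403, emc1404 };

/*
 * Sketch of the cascading switch: each newer chip inherits every attribute
 * group of the older ones, so the cases deliberately fall through from the
 * largest feature set down to the smallest.
 */
static void fill_groups(enum chip id, const char *groups[3])
{
	switch (id) {
	case emc1404:
		groups[2] = "emc1404_group";
		/* fall through */
	case emc1403:
		groups[1] = "emc1403_group";
		/* fall through */
	case emc1402:
		groups[0] = "emc1402_group";
	}
}

int main(void)
{
	const char *groups[3] = { 0 };
	int i;

	fill_groups(emc1403, groups);	/* sets groups[1] and groups[0] */
	for (i = 0; i < 3; i++)
		printf("%d: %s\n", i, groups[i] ? groups[i] : "(unset)");
	return 0;
}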
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 69031a0f7ed2..2f3f875c06ac 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -22,7 +22,6 @@
* struct iio_hwmon_state - device instance state
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
- * @hwmon_dev: associated hwmon device
* @attr_group: the group of attributes
* @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
@@ -30,7 +29,6 @@
struct iio_hwmon_state {
struct iio_channel *channels;
int num_channels;
- struct device *hwmon_dev;
struct attribute_group attr_group;
const struct attribute_group *groups[2];
struct attribute **attrs;
@@ -68,12 +66,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
enum iio_chan_type type;
struct iio_channel *channels;
const char *name = "iio_hwmon";
+ struct device *hwmon_dev;
char *sname;
if (dev->of_node && dev->of_node->name)
name = dev->of_node->name;
- channels = iio_channel_get_all(dev);
+ channels = devm_iio_channel_get_all(dev);
if (IS_ERR(channels)) {
if (PTR_ERR(channels) == -ENODEV)
return -EPROBE_DEFER;
@@ -81,10 +80,8 @@ static int iio_hwmon_probe(struct platform_device *pdev)
}
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st == NULL)
+ return -ENOMEM;
st->channels = channels;
@@ -95,22 +92,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attrs = devm_kcalloc(dev,
st->num_channels + 1, sizeof(*st->attrs),
GFP_KERNEL);
- if (st->attrs == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st->attrs == NULL)
+ return -ENOMEM;
for (i = 0; i < st->num_channels; i++) {
a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
- if (a == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (a == NULL)
+ return -ENOMEM;
sysfs_attr_init(&a->dev_attr.attr);
ret = iio_get_channel_type(&st->channels[i], &type);
if (ret < 0)
- goto error_release_channels;
+ return ret;
switch (type) {
case IIO_VOLTAGE:
@@ -134,13 +127,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
humidity_i++);
break;
default:
- ret = -EINVAL;
- goto error_release_channels;
- }
- if (a->dev_attr.attr.name == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
+ return -EINVAL;
}
+ if (a->dev_attr.attr.name == NULL)
+ return -ENOMEM;
+
a->dev_attr.show = iio_hwmon_read_val;
a->dev_attr.attr.mode = S_IRUGO;
a->index = i;
@@ -151,34 +142,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->groups[0] = &st->attr_group;
sname = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!sname) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (!sname)
+ return -ENOMEM;
strreplace(sname, '-', '_');
- st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
- st->groups);
- if (IS_ERR(st->hwmon_dev)) {
- ret = PTR_ERR(st->hwmon_dev);
- goto error_release_channels;
- }
- platform_set_drvdata(pdev, st);
- return 0;
-
-error_release_channels:
- iio_channel_release_all(channels);
- return ret;
-}
-
-static int iio_hwmon_remove(struct platform_device *pdev)
-{
- struct iio_hwmon_state *st = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(st->hwmon_dev);
- iio_channel_release_all(st->channels);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, sname, st,
+ st->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct of_device_id iio_hwmon_of_match[] = {
@@ -193,7 +163,6 @@ static struct platform_driver __refdata iio_hwmon_driver = {
.of_match_table = iio_hwmon_of_match,
},
.probe = iio_hwmon_probe,
- .remove = iio_hwmon_remove,
};
module_platform_driver(iio_hwmon_driver);
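
The iio_hwmon rewrite above is a textbook devm_* cleanup: once every allocation and registration is device-managed, the error paths collapse to plain returns and the .remove callback disappears. A toy userspace model of that mechanism (names hypothetical; the real devres core lives in drivers/base/devres.c):

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of devm_*: each resource registers a release callback on the
 * device; teardown (after a failed probe or on unbind) runs the callbacks
 * in reverse order, so probe paths need no goto-based unwinding.
 */
struct devres {
	void (*release)(void *);
	void *data;
	struct devres *next;
};

struct device { struct devres *res; };

static void *devm_alloc(struct device *dev, size_t n)
{
	struct devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return NULL;
	dr->release = free;
	dr->data = calloc(1, n);
	dr->next = dev->res;
	dev->res = dr;		/* newest first => reverse-order release */
	return dr->data;
}

static void device_release_all(struct device *dev)
{
	while (dev->res) {
		struct devres *dr = dev->res;

		dev->res = dr->next;
		dr->release(dr->data);
		free(dr);
	}
}

static int probe(struct device *dev)
{
	char *a = devm_alloc(dev, 16);
	char *b = devm_alloc(dev, 32);

	if (!a || !b)
		return -12;	/* -ENOMEM: a plain return, no unwinding */
	printf("probed: %p %p\n", (void *)a, (void *)b);
	return 0;
}

int main(void)
{
	struct device dev = { 0 };
	int ret = probe(&dev);

	device_release_all(&dev);	/* what the core does on unbind/failure */
	return ret ? 1 : 0;
}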
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 17c6460ae351..bb15d7816a29 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -99,12 +99,8 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1700X", 20000 },
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen 7 2700X", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
+ { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
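
The rewritten k10temp rows match on a model-name prefix instead of a full name, so one entry now covers every SKU of a generation. Assuming the lookup scans the CPU model string for each table id as a substring (which is what prefix rows like these rely on), a sketch:

#include <stdio.h>
#include <string.h>

struct tctl_offset {
	int model;
	const char *id;		/* prefix of the CPU model string */
	int offset;		/* in millidegrees Celsius */
};

static const struct tctl_offset table[] = {
	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};

/* Assumed lookup: the first row whose id occurs in the model string wins. */
static int lookup_offset(const char *model_name)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strstr(model_name, table[i].id))
			return table[i].offset;
	return 0;
}

int main(void)
{
	printf("%d\n", lookup_offset("AMD Ryzen Threadripper 1950X"));  /* 27000 */
	printf("%d\n", lookup_offset("AMD Ryzen Threadripper 2990WX")); /* 27000 */
	printf("%d\n", lookup_offset("AMD Ryzen 5 2600"));              /* 0 */
	return 0;
}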
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
new file mode 100644
index 000000000000..de46577c7d5a
--- /dev/null
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+// Copyright (c) 2018 Vadim Pasternak <vadimp@mellanox.com>
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#define MLXREG_FAN_MAX_TACHO 12
+#define MLXREG_FAN_MAX_STATE 10
+#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
+#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
+/*
+ * Minimum and maximum allowed FAN speed in percent: from 20% to 100%. Values
+ * MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the dynamic minimum FAN speed. For example, if the value is set to
+ * 14 (40%), the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7,
+ * 8, 9, 10, giving PWM speeds in percent of 40, 40, 40, 40, 40, 50, 60, 70,
+ * 80, 90, 100.
+ */
+#define MLXREG_FAN_SPEED_MIN (MLXREG_FAN_MAX_STATE + 2)
+#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
+#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
+#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
+#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132
+/*
+ * FAN datasheet defines the formula for RPM calculations as RPM = 15/t-high.
+ * The logic in a programmable device measures the time t-high by sampling the
+ * tachometer every t-sample (with the default value 11.32 uS) and increments
+ * a counter (N) as long as the pulse has not changed:
+ * RPM = 15 / (t-sample * (K + Regval)), where:
+ * Regval: is the value read from the programmable device register;
+ * - 0xff - represents tachometer fault;
+ * - 0xfe - represents tachometer minimum value, which is 4444 RPM;
+ * - 0x00 - represents tachometer maximum value, which is about 30000 RPM;
+ * K: is 44 and represents the minimum allowed samples per pulse;
+ * N: is equal to K + Regval;
+ * In order to calculate RPM from the register value the following formula is
+ * used: RPM = 15 / ((Regval + K) * 11.32 * 10^(-6)), which in the
+ * default case is modified to:
+ * RPM = 15000000 * 100 / ((Regval + 44) * 1132);
+ * - for Regval 0x00, RPM will be 15000000 * 100 / (44 * 1132) = 30115;
+ * - for Regval 0xfe, RPM will be 15000000 * 100 / ((254 + 44) * 1132) = 4446;
+ * In the common case the formula is modified to:
+ * RPM = 15000000 * 100 / ((Regval + samples) * divider).
+ */
+#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
+ ((rval) + (s)) * (d)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
+ MLXREG_FAN_MAX_STATE, \
+ MLXREG_FAN_MAX_DUTY))
+#define MLXREG_FAN_PWM_STATE2DUTY(stat) (DIV_ROUND_CLOSEST((stat) * \
+ MLXREG_FAN_MAX_DUTY, \
+ MLXREG_FAN_MAX_STATE))
+
+/*
+ * struct mlxreg_fan_tacho - tachometer data (internal use):
+ *
+ * @connected: indicates if tachometer is connected;
+ * @reg: register offset;
+ * @mask: fault mask;
+ */
+struct mlxreg_fan_tacho {
+ bool connected;
+ u32 reg;
+ u32 mask;
+};
+
+/*
+ * struct mlxreg_fan_pwm - PWM data (internal use):
+ *
+ * @connected: indicates if PWM is connected;
+ * @reg: register offset;
+ */
+struct mlxreg_fan_pwm {
+ bool connected;
+ u32 reg;
+};
+
+/*
+ * struct mlxreg_fan - private data (internal use):
+ *
+ * @dev: basic device;
+ * @regmap: register map of parent device;
+ * @tacho: tachometer data;
+ * @pwm: PWM data;
+ * @samples: minimum allowed samples per pulse;
+ * @divider: divider value for tachometer RPM calculation;
+ * @cooling: cooling device levels;
+ * @cdev: cooling device;
+ */
+struct mlxreg_fan {
+ struct device *dev;
+ void *regmap;
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO];
+ struct mlxreg_fan_pwm pwm;
+ int samples;
+ int divider;
+ u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ struct thermal_cooling_device *cdev;
+};
+
+static int
+mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+ struct mlxreg_fan_tacho *tacho;
+ u32 regval;
+ int err;
+
+ switch (type) {
+ case hwmon_fan:
+ tacho = &fan->tacho[channel];
+ switch (attr) {
+ case hwmon_fan_input:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_RPM(regval, fan->divider,
+ fan->samples);
+ break;
+
+ case hwmon_fan_fault:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_FAULT(regval, tacho->mask);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err)
+ return err;
+
+ *val = regval;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < MLXREG_FAN_MIN_DUTY ||
+ val > MLXREG_FAN_MAX_DUTY)
+ return -EINVAL;
+ return regmap_write(fan->regmap, fan->pwm.reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t
+mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ if (!(((struct mlxreg_fan *)data)->tacho[channel].connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+
+ case hwmon_pwm:
+ if (!(((struct mlxreg_fan *)data)->pwm.connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const u32 mlxreg_fan_hwmon_fan_config[] = {
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
+ .type = hwmon_fan,
+ .config = mlxreg_fan_hwmon_fan_config,
+};
+
+static const u32 mlxreg_fan_hwmon_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
+ .type = hwmon_pwm,
+ .config = mlxreg_fan_hwmon_pwm_config,
+};
+
+static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
+ &mlxreg_fan_hwmon_fan,
+ &mlxreg_fan_hwmon_pwm,
+ NULL
+};
+
+static const struct hwmon_ops mlxreg_fan_hwmon_hwmon_ops = {
+ .is_visible = mlxreg_fan_is_visible,
+ .read = mlxreg_fan_read,
+ .write = mlxreg_fan_write,
+};
+
+static const struct hwmon_chip_info mlxreg_fan_hwmon_chip_info = {
+ .ops = &mlxreg_fan_hwmon_hwmon_ops,
+ .info = mlxreg_fan_hwmon_info,
+};
+
+static int mlxreg_fan_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MLXREG_FAN_MAX_STATE;
+ return 0;
+}
+
+static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ *state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+
+ return 0;
+}
+
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ unsigned long cur_state;
+ u32 regval;
+ int i;
+ int err;
+
+ /*
+	 * Verify whether this request is for changing the allowed FAN dynamic
+	 * minimum. If it is, update the cooling levels accordingly and update
+	 * the state if the current state is below the newly requested minimum
+	 * state. For example, if the current state is 5 and the minimal state
+	 * is to be changed from 4 to 6, fan->cooling_levels[0..5] will all be
+	 * changed from 4 to 6, and state 5 (fan->cooling_levels[4]) will be
+	 * overwritten.
+ */
+ if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+ state -= MLXREG_FAN_MAX_STATE;
+ for (i = 0; i < state; i++)
+ fan->cooling_levels[i] = state;
+ for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+ if (state < cur_state)
+ return 0;
+
+ state = cur_state;
+ }
+
+ if (state > MLXREG_FAN_MAX_STATE)
+ return -EINVAL;
+
+ /* Normalize the state to the valid speed range. */
+ state = fan->cooling_levels[state];
+ err = regmap_write(fan->regmap, fan->pwm.reg,
+ MLXREG_FAN_PWM_STATE2DUTY(state));
+ if (err) {
+ dev_err(fan->dev, "Failed to write PWM duty\n");
+ return err;
+ }
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+ .get_max_state = mlxreg_fan_get_max_state,
+ .get_cur_state = mlxreg_fan_get_cur_state,
+ .set_cur_state = mlxreg_fan_set_cur_state,
+};
+
+static int mlxreg_fan_config(struct mlxreg_fan *fan,
+ struct mlxreg_core_platform_data *pdata)
+{
+ struct mlxreg_core_data *data = pdata->data;
+ bool configured = false;
+ int tacho_num = 0, i;
+
+ fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
+ fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
+ for (i = 0; i < pdata->counter; i++, data++) {
+ if (strnstr(data->label, "tacho", sizeof(data->label))) {
+ if (tacho_num == MLXREG_FAN_MAX_TACHO) {
+ dev_err(fan->dev, "too many tacho entries: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->tacho[tacho_num].reg = data->reg;
+ fan->tacho[tacho_num].mask = data->mask;
+ fan->tacho[tacho_num++].connected = true;
+ } else if (strnstr(data->label, "pwm", sizeof(data->label))) {
+ if (fan->pwm.connected) {
+ dev_err(fan->dev, "duplicate pwm entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->pwm.reg = data->reg;
+ fan->pwm.connected = true;
+ } else if (strnstr(data->label, "conf", sizeof(data->label))) {
+ if (configured) {
+ dev_err(fan->dev, "duplicate conf entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ /* Validate that conf parameters are not zeros. */
+ if (!data->mask || !data->bit) {
+ dev_err(fan->dev, "invalid conf entry params: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->samples = data->mask;
+ fan->divider = data->bit;
+ configured = true;
+ } else {
+ dev_err(fan->dev, "invalid label: %s\n", data->label);
+ return -EINVAL;
+ }
+ }
+
+ /* Init cooling levels per PWM state. */
+ for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++)
+ fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL;
+ for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ return 0;
+}
+
+static int mlxreg_fan_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan *fan;
+ struct device *hwm;
+ int err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ if (!fan)
+ return -ENOMEM;
+
+ fan->dev = &pdev->dev;
+ fan->regmap = pdata->regmap;
+ platform_set_drvdata(pdev, fan);
+
+ err = mlxreg_fan_config(fan, pdata);
+ if (err)
+ return err;
+
+ hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ fan,
+ &mlxreg_fan_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwm)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwm);
+ }
+
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
+ &mlxreg_fan_cooling_ops);
+ if (IS_ERR(fan->cdev)) {
+ dev_err(&pdev->dev, "Failed to register cooling device\n");
+ return PTR_ERR(fan->cdev);
+ }
+ }
+
+ return 0;
+}
+
+static int mlxreg_fan_remove(struct platform_device *pdev)
+{
+ struct mlxreg_fan *fan = platform_get_drvdata(pdev);
+
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ thermal_cooling_device_unregister(fan->cdev);
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_fan_driver = {
+ .driver = {
+ .name = "mlxreg-fan",
+ },
+ .probe = mlxreg_fan_probe,
+ .remove = mlxreg_fan_remove,
+};
+
+module_platform_driver(mlxreg_fan_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox FAN driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlxreg-fan");
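
The RPM formula documented at the top of mlxreg-fan.c is easy to sanity-check in isolation. A minimal userspace sketch of the same arithmetic with the default samples (44) and divider (1132); note that DIV_ROUND_CLOSEST rounds to nearest, so it lands one above the truncated values quoted in the comment:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

/* Same arithmetic as MLXREG_FAN_GET_RPM, with the default constants. */
static long get_rpm(long regval, long divider, long samples)
{
	return DIV_ROUND_CLOSEST(15000000L * 100, (regval + samples) * divider);
}

int main(void)
{
	printf("%ld\n", get_rpm(0x00, 1132, 44)); /* 30116 (comment's truncated 30115) */
	printf("%ld\n", get_rpm(0xfe, 1132, 44)); /* 4447 (comment's truncated 4446) */
	return 0;
}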
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index f9d1349c3286..c6bd61e4695a 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1050,8 +1050,8 @@ struct nct6775_data {
u64 beeps;
u8 pwm_num; /* number of pwm */
- u8 pwm_mode[NUM_FAN]; /* 1->DC variable voltage,
- * 0->PWM variable duty cycle
+ u8 pwm_mode[NUM_FAN]; /* 0->DC variable voltage,
+ * 1->PWM variable duty cycle
*/
enum pwm_enable pwm_enable[NUM_FAN];
/* 0->off
@@ -2541,7 +2541,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
case thermal_cruise:
nct6775_write_value(data, data->REG_TARGET[nr],
data->target_temp[nr]);
- /* intentional */
+ /* fall through */
default:
reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 95a68ab175c7..7815ddf149f6 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -77,7 +77,7 @@ struct nct7904_data {
};
/* Access functions */
-static int nct7904_bank_lock(struct nct7904_data *data, unsigned bank)
+static int nct7904_bank_lock(struct nct7904_data *data, unsigned int bank)
{
int ret;
@@ -99,7 +99,7 @@ static inline void nct7904_bank_release(struct nct7904_data *data)
/* Read 1-byte register. Returns unsigned reg or -ERRNO on error. */
static int nct7904_read_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret;
@@ -117,7 +117,7 @@ static int nct7904_read_reg(struct nct7904_data *data,
* -ERRNO on error.
*/
static int nct7904_read_reg16(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret, hi;
@@ -139,7 +139,7 @@ static int nct7904_read_reg16(struct nct7904_data *data,
/* Write 1-byte register. Returns 0 or -ERRNO on error. */
static int nct7904_write_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg, u8 val)
+ unsigned int bank, unsigned int reg, u8 val)
{
struct i2c_client *client = data->client;
int ret;
@@ -159,7 +159,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
unsigned int cnt, rpm;
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_fan_input:
ret = nct7904_read_reg16(data, BANK_0,
FANIN1_HV_REG + channel * 2);
@@ -200,7 +200,7 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
index = nct7904_chan_to_index[channel];
- switch(attr) {
+ switch (attr) {
case hwmon_in_input:
ret = nct7904_read_reg16(data, BANK_0,
VSEN1_HV_REG + index * 2);
@@ -236,7 +236,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
- switch(attr) {
+ switch (attr) {
case hwmon_temp_input:
if (channel == 0)
ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
@@ -276,7 +276,7 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
ret = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + channel);
if (ret < 0)
@@ -301,7 +301,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
if (val < 0 || val > 255)
return -EINVAL;
@@ -322,7 +322,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel)
{
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
case hwmon_pwm_enable:
return S_IRUGO | S_IWUSR;
@@ -431,15 +431,15 @@ static const struct hwmon_channel_info nct7904_in = {
};
static const u32 nct7904_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_fan = {
@@ -448,11 +448,11 @@ static const struct hwmon_channel_info nct7904_fan = {
};
static const u32 nct7904_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ 0
};
static const struct hwmon_channel_info nct7904_pwm = {
@@ -461,16 +461,16 @@ static const struct hwmon_channel_info nct7904_pwm = {
};
static const u32 nct7904_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- 0
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_temp = {
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
new file mode 100644
index 000000000000..8474d601aa63
--- /dev/null
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -0,0 +1,1057 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 Nuvoton Technology corporation.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+
+/* NPCM7XX PWM registers */
+#define NPCM7XX_PWM_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_PWM_REG_PR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_PWM_REG_CSR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_PWM_REG_CR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_PWM_REG_CNRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x0C + (12 * (ch)))
+#define NPCM7XX_PWM_REG_CMRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x10 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PDRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x14 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PIER(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x3C)
+#define NPCM7XX_PWM_REG_PIIR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x40)
+
+#define NPCM7XX_PWM_CTRL_CH0_MODE_BIT BIT(3)
+#define NPCM7XX_PWM_CTRL_CH1_MODE_BIT BIT(11)
+#define NPCM7XX_PWM_CTRL_CH2_MODE_BIT BIT(15)
+#define NPCM7XX_PWM_CTRL_CH3_MODE_BIT BIT(19)
+
+#define NPCM7XX_PWM_CTRL_CH0_INV_BIT BIT(2)
+#define NPCM7XX_PWM_CTRL_CH1_INV_BIT BIT(10)
+#define NPCM7XX_PWM_CTRL_CH2_INV_BIT BIT(14)
+#define NPCM7XX_PWM_CTRL_CH3_INV_BIT BIT(18)
+
+#define NPCM7XX_PWM_CTRL_CH0_EN_BIT BIT(0)
+#define NPCM7XX_PWM_CTRL_CH1_EN_BIT BIT(8)
+#define NPCM7XX_PWM_CTRL_CH2_EN_BIT BIT(12)
+#define NPCM7XX_PWM_CTRL_CH3_EN_BIT BIT(16)
+
+/* Define the maximum PWM channel number */
+#define NPCM7XX_PWM_MAX_CHN_NUM 8
+#define NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE 4
+#define NPCM7XX_PWM_MAX_MODULES 2
+
+/* Define the Counter Register, value = 100 for match 100% */
+#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255
+#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127
+#define NPCM7XX_PWM_CMR_MAX 255
+
+/* default all PWM channels PRESCALE2 = 1 */
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 0x4
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 0x40
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 0x400
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3 0x4000
+
+#define PWM_OUTPUT_FREQ_25KHZ 25000
+#define PWN_CNT_DEFAULT 256
+#define MIN_PRESCALE1 2
+#define NPCM7XX_PWM_PRESCALE_SHIFT_CH01 8
+
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT (NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3)
+
+#define NPCM7XX_PWM_CTRL_MODE_DEFAULT (NPCM7XX_PWM_CTRL_CH0_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH1_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH2_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH3_MODE_BIT)
+
+/* NPCM7XX FAN Tacho registers */
+#define NPCM7XX_FAN_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_FAN_REG_TCNT1(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_FAN_REG_TCRA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x02)
+#define NPCM7XX_FAN_REG_TCRB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_FAN_REG_TCNT2(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x06)
+#define NPCM7XX_FAN_REG_TPRSC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_FAN_REG_TCKC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0A)
+#define NPCM7XX_FAN_REG_TMCTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0C)
+#define NPCM7XX_FAN_REG_TICTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0E)
+#define NPCM7XX_FAN_REG_TICLR(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x10)
+#define NPCM7XX_FAN_REG_TIEN(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x12)
+#define NPCM7XX_FAN_REG_TCPA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x14)
+#define NPCM7XX_FAN_REG_TCPB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x16)
+#define NPCM7XX_FAN_REG_TCPCFG(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x18)
+#define NPCM7XX_FAN_REG_TINASEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1A)
+#define NPCM7XX_FAN_REG_TINBSEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1C)
+
+#define NPCM7XX_FAN_TCKC_CLKX_NONE 0
+#define NPCM7XX_FAN_TCKC_CLK1_APB BIT(0)
+#define NPCM7XX_FAN_TCKC_CLK2_APB BIT(3)
+
+#define NPCM7XX_FAN_TMCTRL_TBEN BIT(6)
+#define NPCM7XX_FAN_TMCTRL_TAEN BIT(5)
+#define NPCM7XX_FAN_TMCTRL_TBEDG BIT(4)
+#define NPCM7XX_FAN_TMCTRL_TAEDG BIT(3)
+#define NPCM7XX_FAN_TMCTRL_MODE_5 BIT(2)
+
+#define NPCM7XX_FAN_TICLR_CLEAR_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TICLR_TFCLR BIT(5)
+#define NPCM7XX_FAN_TICLR_TECLR BIT(4)
+#define NPCM7XX_FAN_TICLR_TDCLR BIT(3)
+#define NPCM7XX_FAN_TICLR_TCCLR BIT(2)
+#define NPCM7XX_FAN_TICLR_TBCLR BIT(1)
+#define NPCM7XX_FAN_TICLR_TACLR BIT(0)
+
+#define NPCM7XX_FAN_TIEN_ENABLE_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TIEN_TFIEN BIT(5)
+#define NPCM7XX_FAN_TIEN_TEIEN BIT(4)
+#define NPCM7XX_FAN_TIEN_TDIEN BIT(3)
+#define NPCM7XX_FAN_TIEN_TCIEN BIT(2)
+#define NPCM7XX_FAN_TIEN_TBIEN BIT(1)
+#define NPCM7XX_FAN_TIEN_TAIEN BIT(0)
+
+#define NPCM7XX_FAN_TICTRL_TFPND BIT(5)
+#define NPCM7XX_FAN_TICTRL_TEPND BIT(4)
+#define NPCM7XX_FAN_TICTRL_TDPND BIT(3)
+#define NPCM7XX_FAN_TICTRL_TCPND BIT(2)
+#define NPCM7XX_FAN_TICTRL_TBPND BIT(1)
+#define NPCM7XX_FAN_TICTRL_TAPND BIT(0)
+
+#define NPCM7XX_FAN_TCPCFG_HIBEN BIT(7)
+#define NPCM7XX_FAN_TCPCFG_EQBEN BIT(6)
+#define NPCM7XX_FAN_TCPCFG_LOBEN BIT(5)
+#define NPCM7XX_FAN_TCPCFG_CPBSEL BIT(4)
+#define NPCM7XX_FAN_TCPCFG_HIAEN BIT(3)
+#define NPCM7XX_FAN_TCPCFG_EQAEN BIT(2)
+#define NPCM7XX_FAN_TCPCFG_LOAEN BIT(1)
+#define NPCM7XX_FAN_TCPCFG_CPASEL BIT(0)
+
+/* FAN General Definition */
+/* Define the maximum FAN channel number */
+#define NPCM7XX_FAN_MAX_MODULE 8
+#define NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE 2
+#define NPCM7XX_FAN_MAX_CHN_NUM 16
+
+/*
+ * Fan tach timeout (based on a 214843.75Hz clock, 1 cnt = 4.654us)
+ * Timeout 94ms ~= 0x5000
+ * (The minimum supportable FAN speed is ~640 RPM at 1 pulse per
+ * revolution, 320 RPM at 2 pulses, ... -- 10.6Hz)
+ */
+#define NPCM7XX_FAN_TIMEOUT 0x5000
+#define NPCM7XX_FAN_TCNT 0xFFFF
+#define NPCM7XX_FAN_TCPA (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+#define NPCM7XX_FAN_TCPB (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+
+#define NPCM7XX_FAN_POLL_TIMER_200MS 200
+#define NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION 2
+#define NPCM7XX_FAN_TINASEL_FANIN_DEFAULT 0
+#define NPCM7XX_FAN_CLK_PRESCALE 255
+
+#define NPCM7XX_FAN_CMPA 0
+#define NPCM7XX_FAN_CMPB 1
+
+/* Obtain the fan number */
+#define NPCM7XX_FAN_INPUT(fan, cmp) (((fan) << 1) + (cmp))
+
+/* fan sample status */
+#define FAN_DISABLE 0xFF
+#define FAN_INIT 0x00
+#define FAN_PREPARE_TO_GET_FIRST_CAPTURE 0x01
+#define FAN_ENOUGH_SAMPLE 0x02
+
+struct npcm7xx_fan_dev {
+ u8 fan_st_flg;
+ u8 fan_pls_per_rev;
+ u16 fan_cnt;
+ u32 fan_cnt_tmp;
+};
+
+struct npcm7xx_cooling_device {
+ char name[THERMAL_NAME_LENGTH];
+ struct npcm7xx_pwm_fan_data *data;
+ struct thermal_cooling_device *tcdev;
+ int pwm_port;
+ u8 *cooling_levels;
+ u8 max_state;
+ u8 cur_state;
+};
+
+struct npcm7xx_pwm_fan_data {
+ void __iomem *pwm_base;
+ void __iomem *fan_base;
+ unsigned long pwm_clk_freq;
+ unsigned long fan_clk_freq;
+ struct clk *pwm_clk;
+ struct clk *fan_clk;
+ struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES];
+ spinlock_t fan_lock[NPCM7XX_FAN_MAX_MODULE];
+ int fan_irq[NPCM7XX_FAN_MAX_MODULE];
+ bool pwm_present[NPCM7XX_PWM_MAX_CHN_NUM];
+ bool fan_present[NPCM7XX_FAN_MAX_CHN_NUM];
+ u32 input_clk_freq;
+ struct timer_list fan_timer;
+ struct npcm7xx_fan_dev fan_dev[NPCM7XX_FAN_MAX_CHN_NUM];
+ struct npcm7xx_cooling_device *cdev[NPCM7XX_PWM_MAX_CHN_NUM];
+ u8 fan_select;
+};
+
+static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
+ int channel, u16 val)
+{
+ u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 tmp_buf, ctrl_en_bit, env_bit;
+
+ /*
+	 * Configure the PWM comparator register to set the duty cycle
+ */
+ mutex_lock(&data->pwm_lock[module]);
+
+ /* write new CMR value */
+ iowrite32(val, NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
+ tmp_buf = ioread32(NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+
+ switch (pwm_ch) {
+ case 0:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH0_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH0_INV_BIT;
+ break;
+ case 1:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH1_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH1_INV_BIT;
+ break;
+ case 2:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH2_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH2_INV_BIT;
+ break;
+ case 3:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH3_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH3_INV_BIT;
+ break;
+ default:
+ mutex_unlock(&data->pwm_lock[module]);
+ return -ENODEV;
+ }
+
+ if (val == 0) {
+ /* Disable PWM */
+ tmp_buf &= ~ctrl_en_bit;
+ tmp_buf |= env_bit;
+ } else {
+ /* Enable PWM */
+ tmp_buf |= ctrl_en_bit;
+ tmp_buf &= ~env_bit;
+ }
+
+ iowrite32(tmp_buf, NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+ mutex_unlock(&data->pwm_lock[module]);
+
+ return 0;
+}
+
+static inline void npcm7xx_fan_start_capture(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp)
+{
+ u8 fan_id;
+ u8 reg_mode;
+ u8 reg_int;
+ unsigned long flags;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+	/* check whether this fan tach is enabled */
+ if (data->fan_dev[fan_id].fan_st_flg != FAN_DISABLE) {
+ /* reset status */
+ spin_lock_irqsave(&data->fan_lock[fan], flags);
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /*
+		 * The interrupt enable bits do not need to be cleared before
+		 * being set; they are cleared only on reset. The clock unit
+		 * control register behaves in the same manner as the
+		 * interrupt enable register.
+ */
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TAIEN |
+ NPCM7XX_FAN_TIEN_TEIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode = NPCM7XX_FAN_TCKC_CLK1_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+			/* start capture */
+ iowrite8(reg_mode, NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+ } else {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TBIEN |
+ NPCM7XX_FAN_TIEN_TFIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode =
+ NPCM7XX_FAN_TCKC_CLK2_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+			/* start capture */
+ iowrite8(reg_mode,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[fan], flags);
+ }
+}
+
+/*
+ * Enable a background timer to poll the fan tach values: one pass every
+ * 200ms, so four passes (200ms * 4) cover all fans
+ */
+static void npcm7xx_fan_polling(struct timer_list *t)
+{
+ struct npcm7xx_pwm_fan_data *data;
+ int i;
+
+ data = from_timer(data, t, fan_timer);
+
+ /*
+	 * Poll two modules per round:
+ * FAN01 & FAN89 / FAN23 & FAN1011 / FAN45 & FAN1213 / FAN67 & FAN1415
+ */
+ for (i = data->fan_select; i < NPCM7XX_FAN_MAX_MODULE;
+ i = i + 4) {
+ /* clear the flag and reset the counter (TCNT) */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, i));
+
+ if (data->fan_present[i * 2]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPA);
+ }
+ if (data->fan_present[(i * 2) + 1]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPB);
+ }
+ }
+
+ data->fan_select++;
+ data->fan_select &= 0x3;
+
+ /* reset the timer interval */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ add_timer(&data->fan_timer);
+}
+
+static inline void npcm7xx_fan_compute(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 fan_id, u8 flag_int,
+ u8 flag_mode, u8 flag_clear)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u16 fan_cap;
+
+ if (cmp == NPCM7XX_FAN_CMPA)
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRA(data->fan_base, fan));
+ else
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRB(data->fan_base, fan));
+
+	/* clear capture flag, H/W will auto reset the NPCM7XX_FAN_TCNTx */
+ iowrite8(flag_clear, NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_INIT) {
+ /* First capture, drop it */
+ data->fan_dev[fan_id].fan_st_flg =
+ FAN_PREPARE_TO_GET_FIRST_CAPTURE;
+
+ /* reset counter */
+ data->fan_dev[fan_id].fan_cnt_tmp = 0;
+ } else if (data->fan_dev[fan_id].fan_st_flg < FAN_ENOUGH_SAMPLE) {
+ /*
+		 * collect enough samples
+		 * (e.g. a 2-pulse fan needs to get 2 samples)
+ */
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+ data->fan_dev[fan_id].fan_st_flg++;
+ } else {
+		/* got enough samples, or the fan is disabled */
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_ENOUGH_SAMPLE) {
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+			/* compute final average cnt per pulse */
+ data->fan_dev[fan_id].fan_cnt =
+ data->fan_dev[fan_id].fan_cnt_tmp /
+ FAN_ENOUGH_SAMPLE;
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ }
+
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+}
+
+static inline void npcm7xx_check_cmp(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 flag)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u8 flag_timeout;
+ u8 flag_cap;
+ u8 flag_clear;
+ u8 flag_int;
+ u8 flag_mode;
+ u8 fan_id;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ flag_cap = NPCM7XX_FAN_TICTRL_TAPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TEPND;
+ flag_int = NPCM7XX_FAN_TIEN_TAIEN | NPCM7XX_FAN_TIEN_TEIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK1_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TACLR | NPCM7XX_FAN_TICLR_TECLR;
+ } else {
+ flag_cap = NPCM7XX_FAN_TICTRL_TBPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TFPND;
+ flag_int = NPCM7XX_FAN_TIEN_TBIEN | NPCM7XX_FAN_TIEN_TFIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK2_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TBCLR | NPCM7XX_FAN_TICLR_TFCLR;
+ }
+
+ if (flag & flag_timeout) {
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* clear interrupt flag */
+ iowrite8(flag_clear,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /*
+		 * If a timeout occurs (NPCM7XX_FAN_TIMEOUT), the fan is not
+		 * connected or its speed is lower than 10.6Hz (320 RPM at 2
+		 * pulses per revolution). In this situation, the RPM output
+		 * should be zero.
+ */
+ data->fan_dev[fan_id].fan_cnt = 0;
+ } else {
+		/* an input capture occurred */
+ if (flag & flag_cap)
+ npcm7xx_fan_compute(data, fan, cmp, fan_id, flag_int,
+ flag_mode, flag_clear);
+ }
+}
+
+static irqreturn_t npcm7xx_fan_isr(int irq, void *dev_id)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_id;
+ unsigned long flags;
+ int module;
+ u8 flag;
+
+ module = irq - data->fan_irq[0];
+ spin_lock_irqsave(&data->fan_lock[module], flags);
+
+ flag = ioread8(NPCM7XX_FAN_REG_TICTRL(data->fan_base, module));
+ if (flag > 0) {
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPA, flag);
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPB, flag);
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+
+ return IRQ_NONE;
+}
+
+static int npcm7xx_read_pwm(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+	u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = ioread32
+			(NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > NPCM7XX_PWM_CMR_MAX)
+ return -EINVAL;
+ err = npcm7xx_pwm_config_set(data, channel, (u16)val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static umode_t npcm7xx_pwm_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->pwm_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read_fan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = 0;
+ if (data->fan_dev[channel].fan_cnt <= 0)
+ return data->fan_dev[channel].fan_cnt;
+
+ /* Convert the raw reading to RPM */
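+		/*
+		 * Worked example (illustrative numbers, not from the
+		 * datasheet): with input_clk_freq = 214843 Hz, fan_cnt =
+		 * 10000 and 2 pulses per revolution,
+		 * RPM = 214843 * 60 / (10000 * 2) = 644.
+		 */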
+ if (data->fan_dev[channel].fan_cnt > 0 &&
+ data->fan_dev[channel].fan_pls_per_rev > 0)
+ *val = ((data->input_clk_freq * 60) /
+ (data->fan_dev[channel].fan_cnt *
+ data->fan_dev[channel].fan_pls_per_rev));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t npcm7xx_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->fan_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_read_pwm(dev, attr, channel, val);
+ case hwmon_fan:
+ return npcm7xx_read_fan(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_write_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t npcm7xx_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_pwm_is_visible(data, attr, channel);
+ case hwmon_fan:
+ return npcm7xx_fan_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
+
+static const u32 npcm7xx_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_pwm = {
+ .type = hwmon_pwm,
+ .config = npcm7xx_pwm_config,
+};
+
+static const u32 npcm7xx_fan_config[] = {
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_fan = {
+ .type = hwmon_fan,
+ .config = npcm7xx_fan_config,
+};
+
+static const struct hwmon_channel_info *npcm7xx_info[] = {
+ &npcm7xx_pwm,
+ &npcm7xx_fan,
+ NULL
+};
+
+static const struct hwmon_ops npcm7xx_hwmon_ops = {
+ .is_visible = npcm7xx_is_visible,
+ .read = npcm7xx_read,
+ .write = npcm7xx_write,
+};
+
+static const struct hwmon_chip_info npcm7xx_chip_info = {
+ .ops = &npcm7xx_hwmon_ops,
+ .info = npcm7xx_info,
+};
+
+static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int m, ch;
+ u32 prescale_val, output_freq;
+
+ data->pwm_clk_freq = clk_get_rate(data->pwm_clk);
+
+ /* Adjust NPCM7xx PWM output frequency to ~25 kHz */
+ output_freq = data->pwm_clk_freq / PWN_CNT_DEFAULT;
+ prescale_val = DIV_ROUND_CLOSEST(output_freq, PWM_OUTPUT_FREQ_25KHZ);
+
+ /* If prescale_val = 0, then the prescale output clock is stopped */
+ if (prescale_val < MIN_PRESCALE1)
+ prescale_val = MIN_PRESCALE1;
+ /*
+ * prescale_val must be decremented by one, because the hardware
+ * increments the value written to the PWM Prescale register by one.
+ */
+ prescale_val--;
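+
+ /*
+ * Worked example (illustrative numbers): a 25 MHz PWM clock and a
+ * 256-count period give an output_freq of ~97.6 kHz; prescale_val
+ * is then 4 (stored as 3), for a final PWM frequency of ~24.4 kHz,
+ * matching the value returned below.
+ */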
+
+ /* Replicate the prescale value into both channel fields of the register */
+ prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01);
+
+ for (m = 0; m < NPCM7XX_PWM_MAX_MODULES ; m++) {
+ iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT,
+ NPCM7XX_PWM_REG_CSR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_CTRL_MODE_DEFAULT,
+ NPCM7XX_PWM_REG_CR(data->pwm_base, m));
+
+ for (ch = 0; ch < NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE; ch++) {
+ iowrite32(NPCM7XX_PWM_COUNTER_DEFAULT_NUM,
+ NPCM7XX_PWM_REG_CNRx(data->pwm_base, m, ch));
+ }
+ }
+
+ return output_freq / ((prescale_val & 0xf) + 1);
+}
+
+static void npcm7xx_fan_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int md;
+ int ch;
+ int i;
+ u32 apb_clk_freq;
+
+ for (md = 0; md < NPCM7XX_FAN_MAX_MODULE; md++) {
+ /* stop FAN0~7 clock */
+ iowrite8(NPCM7XX_FAN_TCKC_CLKX_NONE,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, md));
+
+ /* disable all interrupts */
+ iowrite8(0x00, NPCM7XX_FAN_REG_TIEN(data->fan_base, md));
+
+ /* clear all interrupts */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, md));
+
+ /* set FAN0~7 clock prescaler */
+ iowrite8(NPCM7XX_FAN_CLK_PRESCALE,
+ NPCM7XX_FAN_REG_TPRSC(data->fan_base, md));
+
+ /* set FAN0~7 mode (high-to-low transition) */
+ iowrite8((NPCM7XX_FAN_TMCTRL_MODE_5 | NPCM7XX_FAN_TMCTRL_TBEN |
+ NPCM7XX_FAN_TMCTRL_TAEN),
+ NPCM7XX_FAN_REG_TMCTRL(data->fan_base, md));
+
+ /* set FAN0~7 Initial Count/Cap */
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, md));
+
+ /* set FAN0~7 compare (equal to count) */
+ iowrite8((NPCM7XX_FAN_TCPCFG_EQAEN | NPCM7XX_FAN_TCPCFG_EQBEN),
+ NPCM7XX_FAN_REG_TCPCFG(data->fan_base, md));
+
+ /* set FAN0~7 compare value */
+ iowrite16(NPCM7XX_FAN_TCPA,
+ NPCM7XX_FAN_REG_TCPA(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCPB,
+ NPCM7XX_FAN_REG_TCPB(data->fan_base, md));
+
+ /* set FAN0~7 fan input FANIN 0~15 */
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINASEL(data->fan_base, md));
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINBSEL(data->fan_base, md));
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE; i++) {
+ ch = md * NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE + i;
+ data->fan_dev[ch].fan_st_flg = FAN_DISABLE;
+ data->fan_dev[ch].fan_pls_per_rev =
+ NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION;
+ data->fan_dev[ch].fan_cnt = 0;
+ }
+ }
+
+ apb_clk_freq = clk_get_rate(data->fan_clk);
+
+ /*
+ * Fan tach input clock = APB clock / (prescaler + 1); the
+ * default prescaler value is 255.
+ */
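+ /*
+ * e.g. a 50 MHz APB clock (illustrative) yields
+ * 50000000 / 256 = ~195.3 kHz.
+ */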
+ data->input_clk_freq = apb_clk_freq / (NPCM7XX_FAN_CLK_PRESCALE + 1);
+}
+
+static int
+npcm7xx_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->max_state;
+
+ return 0;
+}
+
+static int
+npcm7xx_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->cur_state;
+
+ return 0;
+}
+
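+/*
+ * Map a thermal cooling state to a PWM duty value taken from the
+ * "cooling-levels" DT property. With, say, cooling-levels =
+ * <127 191 255> (illustrative), states 0..2 select roughly 50%,
+ * 75% and 100% duty.
+ */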
+static int
+npcm7xx_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+ int ret;
+
+ if (state > cdev->max_state)
+ return -EINVAL;
+
+ cdev->cur_state = state;
+ ret = npcm7xx_pwm_config_set(cdev->data, cdev->pwm_port,
+ cdev->cooling_levels[cdev->cur_state]);
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops npcm7xx_pwm_cool_ops = {
+ .get_max_state = npcm7xx_pwm_cz_get_max_state,
+ .get_cur_state = npcm7xx_pwm_cz_get_cur_state,
+ .set_cur_state = npcm7xx_pwm_cz_set_cur_state,
+};
+
+static int npcm7xx_create_pwm_cooling(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data,
+ u32 pwm_port, u8 num_levels)
+{
+ int ret;
+ struct npcm7xx_cooling_device *cdev;
+
+ cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL);
+ if (!cdev->cooling_levels)
+ return -ENOMEM;
+
+ cdev->max_state = num_levels - 1;
+ ret = of_property_read_u8_array(child, "cooling-levels",
+ cdev->cooling_levels,
+ num_levels);
+ if (ret) {
+ dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
+ return ret;
+ }
+ snprintf(cdev->name, THERMAL_NAME_LENGTH, "%s%d", child->name,
+ pwm_port);
+
+ cdev->tcdev = thermal_of_cooling_device_register(child,
+ cdev->name,
+ cdev,
+ &npcm7xx_pwm_cool_ops);
+ if (IS_ERR(cdev->tcdev))
+ return PTR_ERR(cdev->tcdev);
+
+ cdev->data = data;
+ cdev->pwm_port = pwm_port;
+
+ data->cdev[pwm_port] = cdev;
+
+ return 0;
+}
+
+static int npcm7xx_en_pwm_fan(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data)
+{
+ u8 *fan_ch;
+ u32 pwm_port;
+ int ret, fan_cnt;
+ u8 index, ch;
+
+ ret = of_property_read_u32(child, "reg", &pwm_port);
+ if (ret)
+ return ret;
+
+ data->pwm_present[pwm_port] = true;
+ ret = npcm7xx_pwm_config_set(data, pwm_port,
+ NPCM7XX_PWM_CMR_DEFAULT_NUM);
+ if (ret)
+ return ret;
+
+ ret = of_property_count_u8_elems(child, "cooling-levels");
+ if (ret > 0) {
+ ret = npcm7xx_create_pwm_cooling(dev, child, data, pwm_port,
+ ret);
+ if (ret)
+ return ret;
+ }
+
+ fan_cnt = of_property_count_u8_elems(child, "fan-tach-ch");
+ if (fan_cnt < 1)
+ return -EINVAL;
+
+ fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
+ if (!fan_ch)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(child, "fan-tach-ch", fan_ch, fan_cnt);
+ if (ret)
+ return ret;
+
+ for (ch = 0; ch < fan_cnt; ch++) {
+ index = fan_ch[ch];
+ data->fan_present[index] = true;
+ data->fan_dev[index].fan_st_flg = FAN_INIT;
+ }
+
+ return 0;
+}
+
+static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np, *child;
+ struct npcm7xx_pwm_fan_data *data;
+ struct resource *res;
+ struct device *hwmon;
+ char name[20];
+ int ret, cnt;
+ u32 output_freq;
+ u32 i;
+
+ np = dev->of_node;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
+ if (!res) {
+ dev_err(dev, "pwm resource not found\n");
+ return -ENODEV;
+ }
+
+ data->pwm_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "pwm base resource is %pR\n", res);
+ if (IS_ERR(data->pwm_base))
+ return PTR_ERR(data->pwm_base);
+
+ data->pwm_clk = devm_clk_get(dev, "pwm");
+ if (IS_ERR(data->pwm_clk)) {
+ dev_err(dev, "couldn't get pwm clock\n");
+ return PTR_ERR(data->pwm_clk);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fan");
+ if (!res) {
+ dev_err(dev, "fan resource not found\n");
+ return -ENODEV;
+ }
+
+ data->fan_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "fan base resource is %pR\n", res);
+ if (IS_ERR(data->fan_base))
+ return PTR_ERR(data->fan_base);
+
+ data->fan_clk = devm_clk_get(dev, "fan");
+ if (IS_ERR(data->fan_clk)) {
+ dev_err(dev, "couldn't get fan clock\n");
+ return PTR_ERR(data->fan_clk);
+ }
+
+ output_freq = npcm7xx_pwm_init(data);
+ npcm7xx_fan_init(data);
+
+ for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES ; cnt++)
+ mutex_init(&data->pwm_lock[cnt]);
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
+ spin_lock_init(&data->fan_lock[i]);
+
+ data->fan_irq[i] = platform_get_irq(pdev, i);
+ if (data->fan_irq[i] < 0) {
+ dev_err(dev, "get IRQ fan%d failed\n", i);
+ return data->fan_irq[i];
+ }
+
+ sprintf(name, "NPCM7XX-FAN-MD%d", i);
+ ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
+ 0, name, (void *)data);
+ if (ret) {
+ dev_err(dev, "register IRQ fan%d failed\n", i);
+ return ret;
+ }
+ }
+
+ for_each_child_of_node(np, child) {
+ ret = npcm7xx_en_pwm_fan(dev, child, data);
+ if (ret) {
+ dev_err(dev, "enable pwm and fan failed\n");
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "npcm7xx_pwm_fan",
+ data, &npcm7xx_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_err(dev, "unable to register hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM; i++) {
+ if (data->fan_present[i]) {
+ /* fan timer initialization */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ timer_setup(&data->fan_timer,
+ npcm7xx_fan_polling, 0);
+ add_timer(&data->fan_timer);
+ break;
+ }
+ }
+
+ pr_info("NPCM7XX PWM-FAN Driver probed, output Freq %dHz[PWM], input Freq %dHz[FAN]\n",
+ output_freq, data->input_clk_freq);
+
+ return 0;
+}
+
+static const struct of_device_id of_pwm_fan_match_table[] = {
+ { .compatible = "nuvoton,npcm750-pwm-fan", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match_table);
+
+static struct platform_driver npcm7xx_pwm_fan_driver = {
+ .probe = npcm7xx_pwm_fan_probe,
+ .driver = {
+ .name = "npcm7xx_pwm_fan",
+ .of_match_table = of_pwm_fan_match_table,
+ },
+};
+
+module_platform_driver(npcm7xx_pwm_fan_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM7XX PWM and Fan Tacho driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index e71aec69e76e..a82018aaf473 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -130,7 +130,7 @@ config SENSORS_MAX34440
default n
help
If you say yes here you get hardware monitoring support for Maxim
- MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
+ MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461.
This driver can also be built as a module. If so, the module will
be called max34440.
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 74a1f6f68fb3..47576c460010 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34460, max34461 };
+enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -44,6 +44,9 @@ enum chips { max34440, max34441, max34446, max34460, max34461 };
#define MAX34440_STATUS_OT_FAULT BIT(5)
#define MAX34440_STATUS_OT_WARN BIT(6)
+#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
+#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
+
struct max34440_data {
int id;
struct pmbus_driver_info info;
@@ -67,7 +70,7 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446)
+ if (data->id != max34446 && data->id != max34451)
return -ENXIO;
ret = pmbus_read_word_data(client, page,
MAX34446_MFR_IOUT_AVG);
@@ -143,7 +146,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && data->id == max34446)
+ if (!ret && (data->id == max34446 || data->id == max34451))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -202,6 +205,58 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static int max34451_set_supported_funcs(struct i2c_client *client,
+ struct max34440_data *data)
+{
+ /*
+ * Each of the channel 0-15 can be configured to monitor the following
+ * functions based on MFR_CHANNEL_CONFIG[5:0]
+ * 0x10: Sequencing + voltage monitoring (only valid for PAGES 0-11)
+ * 0x20: Voltage monitoring (no sequencing)
+ * 0x21: Voltage read only
+ * 0x22: Current monitoring
+ * 0x23: Current read only
+ * 0x30: General-purpose input active low
+ * 0x34: General-purpose input active high
+ * 0x00: Disabled
+ */
+
+ int page, rv;
+
+ for (page = 0; page < 16; page++) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_word_data(client,
+ MAX34451_MFR_CHANNEL_CONFIG);
+ if (rv < 0)
+ return rv;
+
+ switch (rv & MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK) {
+ case 0x10:
+ case 0x20:
+ data->info.func[page] = PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_STATUS_VOUT;
+ break;
+ case 0x21:
+ data->info.func[page] = PMBUS_HAVE_VOUT;
+ break;
+ case 0x22:
+ data->info.func[page] = PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT;
+ break;
+ case 0x23:
+ data->info.func[page] = PMBUS_HAVE_IOUT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
static struct pmbus_driver_info max34440_info[] = {
[max34440] = {
.pages = 14,
@@ -325,6 +380,30 @@ static struct pmbus_driver_info max34440_info[] = {
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
+ [max34451] = {
+ .pages = 21,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ /* func 0-15 is set dynamically before probing */
+ .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34460] = {
.pages = 18,
.format[PSC_VOLTAGE_OUT] = direct,
@@ -398,6 +477,7 @@ static int max34440_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max34440_data *data;
+ int rv;
data = devm_kzalloc(&client->dev, sizeof(struct max34440_data),
GFP_KERNEL);
@@ -406,6 +486,12 @@ static int max34440_probe(struct i2c_client *client,
data->id = id->driver_data;
data->info = max34440_info[id->driver_data];
+ if (data->id == max34451) {
+ rv = max34451_set_supported_funcs(client, data);
+ if (rv)
+ return rv;
+ }
+
return pmbus_do_probe(client, id, &data->info);
}
@@ -413,6 +499,7 @@ static const struct i2c_device_id max34440_id[] = {
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
+ {"max34451", max34451},
{"max34460", max34460},
{"max34461", max34461},
{}
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 1f41a4f89c08..8a873975cf12 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
if (priv->len_recv) {
/* read length byte */
rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+ /*
+ * We expect at least 2 interrupts for I2C_M_RECV_LEN
+ * transactions. The length is updated during the first
+ * interrupt, and the buffer contents are only copied
+ * during subsequent interrupts. If in case the interrupts
+ * get merged we would complete the transaction without
+ * copying out the bytes from RX fifo. To avoid this now we
+ * drain the fifo as and when data is available.
+ * We drained the rlen byte already, decrement total length
+ * by one.
+ */
+
+ len--;
if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
rlen = 0; /*abort transfer */
priv->msg_buf_remaining = 0;
priv->msg_len = 0;
- } else {
- *buf++ = rlen;
- if (priv->client_pec)
- ++rlen; /* account for error check byte */
- /* update remaining bytes and message length */
- priv->msg_buf_remaining = rlen;
- priv->msg_len = rlen + 1;
+ xlp9xx_i2c_update_rlen(priv);
+ return;
}
+
+ *buf++ = rlen;
+ if (priv->client_pec)
+ ++rlen; /* account for error check byte */
+ /* update remaining bytes and message length */
+ priv->msg_buf_remaining = rlen;
+ priv->msg_len = rlen + 1;
xlp9xx_i2c_update_rlen(priv);
priv->len_recv = false;
- } else {
- len = min(priv->msg_buf_remaining, len);
- for (i = 0; i < len; i++, buf++)
- *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
- priv->msg_buf_remaining -= len;
}
+ len = min(priv->msg_buf_remaining, len);
+ for (i = 0; i < len; i++, buf++)
+ *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+ priv->msg_buf_remaining -= len;
priv->msg_buf = buf;
if (priv->msg_buf_remaining)
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index a6e904973ba8..475910ffbcb6 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -121,7 +121,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
* this lock.
*/
if (!exclusive)
- return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+ return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
-EBUSY : 0;
/* lock is either WRITE or DESTROY - should be exclusive */
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index a4e404aaf64b..5c7afdec192c 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
#define HIL_DATA 0x1
#define HIL_CMD 0x3
#define HIL_IRQ 2
- #define hil_readb(p) readb(p)
- #define hil_writeb(v,p) writeb((v),(p))
+ #define hil_readb(p) readb((const volatile void __iomem *)(p))
+ #define hil_writeb(v, p) writeb((v), (volatile void __iomem *)(p))
#else
#error "HIL is not supported on this platform"
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e9233db16e03..d564d21245c5 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,7 +8,7 @@ config ARM_GIC
bool
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
config ARM_GIC_V3
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
config ARM_VIC
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config ARM_VIC_NR
int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config ATMEL_AIC5_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
config FARADAY_FTINTC010
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
bool
depends on ARCH_CLPS711X
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
default y
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
config ORION_IRQCHIP
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config PIC32_EVIC
bool
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 4eca5c763766..606efa64adff 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
*/
info->scratchpad[0].ul = mc_bus_dev->icid;
msi_info = msi_get_domain_info(msi_domain->parent);
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
}
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 25a98de5cfb2..8d6d009d1d58 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
{
struct pci_dev *pdev, *alias_dev;
struct msi_domain_info *msi_info;
- int alias_count = 0;
+ int alias_count = 0, minnvec = 1;
if (!dev_is_pci(dev))
return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
- return msi_info->ops->msi_prepare(domain->parent,
- dev, max(nvec, alias_count), info);
+ /*
+ * Always allocate a power of 2, and special case device 0 for
+ * broken systems where the DevID is not wired (and all devices
+ * appear as DevID 0). For that reason, we generously allocate a
+ * minimum of 32 MSIs for DevID 0. If you want more because all
+ * your devices are aliasing to DevID 0, consider fixing your HW.
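+ *
+ * For example, a request for 5 vectors from a regular device is
+ * rounded up to 8, while the same request from DevID 0 yields 32.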
+ */
+ nvec = max(nvec, alias_count);
+ if (!info->scratchpad[0].ul)
+ minnvec = 32;
+ nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
static struct msi_domain_ops its_pci_msi_ops = {
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 8881a053c173..7b8e87b493fe 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(domain->parent,
dev, nvec, info);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d7842d312d3e..316a57530f6d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -23,6 +23,8 @@
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
} vpe_proxy;
static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
@@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
.irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
};
+
/*
* How we allocate LPIs:
*
- * The GIC has id_bits bits for interrupt identifiers. From there, we
- * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
- * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
- * bits to the right.
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
*
- * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
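+ *
+ * e.g. freeing [8192:8224) next to an already free [8224:8256)
+ * is meant to collapse both entries into a single [8192:8256)
+ * range.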
*/
-#define IRQS_PER_CHUNK_SHIFT 5
-#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
-#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
-static unsigned long *lpi_bitmap;
-static u32 lpi_chunks;
-static DEFINE_SPINLOCK(lpi_lock);
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+ struct list_head entry;
+ u32 base_id;
+ u32 span;
+};
-static int its_lpi_to_chunk(int lpi)
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
- return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+ struct lpi_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (range) {
+ INIT_LIST_HEAD(&range->entry);
+ range->base_id = base;
+ range->span = span;
+ }
+
+ return range;
}
-static int its_chunk_to_lpi(int chunk)
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
- return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+ struct lpi_range *ra, *rb;
+
+ ra = container_of(a, struct lpi_range, entry);
+ rb = container_of(b, struct lpi_range, entry);
+
+ return rb->base_id - ra->base_id;
}
-static int __init its_lpi_init(u32 id_bits)
+static void merge_lpi_ranges(void)
{
- lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+ struct lpi_range *range, *tmp;
- lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
- GFP_KERNEL);
- if (!lpi_bitmap) {
- lpi_chunks = 0;
- return -ENOMEM;
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (!list_is_last(&range->entry, &lpi_range_list) &&
+ (tmp->base_id == (range->base_id + range->span))) {
+ tmp->base_id = range->base_id;
+ tmp->span += range->span;
+ list_del(&range->entry);
+ kfree(range);
+ }
}
+}
- pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
- return 0;
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+ struct lpi_range *range, *tmp;
+ int err = -ENOSPC;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (range->span >= nr_lpis) {
+ *base = range->base_id;
+ range->base_id += nr_lpis;
+ range->span -= nr_lpis;
+
+ if (range->span == 0) {
+ list_del(&range->entry);
+ kfree(range);
+ }
+
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&lpi_range_lock);
+
+ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+ return err;
}
-static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+static int free_lpi_range(u32 base, u32 nr_lpis)
{
- unsigned long *bitmap = NULL;
- int chunk_id;
- int nr_chunks;
- int i;
+ struct lpi_range *new;
+ int err = 0;
+
+ mutex_lock(&lpi_range_lock);
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&new->entry, &lpi_range_list);
+ list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+ merge_lpi_ranges();
+out:
+ mutex_unlock(&lpi_range_lock);
+ return err;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+ u32 lpis = (1UL << id_bits) - 8192;
+ u32 numlpis;
+ int err;
+
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+ lpis = numlpis;
+ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+ lpis);
+ }
- nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+ /*
+ * Initializing the allocator is just the same as freeing the
+ * full range of LPIs.
+ */
+ err = free_lpi_range(8192, lpis);
+ pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+ return err;
+}
- spin_lock(&lpi_lock);
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+ unsigned long *bitmap = NULL;
+ int err = 0;
do {
- chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
- 0, nr_chunks, 0);
- if (chunk_id < lpi_chunks)
+ err = alloc_lpi_range(nr_irqs, base);
+ if (!err)
break;
- nr_chunks--;
- } while (nr_chunks > 0);
+ nr_irqs /= 2;
+ } while (nr_irqs > 0);
- if (!nr_chunks)
+ if (err)
goto out;
- bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
- sizeof(long),
- GFP_ATOMIC);
+ bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof(long), GFP_ATOMIC);
if (!bitmap)
goto out;
- for (i = 0; i < nr_chunks; i++)
- set_bit(chunk_id + i, lpi_bitmap);
-
- *base = its_chunk_to_lpi(chunk_id);
- *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+ *nr_ids = nr_irqs;
out:
- spin_unlock(&lpi_lock);
-
if (!bitmap)
*base = *nr_ids = 0;
return bitmap;
}
-static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
{
- int lpi;
-
- spin_lock(&lpi_lock);
-
- for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
- int chunk = its_lpi_to_chunk(lpi);
-
- BUG_ON(chunk > lpi_chunks);
- if (test_bit(chunk, lpi_bitmap)) {
- clear_bit(chunk, lpi_bitmap);
- } else {
- pr_err("Bad LPI chunk %d\n", chunk);
- }
- }
-
- spin_unlock(&lpi_lock);
-
+ WARN_ON(free_lpi_range(base, nr_ids));
kfree(bitmap);
}
@@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;
- lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
+ lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
@@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void)
{
struct its_node *its;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry)
its_cpu_init_collection(its);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!its_alloc_device_table(its, dev_id))
return NULL;
+ if (WARN_ON(!is_power_of_2(nvecs)))
+ nvecs = roundup_pow_of_two(nvecs);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
- * We allocate at least one chunk worth of LPIs bet device,
- * and thus that many ITEs. The device may require less though.
+ * Even if the device wants a single LPI, the ITT must be
+ * sized as a power of two (and you need at least one bit...).
*/
- nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+ nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
if (alloc_lpis) {
- lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+ lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
col_map = kcalloc(nr_lpis, sizeof(*col_map),
GFP_KERNEL);
@@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/* If all interrupts have been freed, start mopping the floor */
if (bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
- its_lpi_free_chunks(its_dev->event_map.lpi_map,
- its_dev->event_map.lpi_base,
- its_dev->event_map.nr_lpis);
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
@@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
}
if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
- its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+ its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
its_free_prop_table(vm->vprop_page);
}
}
@@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
BUG_ON(!vm);
- bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+ bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
if (nr_ids < nr_irqs) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
vprop_page = its_allocate_prop_table(GFP_KERNEL);
if (!vprop_page) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
@@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i - 1);
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
@@ -3070,7 +3139,7 @@ static int its_save_disable(void)
struct its_node *its;
int err = 0;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
@@ -3102,7 +3171,7 @@ err:
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
}
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return err;
}
@@ -3112,7 +3181,7 @@ static void its_restore_enable(void)
struct its_node *its;
int ret;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
int i;
@@ -3164,7 +3233,7 @@ static void its_restore_enable(void)
GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
its_cpu_init_collection(its);
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct syscore_ops its_syscore_ops = {
@@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
if (err)
goto out_free_tables;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_add(&its->entry, &its_nodes);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return 0;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 76ea56d779a1..e214181b77b7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED,
};
-#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
*/
typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
- gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_data.rdists.gicd_typer = typer;
gic_irqs = GICD_TYPER_IRQS(typer);
if (gic_irqs > 1020)
gic_irqs = 1020;
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index fc5953dea509..2ff08986b536 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
return ingenic_intc_of_init(node, 1);
}
IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
static int __init intc_2chip_of_init(struct device_node *node,
struct device_node *parent)
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 3a7e8905a97e..3df527fcf4e1 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
};
static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+ { .exti = 0, .irq_parent = 6 },
{ .exti = 1, .irq_parent = 7 },
{ .exti = 2, .irq_parent = 8 },
{ .exti = 3, .irq_parent = 9 },
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 6b16946f9b05..8fd5ec4d6042 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -67,10 +67,8 @@ static struct file *cxl_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
- struct inode *inode = NULL;
+ struct inode *inode;
int rc;
/* strongly inspired by anon_inode_getfile() */
@@ -91,23 +89,11 @@ static struct file *cxl_getfile(const char *name,
goto err_fs;
}
- file = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
- if (!path.dentry)
+ file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
+ if (IS_ERR(file))
goto err_inode;
- path.mnt = mntget(cxl_vfs_mount);
- d_instantiate(path.dentry, inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
- if (IS_ERR(file)) {
- path_put(&path);
- goto err_fs;
- }
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
return file;
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b6d735bf8011..342ae08ec3c2 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
-
#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy_toio(membase, &outdata, 4);
+ memcpy_toio((void __iomem *)membase, &outdata, 4);
/* Now compare them */
if (memcmp_withio(&outdata, membase, 4) == 0)
return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
+ memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
/* Fix endianness */
hdr->count = swab16(hdr->count);
}
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, dev->mem_start + xfer_base,
+ memcpy_fromio(skb->data,
+ (void __iomem *)dev->mem_start + xfer_base,
semi_count);
count -= semi_count;
- memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
- count);
+ memcpy_fromio(skb->data + semi_count,
+ (void __iomem *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data,
+ (void __iomem *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
{
long shmem = (start_page - WD_START_PG)<<8;
- memcpy_toio(dev->mem_start + shmem, buf, count);
+ memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
}
/* dayna block input/output */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 956860a69797..3bdab972420b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -762,7 +762,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
hw_atl_rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
hw_atl_rpfl2_accept_all_mc_packets_set(self,
IS_FILTER_ENABLED(IFF_ALLMULTI));
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5d08d2aeb172..e337da6ba2a4 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
GFP_KERNEL);
+ if (!lmac->dmacs)
+ return -ENOMEM;
/* Enable lmac */
bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 00fc5f1afb1d..7dddb9e748b8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
req->local_port = cpu_to_be16(f->fs.val.lport);
req->peer_port = cpu_to_be16(f->fs.val.fport);
- req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
- f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
- req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
- f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ memcpy(&req->local_ip, f->fs.val.lip, 4);
+ memcpy(&req->peer_ip, f->fs.val.fip, 4);
req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
DELACK_V(f->fs.hitcnts) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index eb9eb7aa953a..405236cf0b04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -858,8 +858,6 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
- void (*netdev_registered_init)(struct mlx5e_priv *priv);
- void (*netdev_registered_remove)(struct mlx5e_priv *priv);
int max_tc;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index e33afa8d2417..722998d68564 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -443,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
bool is_new;
int err;
- if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
- return -EINVAL;
-
- if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
- return -EINVAL;
-
- if (!MLX5_DSCP_SUPPORTED(priv->mdev))
- return -EINVAL;
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+ !MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EOPNOTSUPP;
- if (app->protocol >= MLX5E_MAX_DSCP)
+ if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+ (app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL;
/* Save the old entry info */
@@ -500,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
- if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
- return -EINVAL;
-
- if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
- return -EINVAL;
-
- if (!MLX5_DSCP_SUPPORTED(priv->mdev))
- return -EINVAL;
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+ !MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EOPNOTSUPP;
- if (app->protocol >= MLX5E_MAX_DSCP)
+ if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+ (app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL;
/* Skip if no dscp app entry */
@@ -1146,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
int err;
- err = mlx5_set_trust_state(priv->mdev, trust_state);
+ err = mlx5_set_trust_state(priv->mdev, trust_state);
if (err)
return err;
priv->dcbx_dp.trust_state = trust_state;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3a2c4e548226..dfbcda0d0e08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1970,15 +1970,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
- u16 func_id, peer_id;
+ u64 fsystem_guid, psystem_guid;
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
- func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
- peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+ mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
+ mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
- return (func_id == peer_id);
+ return (fsystem_guid == psystem_guid);
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 358edab9e72e..3e34cb8ac1d3 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2086,14 +2086,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
int i;
for (i = 0; i < cpsw->data.slaves; i++) {
- if (vid == cpsw->slaves[i].port_vlan)
- return -EINVAL;
+ if (vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
}
}
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
pm_runtime_put(cpsw->dev);
return ret;
}
@@ -2119,22 +2121,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
for (i = 0; i < cpsw->data.slaves; i++) {
if (vid == cpsw->slaves[i].port_vlan)
- return -EINVAL;
+ goto err;
}
}
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
- if (ret != 0)
- return ret;
-
- ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
- HOST_PORT_NUM, ALE_VLAN, vid);
- if (ret != 0)
- return ret;
-
- ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
- 0, ALE_VLAN, vid);
+ ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+err:
pm_runtime_put(cpsw->dev);
return ret;
}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 93dc05c194d3..5766225a4ce1 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
- return -EINVAL;
+ return -ENOENT;
cpsw_ale_read(ale, idx, ale_entry);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2d8812dd1534..9dd2ca62d84a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -894,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
@@ -903,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
RING_GET_RESPONSE(&queue->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
- if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+ if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
- BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
- skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ skb_frag_page(nfrag),
rx->offset, rx->status, PAGE_SIZE);
skb_shinfo(nskb)->nr_frags = 0;
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
index a59b6c4bb5b8..ad3d17c42e23 100644
--- a/drivers/nubus/bus.c
+++ b/drivers/nubus/bus.c
@@ -5,6 +5,7 @@
// Copyright (C) 2017 Finn Thain
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/nubus.h>
#include <linux/seq_file.h>
@@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board)
board->dev.release = nubus_device_release;
board->dev.bus = &nubus_bus_type;
dev_set_name(&board->dev, "slot.%X", board->slot);
+ board->dev.dma_mask = &board->dev.coherent_dma_mask;
+ dma_set_mask(&board->dev, DMA_BIT_MASK(32));
return device_register(&board->dev);
}
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 322de58eebaf..f66521c7f846 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -30,7 +30,8 @@ int loongson3_cpu_temp(int cpu)
case PRID_REV_LOONGSON3B_R2:
reg = ((reg >> 8) & 0xff) - 100;
break;
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
reg = (reg & 0xffff)*731/0x4000 - 273;
break;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a9f60d0ee02e..a23e7d394a0a 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -73,8 +73,8 @@ static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
-static void dasd_device_tasklet(struct dasd_device *);
-static void dasd_block_tasklet(struct dasd_block *);
+static void dasd_device_tasklet(unsigned long);
+static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
@@ -125,8 +125,7 @@ struct dasd_device *dasd_alloc_device(void)
dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
spin_lock_init(&device->mem_lock);
atomic_set(&device->tasklet_scheduled, 0);
- tasklet_init(&device->tasklet,
- (void (*)(unsigned long)) dasd_device_tasklet,
+ tasklet_init(&device->tasklet, dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
timer_setup(&device->timer, dasd_device_timeout, 0);
@@ -166,8 +165,7 @@ struct dasd_block *dasd_alloc_block(void)
atomic_set(&block->open_count, -1);
atomic_set(&block->tasklet_scheduled, 0);
- tasklet_init(&block->tasklet,
- (void (*)(unsigned long)) dasd_block_tasklet,
+ tasklet_init(&block->tasklet, dasd_block_tasklet,
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
@@ -2064,8 +2062,9 @@ EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
/*
* Acquire the device lock and process queues for the device.
*/
-static void dasd_device_tasklet(struct dasd_device *device)
+static void dasd_device_tasklet(unsigned long data)
{
+ struct dasd_device *device = (struct dasd_device *) data;
struct list_head final_queue;
atomic_set (&device->tasklet_scheduled, 0);
@@ -2783,8 +2782,9 @@ static void __dasd_block_start_head(struct dasd_block *block)
* block layer request queue, creates ccw requests, enqueues them on
* a dasd_device and processes ccw requests that have been returned.
*/
-static void dasd_block_tasklet(struct dasd_block *block)
+static void dasd_block_tasklet(unsigned long data)
{
+ struct dasd_block *block = (struct dasd_block *) data;
struct list_head final_queue;
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
@@ -3127,6 +3127,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
block->tag_set.nr_hw_queues = nr_hw_queues;
block->tag_set.queue_depth = queue_depth;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index e36a114354fc..b9ce93e9df89 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -708,7 +708,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
struct ccw1 *ccw;
cqr = lcu->rsu_cqr;
- strncpy((char *) &cqr->magic, "ECKD", 4);
+ memcpy((char *) &cqr->magic, "ECKD", 4);
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index b9ebb565ee2c..fab35c6170cc 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features)
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
- strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+ strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bbf95b78ef5d..4e7b55a14b1a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
struct dasd_eckd_private *private = device->private;
int i;
+ if (!private)
+ return;
+
dasd_alias_disconnect_device_from_lcu(device);
private->ned = NULL;
private->sneq = NULL;
@@ -2035,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
- cancel_work_sync(&device->reload_device);
- cancel_work_sync(&device->kick_validate);
+ if (cancel_work_sync(&device->reload_device))
+ dasd_put_device(device);
+ if (cancel_work_sync(&device->kick_validate))
+ dasd_put_device(device);
+
return 0;
};
@@ -3535,7 +3541,7 @@ static int prepare_itcw(struct itcw *itcw,
dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
&pfxdata, sizeof(pfxdata), total_data_size);
- return PTR_RET(dcw);
+ return PTR_ERR_OR_ZERO(dcw);
}
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 6ef8714dc693..93bb09da7fdc 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strncpy(header.busid, dev_name(&device->cdev->dev),
+ strlcpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
@@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strncpy(header.busid, dev_name(&device->cdev->dev),
+ strlcpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index b1fcb76dd272..98f66b7b6794 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -455,6 +455,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ bdev->tag_set.numa_node = NUMA_NO_NODE;
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
if (ret)
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0a4c13e1e76e..c6ab34f94b1b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -12,11 +12,6 @@ GCOV_PROFILE_sclp_early_core.o := n
KCOV_INSTRUMENT_sclp_early_core.o := n
UBSAN_SANITIZE_sclp_early_core.o := n
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o += -march=z900
-endif
-
CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 79eb60958015..bbb3001b0961 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -334,37 +334,41 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
int cmd, int perm)
{
struct kbentry tmp;
+ unsigned long kb_index, kb_table;
ushort *key_map, val, ov;
if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
return -EFAULT;
+ kb_index = (unsigned long) tmp.kb_index;
#if NR_KEYS < 256
- if (tmp.kb_index >= NR_KEYS)
+ if (kb_index >= NR_KEYS)
return -EINVAL;
#endif
+ kb_table = (unsigned long) tmp.kb_table;
#if MAX_NR_KEYMAPS < 256
- if (tmp.kb_table >= MAX_NR_KEYMAPS)
+ if (kb_table >= MAX_NR_KEYMAPS)
return -EINVAL;
+ kb_table = array_index_nospec(kb_table, MAX_NR_KEYMAPS);
#endif
switch (cmd) {
case KDGKBENT:
- key_map = kbd->key_maps[tmp.kb_table];
+ key_map = kbd->key_maps[kb_table];
if (key_map) {
- val = U(key_map[tmp.kb_index]);
+ val = U(key_map[kb_index]);
if (KTYP(val) >= KBD_NR_TYPES)
val = K_HOLE;
} else
- val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
+ val = (kb_index ? K_HOLE : K_NOSUCHMAP);
return put_user(val, &user_kbe->kb_value);
case KDSKBENT:
if (!perm)
return -EPERM;
- if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
+ if (!kb_index && tmp.kb_value == K_NOSUCHMAP) {
/* disallocate map */
- key_map = kbd->key_maps[tmp.kb_table];
+ key_map = kbd->key_maps[kb_table];
if (key_map) {
- kbd->key_maps[tmp.kb_table] = NULL;
+ kbd->key_maps[kb_table] = NULL;
kfree(key_map);
}
break;
@@ -375,18 +379,18 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
return -EINVAL;
- if (!(key_map = kbd->key_maps[tmp.kb_table])) {
+ if (!(key_map = kbd->key_maps[kb_table])) {
int j;
key_map = kmalloc(sizeof(plain_map),
GFP_KERNEL);
if (!key_map)
return -ENOMEM;
- kbd->key_maps[tmp.kb_table] = key_map;
+ kbd->key_maps[kb_table] = key_map;
for (j = 0; j < NR_KEYS; j++)
key_map[j] = U(K_HOLE);
}
- ov = U(key_map[tmp.kb_index]);
+ ov = U(key_map[kb_index]);
if (tmp.kb_value == ov)
break; /* nothing to do */
/*
@@ -395,7 +399,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
- key_map[tmp.kb_index] = U(tmp.kb_value);
+ key_map[kb_index] = U(tmp.kb_value);
break;
}
return 0;
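[The kb_index/kb_table locals introduced above let the user-controlled values be bounds-checked once and, for the keymap table, clamped with array_index_nospec() before indexing kbd->key_maps[] — the standard Spectre-v1 hardening pattern. Reduced to a sketch (function name hypothetical):

        #include <linux/nospec.h>
        #include <linux/types.h>

        static ushort *lookup_map(ushort **maps, unsigned long idx,
                                  unsigned long nr)
        {
                if (idx >= nr)
                        return NULL;
                /* Force idx into 0..nr-1 even under speculation. */
                idx = array_index_nospec(idx, nr);
                return maps[idx];
        }
]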
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 76c158c41510..4f1a69c9d81d 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -61,7 +61,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
struct appldata_product_id id;
int rc;
- strncpy(id.prod_nr, "LNXAPPL", 7);
+ memcpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index ee6f3b563728..e69b12a40636 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -64,42 +64,18 @@ static struct notifier_block call_home_panic_nb = {
.priority = INT_MAX,
};
-static int proc_handler_callhome(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *count,
- loff_t *ppos)
-{
- unsigned long val;
- int len, rc;
- char buf[3];
-
- if (!*count || (*ppos && !write)) {
- *count = 0;
- return 0;
- }
- if (!write) {
- len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
- rc = copy_to_user(buffer, buf, sizeof(buf));
- if (rc != 0)
- return -EFAULT;
- } else {
- len = *count;
- rc = kstrtoul_from_user(buffer, len, 0, &val);
- if (rc)
- return rc;
- if (val != 0 && val != 1)
- return -EINVAL;
- callhome_enabled = val;
- }
- *count = len;
- *ppos += len;
- return 0;
-}
+static int zero;
+static int one = 1;
static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
+ .data = &callhome_enabled,
+ .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_handler_callhome,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
},
{}
};
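[The conversion above retires a hand-rolled proc handler: pointing .data at callhome_enabled and using proc_dointvec_minmax() gives the same read/write behaviour, with extra1/extra2 restricting accepted values to [0, 1] (out-of-range writes fail with -EINVAL). The same pattern for a hypothetical flag:

        #include <linux/sysctl.h>

        static int my_flag;
        static int zero;
        static int one = 1;

        static struct ctl_table my_table[] = {
                {
                        .procname       = "my_flag",
                        .data           = &my_flag,
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_minmax,
                        .extra1         = &zero,        /* minimum */
                        .extra2         = &one,         /* maximum */
                },
                {}
        };
]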
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 37e65a05517f..cdcde18e7220 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -113,16 +113,16 @@ static int crypt_enabled(struct tape_device *device)
static void ext_to_int_kekl(struct tape390_kekl *in,
struct tape3592_kekl *out)
{
- int i;
+ int len;
memset(out, 0, sizeof(*out));
if (in->type == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x40;
if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x80;
- strncpy(out->label, in->label, 64);
- for (i = strlen(in->label); i < sizeof(out->label); i++)
- out->label[i] = ' ';
+ len = min(sizeof(out->label), strlen(in->label));
+ memcpy(out->label, in->label, len);
+ memset(out->label + len, ' ', sizeof(out->label) - len);
ASCEBC(out->label, sizeof(out->label));
}
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index a07102472ce9..b58df0dd0039 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev(
if (!tcd)
return ERR_PTR(-ENOMEM);
- strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+ strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
- strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+ strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
@@ -77,7 +77,7 @@ struct tape_class_device *register_tape_dev(
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
- rc = PTR_RET(tcd->class_device);
+ rc = PTR_ERR_OR_ZERO(tcd->class_device);
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index afbdee74147d..51038ec309c1 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -471,14 +471,17 @@ int chp_new(struct chp_id chpid)
{
struct channel_subsystem *css = css_by_id(chpid.cssid);
struct channel_path *chp;
- int ret;
+ int ret = 0;
+ mutex_lock(&css->mutex);
if (chp_is_registered(chpid))
- return 0;
- chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
- if (!chp)
- return -ENOMEM;
+ goto out;
+ chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+ if (!chp) {
+ ret = -ENOMEM;
+ goto out;
+ }
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
@@ -505,21 +508,20 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
- mutex_lock(&css->mutex);
+
if (css->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
device_unregister(&chp->dev);
- mutex_unlock(&css->mutex);
goto out;
}
}
css->chps[chpid.id] = chp;
- mutex_unlock(&css->mutex);
goto out;
out_free:
kfree(chp);
out:
+ mutex_unlock(&css->mutex);
return ret;
}
@@ -585,8 +587,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
case CRW_ERC_INIT:
- if (!chp_is_registered(chpid))
- chp_new(chpid);
+ chp_new(chpid);
chsc_chp_online(chpid);
break;
case CRW_ERC_PERRI: /* Path has gone. */
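[The chp_new() rework above is a race fix: the chp_is_registered() test and the registration now run under the same css->mutex, so two concurrent callers can no longer both see "not registered" and allocate a channel path. That is also why this CRW handler (and ssd_register_chpids() in css.c below) may now call chp_new() unconditionally. The locking shape, reduced to a sketch with a hypothetical do_register_chp() standing in for the body:

        static int chp_new_locked(struct channel_subsystem *css,
                                  struct chp_id chpid)
        {
                int ret = 0;

                mutex_lock(&css->mutex);
                if (!chp_is_registered(chpid))
                        ret = do_register_chp(chpid);
                mutex_unlock(&css->mutex);
                return ret;
        }
]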
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9029804dcd22..a0baee25134c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -91,7 +91,7 @@ struct chsc_ssd_area {
u16 sch; /* subchannel */
u8 chpid[8]; /* chpids 0-7 */
u16 fla[8]; /* full link addresses 0-7 */
-} __attribute__ ((packed));
+} __packed __aligned(PAGE_SIZE);
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
@@ -319,7 +319,7 @@ struct chsc_sei {
struct chsc_sei_nt2_area nt2_area;
u8 nt_area[PAGE_SIZE - 24];
} u;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
/*
* Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
@@ -841,7 +841,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 : 4;
u32 fmt : 4;
u32 : 16;
- } __attribute__ ((packed)) *secm_area;
+ } *secm_area;
unsigned long flags;
int ret, ccode;
@@ -1014,7 +1014,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 cmg : 8;
u32 zeroes3;
u32 data[NR_MEASUREMENT_CHARS];
- } __attribute__ ((packed)) *scmc_area;
+ } *scmc_area;
chp->shared = -1;
chp->cmg = -1;
@@ -1142,7 +1142,7 @@ int __init chsc_get_cssid(int idx)
u8 cssid;
u32 : 24;
} list[0];
- } __packed *sdcal_area;
+ } *sdcal_area;
int ret;
spin_lock_irq(&chsc_page_lock);
@@ -1192,7 +1192,7 @@ chsc_determine_css_characteristics(void)
u32 reserved4;
u32 general_char[510];
u32 chsc_char[508];
- } __attribute__ ((packed)) *scsc_area;
+ } *scsc_area;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1236,7 +1236,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
unsigned int rsvd3[3];
u64 clock_delta;
unsigned int rsvd4[2];
- } __attribute__ ((packed)) *rr;
+ } *rr;
int rc;
memset(page, 0, PAGE_SIZE);
@@ -1261,7 +1261,7 @@ int chsc_sstpi(void *page, void *result, size_t size)
unsigned int rsvd0[3];
struct chsc_header response;
char data[];
- } __attribute__ ((packed)) *rr;
+ } *rr;
int rc;
memset(page, 0, PAGE_SIZE);
@@ -1284,7 +1284,7 @@ int chsc_siosl(struct subchannel_id schid)
u32 word3;
struct chsc_header response;
u32 word[11];
- } __attribute__ ((packed)) *siosl_area;
+ } *siosl_area;
unsigned long flags;
int ccode;
int rc;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 5c9f0dd33f4e..78aba8d94eec 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -15,12 +15,12 @@
#define NR_MEASUREMENT_CHARS 5
struct cmg_chars {
u32 values[NR_MEASUREMENT_CHARS];
-} __attribute__ ((packed));
+};
#define NR_MEASUREMENT_ENTRIES 8
struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
-} __attribute__ ((packed));
+};
struct channel_path_desc_fmt1 {
u8 flags;
@@ -38,7 +38,7 @@ struct channel_path_desc_fmt1 {
u8 s:1;
u8 f:1;
u32 zeros[2];
-} __attribute__ ((packed));
+};
struct channel_path_desc_fmt3 {
struct channel_path_desc_fmt1 fmt1_desc;
@@ -59,7 +59,7 @@ struct css_chsc_char {
u32:7;
u32 pnso:1; /* bit 116 */
u32:11;
-}__attribute__((packed));
+} __packed;
extern struct css_chsc_char css_chsc_characteristics;
@@ -82,7 +82,7 @@ struct chsc_ssqd_area {
struct chsc_header response;
u32:32;
struct qdio_ssqd_desc qdio_ssqd;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_scssc_area {
struct chsc_header request;
@@ -102,7 +102,7 @@ struct chsc_scssc_area {
u32 reserved[1004];
struct chsc_header response;
u32:32;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_scpd {
struct chsc_header request;
@@ -120,7 +120,7 @@ struct chsc_scpd {
struct chsc_header response;
u32:32;
u8 data[0];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_sda_area {
struct chsc_header request;
@@ -199,7 +199,7 @@ struct chsc_scm_info {
u32 reserved2[10];
u64 restok;
struct sale scmal[248];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
@@ -243,7 +243,7 @@ struct chsc_pnso_area {
struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
struct qdio_brinfo_entry_l2 l2[0];
} entries;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
int chsc_pnso_brinfo(struct subchannel_id schid,
struct chsc_pnso_area *brinfo_area,
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5130d7c67239..de744ca158fd 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -526,76 +526,6 @@ int cio_disable_subchannel(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
-static int cio_check_devno_blacklisted(struct subchannel *sch)
-{
- if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
- /*
- * This device must not be known to Linux. So we simply
- * say that there is no device and return ENODEV.
- */
- CIO_MSG_EVENT(6, "Blacklisted device detected "
- "at devno %04X, subchannel set %x\n",
- sch->schib.pmcw.dev, sch->schid.ssid);
- return -ENODEV;
- }
- return 0;
-}
-
-/**
- * cio_validate_subchannel - basic validation of subchannel
- * @sch: subchannel structure to be filled out
- * @schid: subchannel id
- *
- * Find out subchannel type and initialize struct subchannel.
- * Return codes:
- * 0 on success
- * -ENXIO for non-defined subchannels
- * -ENODEV for invalid subchannels or blacklisted devices
- * -EIO for subchannels in an invalid subchannel set
- */
-int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
-{
- char dbf_txt[15];
- int ccode;
- int err;
-
- sprintf(dbf_txt, "valsch%x", schid.sch_no);
- CIO_TRACE_EVENT(4, dbf_txt);
-
- /*
- * The first subchannel that is not-operational (ccode==3)
- * indicates that there aren't any more devices available.
- * If stsch gets an exception, it means the current subchannel set
- * is not valid.
- */
- ccode = stsch(schid, &sch->schib);
- if (ccode) {
- err = (ccode == 3) ? -ENXIO : ccode;
- goto out;
- }
- sch->st = sch->schib.pmcw.st;
- sch->schid = schid;
-
- switch (sch->st) {
- case SUBCHANNEL_TYPE_IO:
- case SUBCHANNEL_TYPE_MSG:
- if (!css_sch_is_valid(&sch->schib))
- err = -ENODEV;
- else
- err = cio_check_devno_blacklisted(sch);
- break;
- default:
- err = 0;
- }
- if (err)
- goto out;
-
- CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
- sch->schid.ssid, sch->schid.sch_no, sch->st);
-out:
- return err;
-}
-
/*
* do_cio_interrupt() handles all normal I/O device IRQ's
*/
@@ -719,6 +649,7 @@ struct subchannel *cio_probe_console(void)
{
struct subchannel_id schid;
struct subchannel *sch;
+ struct schib schib;
int sch_no, ret;
sch_no = cio_get_console_sch_no();
@@ -728,7 +659,11 @@ struct subchannel *cio_probe_console(void)
}
init_subchannel_id(&schid);
schid.sch_no = sch_no;
- sch = css_alloc_subchannel(schid);
+ ret = stsch(schid, &schib);
+ if (ret)
+ return ERR_PTR(-ENODEV);
+
+ sch = css_alloc_subchannel(schid, &schib);
if (IS_ERR(sch))
return sch;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 94cd813bdcfe..9811fd8a0c73 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -119,7 +119,6 @@ DECLARE_PER_CPU(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
-extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
extern int cio_enable_subchannel(struct subchannel *, u32);
extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9263a0fb3858..aea502922646 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -25,6 +25,7 @@
#include "css.h"
#include "cio.h"
+#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
@@ -168,18 +169,53 @@ static void css_subchannel_release(struct device *dev)
kfree(sch);
}
-struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
+static int css_validate_subchannel(struct subchannel_id schid,
+ struct schib *schib)
+{
+ int err;
+
+ switch (schib->pmcw.st) {
+ case SUBCHANNEL_TYPE_IO:
+ case SUBCHANNEL_TYPE_MSG:
+ if (!css_sch_is_valid(schib))
+ err = -ENODEV;
+ else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
+ CIO_MSG_EVENT(6, "Blacklisted device detected "
+ "at devno %04X, subchannel set %x\n",
+ schib->pmcw.dev, schid.ssid);
+ err = -ENODEV;
+ } else
+ err = 0;
+ break;
+ default:
+ err = 0;
+ }
+ if (err)
+ goto out;
+
+ CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+ schid.ssid, schid.sch_no, schib->pmcw.st);
+out:
+ return err;
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+ struct schib *schib)
{
struct subchannel *sch;
int ret;
+ ret = css_validate_subchannel(schid, schib);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
if (!sch)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel(sch, schid);
- if (ret < 0)
- goto err;
+ sch->schid = schid;
+ sch->schib = *schib;
+ sch->st = schib->pmcw.st;
ret = css_sch_create_locks(sch);
if (ret)
@@ -244,8 +280,7 @@ static void ssd_register_chpids(struct chsc_ssd_info *ssd)
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd->path_mask & mask)
- if (!chp_is_registered(ssd->chpid[i]))
- chp_new(ssd->chpid[i]);
+ chp_new(ssd->chpid[i]);
}
}
@@ -382,12 +417,12 @@ int css_register_subchannel(struct subchannel *sch)
return ret;
}
-static int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
struct subchannel *sch;
int ret;
- sch = css_alloc_subchannel(schid);
+ sch = css_alloc_subchannel(schid, schib);
if (IS_ERR(sch))
return PTR_ERR(sch);
@@ -436,23 +471,23 @@ EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
struct schib schib;
+ int ccode;
if (!slow) {
/* Will be done on the slow path. */
return -EAGAIN;
}
- if (stsch(schid, &schib)) {
- /* Subchannel is not provided. */
- return -ENXIO;
- }
- if (!css_sch_is_valid(&schib)) {
- /* Unusable - ignore. */
- return 0;
- }
- CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
- schid.sch_no);
+ /*
+ * The first subchannel that is not-operational (ccode==3)
+ * indicates that there aren't any more devices available.
+ * If stsch gets an exception, it means the current subchannel set
+ * is not valid.
+ */
+ ccode = stsch(schid, &schib);
+ if (ccode)
+ return (ccode == 3) ? -ENXIO : ccode;
- return css_probe_device(schid);
+ return css_probe_device(schid, &schib);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
@@ -1081,6 +1116,11 @@ static int __init channel_subsystem_init(void)
if (ret)
goto out_wq;
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
+ /* Start initial subchannel evaluation. */
+ css_schedule_eval_all();
+
return ret;
out_wq:
destroy_workqueue(cio_work_q);
@@ -1120,10 +1160,6 @@ int css_complete_work(void)
*/
static int __init channel_subsystem_init_sync(void)
{
- /* Register subchannels which are already in use. */
- cio_register_early_subchannels();
- /* Start initial subchannel evaluation. */
- css_schedule_eval_all();
css_complete_work();
return 0;
}
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 30357cbf350a..8d832900a63d 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -103,7 +103,8 @@ extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
extern int css_register_subchannel(struct subchannel *);
-extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id,
+ struct schib *schib);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index f4ca72dd862f..9c7d9da42ba0 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -631,21 +631,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
unsigned long phys_aob = 0;
if (!q->use_cq)
- goto out;
+ return 0;
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
}
-out:
+ q->sbal_state[bufnr].flags = 0;
return phys_aob;
}
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 1f8d1c1e566d..0ebb29b6fd6d 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -30,6 +30,17 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__field(u16, schno)
__field(u16, devno)
__field_struct(struct schib, schib)
+ __field(u8, pmcw_ena)
+ __field(u8, pmcw_st)
+ __field(u8, pmcw_dnv)
+ __field(u16, pmcw_dev)
+ __field(u8, pmcw_lpm)
+ __field(u8, pmcw_pnom)
+ __field(u8, pmcw_lpum)
+ __field(u8, pmcw_pim)
+ __field(u8, pmcw_pam)
+ __field(u8, pmcw_pom)
+ __field(u64, pmcw_chpid)
__field(int, cc)
),
TP_fast_assign(
@@ -38,18 +49,29 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__entry->schno = schid.sch_no;
__entry->devno = schib->pmcw.dev;
__entry->schib = *schib;
+ __entry->pmcw_ena = schib->pmcw.ena;
+ __entry->pmcw_st = schib->pmcw.st;
+ __entry->pmcw_dnv = schib->pmcw.dnv;
+ __entry->pmcw_dev = schib->pmcw.dev;
+ __entry->pmcw_lpm = schib->pmcw.lpm;
+ __entry->pmcw_pnom = schib->pmcw.pnom;
+ __entry->pmcw_lpum = schib->pmcw.lpum;
+ __entry->pmcw_pim = schib->pmcw.pim;
+ __entry->pmcw_pam = schib->pmcw.pam;
+ __entry->pmcw_pom = schib->pmcw.pom;
+ memcpy(&__entry->pmcw_chpid, &schib->pmcw.chpid, 8);
__entry->cc = cc;
),
TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x "
"lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x "
"pom=0x%02x chpids=%016llx",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- __entry->schib.pmcw.ena, __entry->schib.pmcw.st,
- __entry->schib.pmcw.dnv, __entry->schib.pmcw.dev,
- __entry->schib.pmcw.lpm, __entry->schib.pmcw.pnom,
- __entry->schib.pmcw.lpum, __entry->schib.pmcw.pim,
- __entry->schib.pmcw.pam, __entry->schib.pmcw.pom,
- *((u64 *) __entry->schib.pmcw.chpid)
+ __entry->pmcw_ena, __entry->pmcw_st,
+ __entry->pmcw_dnv, __entry->pmcw_dev,
+ __entry->pmcw_lpm, __entry->pmcw_pnom,
+ __entry->pmcw_lpum, __entry->pmcw_pim,
+ __entry->pmcw_pam, __entry->pmcw_pom,
+ __entry->pmcw_chpid
)
);
@@ -89,6 +111,13 @@ TRACE_EVENT(s390_cio_tsch,
__field(u8, ssid)
__field(u16, schno)
__field_struct(struct irb, irb)
+ __field(u8, scsw_dcc)
+ __field(u8, scsw_pno)
+ __field(u8, scsw_fctl)
+ __field(u8, scsw_actl)
+ __field(u8, scsw_stctl)
+ __field(u8, scsw_dstat)
+ __field(u8, scsw_cstat)
__field(int, cc)
),
TP_fast_assign(
@@ -96,15 +125,22 @@ TRACE_EVENT(s390_cio_tsch,
__entry->ssid = schid.ssid;
__entry->schno = schid.sch_no;
__entry->irb = *irb;
+ __entry->scsw_dcc = scsw_cc(&irb->scsw);
+ __entry->scsw_pno = scsw_pno(&irb->scsw);
+ __entry->scsw_fctl = scsw_fctl(&irb->scsw);
+ __entry->scsw_actl = scsw_actl(&irb->scsw);
+ __entry->scsw_stctl = scsw_stctl(&irb->scsw);
+ __entry->scsw_dstat = scsw_dstat(&irb->scsw);
+ __entry->scsw_cstat = scsw_cstat(&irb->scsw);
__entry->cc = cc;
),
TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x "
"stctl=0x%x dstat=0x%x cstat=0x%x",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- scsw_cc(&__entry->irb.scsw), scsw_pno(&__entry->irb.scsw),
- scsw_fctl(&__entry->irb.scsw), scsw_actl(&__entry->irb.scsw),
- scsw_stctl(&__entry->irb.scsw),
- scsw_dstat(&__entry->irb.scsw), scsw_cstat(&__entry->irb.scsw)
+ __entry->scsw_dcc, __entry->scsw_pno,
+ __entry->scsw_fctl, __entry->scsw_actl,
+ __entry->scsw_stctl,
+ __entry->scsw_dstat, __entry->scsw_cstat
)
);
@@ -122,6 +158,9 @@ TRACE_EVENT(s390_cio_tpi,
__field(u8, cssid)
__field(u8, ssid)
__field(u16, schno)
+ __field(u8, adapter_IO)
+ __field(u8, isc)
+ __field(u8, type)
),
TP_fast_assign(
__entry->cc = cc;
@@ -136,11 +175,14 @@ TRACE_EVENT(s390_cio_tpi,
__entry->cssid = __entry->tpi_info.schid.cssid;
__entry->ssid = __entry->tpi_info.schid.ssid;
__entry->schno = __entry->tpi_info.schid.sch_no;
+ __entry->adapter_IO = __entry->tpi_info.adapter_IO;
+ __entry->isc = __entry->tpi_info.isc;
+ __entry->type = __entry->tpi_info.type;
),
TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- __entry->tpi_info.adapter_IO, __entry->tpi_info.isc,
- __entry->tpi_info.type
+ __entry->adapter_IO, __entry->isc,
+ __entry->type
)
);
@@ -299,16 +341,20 @@ TRACE_EVENT(s390_cio_interrupt,
__field(u8, cssid)
__field(u8, ssid)
__field(u16, schno)
+ __field(u8, isc)
+ __field(u8, type)
),
TP_fast_assign(
__entry->tpi_info = *tpi_info;
- __entry->cssid = __entry->tpi_info.schid.cssid;
- __entry->ssid = __entry->tpi_info.schid.ssid;
- __entry->schno = __entry->tpi_info.schid.sch_no;
+ __entry->cssid = tpi_info->schid.cssid;
+ __entry->ssid = tpi_info->schid.ssid;
+ __entry->schno = tpi_info->schid.sch_no;
+ __entry->isc = tpi_info->isc;
+ __entry->type = tpi_info->type;
),
TP_printk("schid=%x.%x.%04x isc=%d type=%d",
__entry->cssid, __entry->ssid, __entry->schno,
- __entry->tpi_info.isc, __entry->tpi_info.type
+ __entry->isc, __entry->type
)
);
@@ -321,11 +367,13 @@ TRACE_EVENT(s390_cio_adapter_int,
TP_ARGS(tpi_info),
TP_STRUCT__entry(
__field_struct(struct tpi_info, tpi_info)
+ __field(u8, isc)
),
TP_fast_assign(
__entry->tpi_info = *tpi_info;
+ __entry->isc = tpi_info->isc;
),
- TP_printk("isc=%d", __entry->tpi_info.isc)
+ TP_printk("isc=%d", __entry->isc)
);
/**
@@ -339,16 +387,30 @@ TRACE_EVENT(s390_cio_stcrw,
TP_STRUCT__entry(
__field_struct(struct crw, crw)
__field(int, cc)
+ __field(u8, slct)
+ __field(u8, oflw)
+ __field(u8, chn)
+ __field(u8, rsc)
+ __field(u8, anc)
+ __field(u8, erc)
+ __field(u16, rsid)
),
TP_fast_assign(
__entry->crw = *crw;
__entry->cc = cc;
+ __entry->slct = crw->slct;
+ __entry->oflw = crw->oflw;
+ __entry->chn = crw->chn;
+ __entry->rsc = crw->rsc;
+ __entry->anc = crw->anc;
+ __entry->erc = crw->erc;
+ __entry->rsid = crw->rsid;
),
TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x "
"rsid=0x%x",
- __entry->cc, __entry->crw.slct, __entry->crw.oflw,
- __entry->crw.chn, __entry->crw.rsc, __entry->crw.anc,
- __entry->crw.erc, __entry->crw.rsid
+ __entry->cc, __entry->slct, __entry->oflw,
+ __entry->chn, __entry->rsc, __entry->anc,
+ __entry->erc, __entry->rsid
)
);
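[All trace.h changes above follow one pattern: rather than storing only the packed hardware structure and decoding its bitfields in TP_printk(), each field of interest is copied into a plain integer __field in TP_fast_assign(), so user-space parsers need no knowledge of s390 struct layouts. The pattern on a hypothetical event (TRACE_SYSTEM header scaffolding elided):

        TRACE_EVENT(demo_crw,
                TP_PROTO(struct crw *crw),
                TP_ARGS(crw),
                TP_STRUCT__entry(
                        __field(u8, rsc)
                        __field(u16, rsid)
                ),
                TP_fast_assign(
                        __entry->rsc = crw->rsc;
                        __entry->rsid = crw->rsid;
                ),
                TP_printk("rsc=%d rsid=0x%x", __entry->rsc, __entry->rsid)
        );
]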
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
deleted file mode 100644
index 16b59ce5e01d..000000000000
--- a/drivers/s390/crypto/ap_asm.h
+++ /dev/null
@@ -1,236 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2016
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * Adjunct processor bus inline assemblies.
- */
-
-#ifndef _AP_ASM_H_
-#define _AP_ASM_H_
-
-#include <asm/isc.h>
-
-/**
- * ap_intructions_available() - Test if AP instructions are available.
- *
- * Returns 0 if the AP instructions are installed.
- */
-static inline int ap_instructions_available(void)
-{
- register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
- register unsigned long reg1 asm ("1") = -ENODEV;
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_tapq(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
-{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- if (info)
- *info = reg2;
- return reg1;
-}
-
-/**
- * ap_pqap_rapq(): Reset adjunct processor queue.
- * @qid: The AP queue number
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
-{
- register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(RAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_aqic(): Control interruption for a specific AP.
- * @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- */
-static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind)
-{
- register unsigned long reg0 asm ("0") = qid | (3UL << 24);
- register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
- register struct ap_queue_status reg1_out asm ("1");
- register void *reg2 asm ("2") = ind;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(AQIC) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- :
- : "cc");
- return reg1_out;
-}
-
-/**
- * ap_qci(): Get AP configuration data
- *
- * Returns 0 on success, or -EOPNOTSUPP.
- */
-static inline int ap_qci(void *config)
-{
- register unsigned long reg0 asm ("0") = 0x04000000UL;
- register unsigned long reg1 asm ("1") = -EINVAL;
- register void *reg2 asm ("2") = (void *) config;
-
- asm volatile(
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2)
- :
- : "cc", "memory");
-
- return reg1;
-}
-
-/*
- * union ap_qact_ap_info - used together with the
- * ap_aqic() function to provide a convenient way
- * to handle the ap info needed by the qact function.
- */
-union ap_qact_ap_info {
- unsigned long val;
- struct {
- unsigned int : 3;
- unsigned int mode : 3;
- unsigned int : 26;
- unsigned int cat : 8;
- unsigned int : 8;
- unsigned char ver[2];
- };
-};
-
-/**
- * ap_qact(): Query AP combatibility type.
- * @qid: The AP queue number
- * @apinfo: On input the info about the AP queue. On output the
- * alternate AP queue info provided by the qact function
- * in GR2 is stored in.
- *
- * Returns AP queue status. Check response_code field for failures.
- */
-static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
- union ap_qact_ap_info *apinfo)
-{
- register unsigned long reg0 asm ("0") = qid | (5UL << 24)
- | ((ifbit & 0x01) << 22);
- register unsigned long reg1_in asm ("1") = apinfo->val;
- register struct ap_queue_status reg1_out asm ("1");
- register unsigned long reg2 asm ("2") = 0;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(QACT) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- : : "cc");
- apinfo->val = reg2;
- return reg1_out;
-}
-
-/**
- * ap_nqap(): Send message to adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: The program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on NQAP can't happen because the L bit is 1.
- * Condition code 2 on NQAP also means the send is incomplete,
- * because a segment boundary was reached. The NQAP is repeated.
- */
-static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
- unsigned long long psmid,
- void *msg, size_t length)
-{
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
- asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5)
- : "cc", "memory");
- return reg1;
-}
-
-/**
- * ap_dqap(): Receive message from adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on DQAP means the receive has taken place
- * but only partially. The response is incomplete, hence the
- * DQAP is repeated.
- * Condition code 2 on DQAP also means the receive is incomplete,
- * this time because a segment boundary was reached. Again, the
- * DQAP is repeated.
- * Note that gpr2 is used by the DQAP instruction to keep track of
- * any 'residual' length, in case the instruction gets interrupted.
- * Hence it gets zeroed before the instruction.
- */
-static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
- unsigned long long *psmid,
- void *msg, size_t length)
-{
- register unsigned long reg0 asm("0") = qid | 0x80000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm("2") = 0UL;
- register unsigned long reg4 asm("4") = (unsigned long) msg;
- register unsigned long reg5 asm("5") = (unsigned long) length;
- register unsigned long reg6 asm("6") = 0UL;
- register unsigned long reg7 asm("7") = 0UL;
-
-
- asm volatile(
- "0: .long 0xb2ae0064\n" /* DQAP */
- " brc 6,0b\n"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2),
- "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
- : : "cc", "memory");
- *psmid = (((unsigned long long) reg6) << 32) + reg7;
- return reg1;
-}
-
-#endif /* _AP_ASM_H_ */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 35a0c2b52f82..bf27fc4d1335 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -36,7 +36,6 @@
#include <linux/debugfs.h>
#include "ap_bus.h"
-#include "ap_asm.h"
#include "ap_debug.h"
/*
@@ -174,24 +173,6 @@ static inline int ap_qact_available(void)
return 0;
}
-/**
- * ap_test_queue(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info)
-{
- if (tbit)
- qid |= 1UL << 23; /* set T bit*/
- return ap_tapq(qid, info);
-}
-EXPORT_SYMBOL(ap_test_queue);
-
/*
* ap_query_configuration(): Fetch cryptographic config info
*
@@ -200,7 +181,7 @@ EXPORT_SYMBOL(ap_test_queue);
* is returned, e.g. if the PQAP(QCI) instruction is not
* available, the return value will be -EOPNOTSUPP.
*/
-int ap_query_configuration(struct ap_config_info *info)
+static inline int ap_query_configuration(struct ap_config_info *info)
{
if (!ap_configuration_available())
return -EOPNOTSUPP;
@@ -493,7 +474,7 @@ static int ap_poll_thread_start(void)
return 0;
mutex_lock(&ap_poll_thread_mutex);
ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
- rc = PTR_RET(ap_poll_kthread);
+ rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
if (rc)
ap_poll_kthread = NULL;
mutex_unlock(&ap_poll_thread_mutex);
@@ -1261,7 +1242,7 @@ static int __init ap_module_init(void)
/* Create /sys/devices/ap. */
ap_root_device = root_device_register("ap");
- rc = PTR_RET(ap_root_device);
+ rc = PTR_ERR_OR_ZERO(ap_root_device);
if (rc)
goto out_bus;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6a273c5ebca5..936541937e15 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/types.h>
+#include <asm/isc.h>
#include <asm/ap.h>
#define AP_DEVICES 256 /* Number of AP devices. */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 2c726df210f6..c13e43292cb7 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -14,7 +14,6 @@
#include <asm/facility.h>
#include "ap_bus.h"
-#include "ap_asm.h"
/*
* AP card related attributes.
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba3a2e13b0eb..e365171fe28f 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -14,26 +14,6 @@
#include <asm/facility.h>
#include "ap_bus.h"
-#include "ap_asm.h"
-
-/**
- * ap_queue_irq_ctrl(): Control interruption on a AP queue.
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- *
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
- */
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind)
-{
- return ap_aqic(qid, qirqctrl, ind);
-}
-EXPORT_SYMBOL(ap_queue_irq_ctrl);
/**
* ap_queue_enable_interruption(): Enable interruption on an AP queue.
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 3929c8be8098..e663432395c1 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -699,7 +699,7 @@ static int query_crypto_facility(u16 cardnr, u16 domain,
/* fill request cprb param block with FQ request */
preqparm = (struct fqreqparm *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
- strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
preqparm->lv1.len = sizeof(preqparm->lv1);
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 233e1e695208..da2c8dfd4d74 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -83,9 +83,21 @@ static ssize_t zcrypt_card_online_store(struct device *dev,
static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
zcrypt_card_online_store);
+static ssize_t zcrypt_card_load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+}
+
+static DEVICE_ATTR(load, 0444, zcrypt_card_load_show, NULL);
+
static struct attribute *zcrypt_card_attrs[] = {
&dev_attr_type.attr,
&dev_attr_online.attr,
+ &dev_attr_load.attr,
NULL,
};
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 011d61d8a4ae..1752622b95f7 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -99,7 +99,7 @@ struct cca_pvt_ext_CRT_sec {
* @mex: pointer to user input data
* @p: pointer to memory area for the key
*
- * Returns the size of the key area or -EFAULT
+ * Returns the size of the key area or negative errno value.
*/
static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
{
@@ -118,6 +118,15 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
unsigned char *temp;
int i;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > 512))
+ return -EINVAL;
+
memset(key, 0, sizeof(*key));
key->pubHdr = static_pub_hdr;
@@ -178,6 +187,15 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
struct cca_public_sec *pub;
int short_len, long_len, pad_len, key_len, size;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > 512))
+ return -EINVAL;
+
memset(key, 0, sizeof(*key));
short_len = (crt->inputdatalength + 1) / 2;
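[The WARN_ON_ONCE() checks added here (and in zcrypt_msgtype6.c below) re-validate the user-supplied length against the destination buffer even though the dispatcher already selected on it, so a later refactor of the call chain cannot silently turn the copy_from_user() into an overflow. The pattern as a sketch (helper name hypothetical):

        #include <linux/bug.h>
        #include <linux/uaccess.h>

        static int copy_bounded(void *dst, size_t dst_size,
                                const void __user *src, unsigned int len)
        {
                if (WARN_ON_ONCE(len > dst_size))
                        return -EINVAL; /* should be unreachable */
                if (copy_from_user(dst, src, len))
                        return -EFAULT;
                return 0;
        }
]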
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 97d4bacbc442..e70ae078c86b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -246,7 +246,7 @@ int speed_idx_ep11(int req_type)
* @ap_msg: pointer to AP message
* @mex: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or negative errno value.
*/
static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
@@ -272,6 +272,14 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
} __packed * msg = ap_msg->message;
int size;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
/* VUD.ciphertext */
msg->length = mex->inputdatalength + 2;
if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
@@ -307,7 +315,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
* @ap_msg: pointer to AP message
* @crt: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or negative errno value.
*/
static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
@@ -334,6 +342,14 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
} __packed * msg = ap_msg->message;
int size;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
/* VUD.ciphertext */
msg->length = crt->inputdatalength + 2;
if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 720434e18007..91a52f268353 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -75,8 +75,20 @@ static ssize_t zcrypt_queue_online_store(struct device *dev,
static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
zcrypt_queue_online_store);
+static ssize_t zcrypt_queue_load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+}
+
+static DEVICE_ATTR(load, 0444, zcrypt_queue_load_show, NULL);
+
static struct attribute *zcrypt_queue_attrs[] = {
&dev_attr_online.attr,
+ &dev_attr_load.attr,
NULL,
};
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index a3a8c8d9d717..94f4d8fe85e0 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -101,7 +101,7 @@ static void __init zfcp_init_device_setup(char *devstr)
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
- strncpy(busid, token, ZFCP_BUS_ID_SIZE);
+ strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 497a68389461..a43d44e7e7dd 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -88,10 +88,8 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
- struct inode *inode = NULL;
+ struct inode *inode;
int rc;
if (fops->owner && !try_module_get(fops->owner)) {
@@ -116,29 +114,15 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
goto err3;
}
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
- if (!path.dentry) {
- dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
- rc = -ENOMEM;
- goto err4;
- }
-
- path.mnt = mntget(ocxlflash_vfs_mount);
- d_instantiate(path.dentry, inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file)) {
rc = PTR_ERR(file);
dev_err(dev, "%s: alloc_file failed rc=%d\n",
__func__, rc);
- path_put(&path);
- goto err3;
+ goto err4;
}
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
out:
return file;
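[The conversion above replaces the open-coded pseudo-file setup (d_alloc_pseudo(), mntget(), d_instantiate(), alloc_file(), manual f_flags fixup) with a single alloc_file_pseudo() call. A sketch of its use under the same assumptions as the patch — on failure the caller still owns the inode reference, hence the goto err4:

        #include <linux/file.h>
        #include <linux/fs.h>

        static struct file *getfile_sketch(struct vfsmount *mnt,
                                           struct inode *inode,
                                           const struct file_operations *fops,
                                           void *priv, int flags)
        {
                struct file *file;

                file = alloc_file_pseudo(inode, mnt, "ocxlflash",
                                         flags & (O_ACCMODE | O_NONBLOCK),
                                         fops);
                if (IS_ERR(file))
                        return file;    /* caller must iput(inode) */
                file->private_data = priv;
                return file;
        }
]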
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index ea23c8dffc25..ffec695e0bfb 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
case ELS_LOGO:
if (fip->mode == FIP_MODE_VN2VN) {
if (fip->state != FIP_ST_VNMP_UP)
- return -EINVAL;
+ goto drop;
if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
- return -EINVAL;
+ goto drop;
} else {
if (fip->state != FIP_ST_ENABLED)
return 0;
@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
fip->send(fip, skb);
return -EINPROGRESS;
drop:
- kfree_skb(skb);
LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
op, ntoh24(fh->fh_d_id));
+ kfree_skb(skb);
return -EINVAL;
}
EXPORT_SYMBOL(fcoe_ctlr_els_send);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 31d31aad3de1..89b1f1af2fd4 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
+ rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 569392d0d4c9..e44c91edf92d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3343,11 +3343,10 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
spinlock_t *writeq_lock)
{
unsigned long flags;
- __u64 data_out = b;
spin_lock_irqsave(writeq_lock, flags);
- writel((u32)(data_out), addr);
- writel((u32)(data_out >> 32), (addr + 4));
+ __raw_writel((u32)(b), addr);
+ __raw_writel((u32)(b >> 32), (addr + 4));
mmiowb();
spin_unlock_irqrestore(writeq_lock, flags);
}
@@ -3367,7 +3366,8 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
- writeq(b, addr);
+ __raw_writeq(b, addr);
+ mmiowb();
}
#else
static inline void
@@ -5268,7 +5268,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* send message 32-bits at a time */
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
- writel((u32)(request[i]), &ioc->chip->Doorbell);
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 5)))
failed = 1;
}
@@ -5289,7 +5289,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = (u16)(readl(&ioc->chip->Doorbell)
+ reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5298,7 +5298,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
ioc->name, __LINE__);
return -EFAULT;
}
- reply[1] = (u16)(readl(&ioc->chip->Doorbell)
+ reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@@ -5312,7 +5312,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
if (i >= reply_bytes/2) /* overflow case */
readl(&ioc->chip->Doorbell);
else
- reply[i] = (u16)(readl(&ioc->chip->Doorbell)
+ reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 091ec1207bea..cff83b9457f7 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -888,7 +888,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
- snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+ snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a91cca52b5d5..dd93a22fe843 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
req_cnt = 1;
handle = 0;
- if (!sp)
- goto skip_cmd_array;
-
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds) {
- ql_log(ql_log_warn, vha, 0x700b,
- "No room on outstanding cmd array.\n");
- goto queuing_error;
- }
-
- /* Prep command array. */
- req->current_outstanding_cmd = handle;
- req->outstanding_cmds[handle] = sp;
- sp->handle = handle;
-
- /* Adjust entry-counts as needed. */
- if (sp->type != SRB_SCSI_CMD)
+ if (sp && (sp->type != SRB_SCSI_CMD)) {
+ /* Adjust entry-counts as needed. */
req_cnt = sp->iocbs;
+ }
-skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
if (qpair->use_shadow_reg)
@@ -2183,6 +2160,28 @@ skip_cmd_array:
if (req->cnt < req_cnt + 2)
goto queuing_error;
+ if (sp) {
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds) {
+ ql_log(ql_log_warn, vha, 0x700b,
+ "No room on outstanding cmd array.\n");
+ goto queuing_error;
+ }
+
+ /* Prep command array. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ }
+
/* Prep packet */
req->cnt -= req_cnt;
pkt = req->ring_ptr;
@@ -2195,6 +2194,8 @@ skip_cmd_array:
pkt->handle = handle;
}
+ return pkt;
+
queuing_error:
qpair->tgt_counters.num_alloc_iocb_failed++;
return pkt;
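[The qla2xxx reordering above claims an outstanding-command slot only after the request-ring space check has passed; previously the slot was assigned first, so an error exit left a stale sp in outstanding_cmds[]. The intended ordering, as a sketch with a hypothetical claim_outstanding_slot() helper:

        static request_t *alloc_iocb_sketch(struct req_que *req, srb_t *sp,
                                            uint16_t req_cnt)
        {
                uint32_t handle;

                if (req->cnt < req_cnt + 2)
                        return NULL;            /* no ring space */
                if (sp) {
                        handle = claim_outstanding_slot(req, sp);
                        if (!handle)
                                return NULL;    /* nothing to undo */
                        sp->handle = handle;
                }
                req->cnt -= req_cnt;            /* commit */
                return req->ring_ptr;
        }
]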
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 3f3cb72e0c0c..d0389b20574d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
struct scsi_cd *cd;
+ struct scsi_device *sdev;
int ret = -ENXIO;
+ cd = scsi_cd_get(bdev->bd_disk);
+ if (!cd)
+ goto out;
+
+ sdev = cd->device;
+ scsi_autopm_get_device(sdev);
check_disk_change(bdev);
mutex_lock(&sr_mutex);
- cd = scsi_cd_get(bdev->bd_disk);
- if (cd) {
- ret = cdrom_open(&cd->cdi, bdev, mode);
- if (ret)
- scsi_cd_put(cd);
- }
+ ret = cdrom_open(&cd->cdi, bdev, mode);
mutex_unlock(&sr_mutex);
+
+ scsi_autopm_put_device(sdev);
+ if (ret)
+ scsi_cd_put(cd);
+
+out:
return ret;
}
@@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (ret)
goto out;
+ scsi_autopm_get_device(sdev);
+
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to cdrom/block level.
@@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
ret = scsi_ioctl(sdev, cmd, argp);
- goto out;
+ goto put;
}
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
- goto out;
+ goto put;
ret = scsi_ioctl(sdev, cmd, argp);
+put:
+ scsi_autopm_put_device(sdev);
+
out:
mutex_unlock(&sr_mutex);
return ret;
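[The sr_block_open()/sr_block_ioctl() changes above wrap the I/O-issuing paths in a runtime-PM get/put pair, so a suspended drive is resumed before the command is sent and allowed to suspend again afterwards. The pairing, as a sketch:

        #include <scsi/scsi_device.h>
        #include <scsi/scsi_ioctl.h>

        static int sr_ioctl_sketch(struct scsi_device *sdev, int cmd,
                                   void __user *argp)
        {
                int ret;

                scsi_autopm_get_device(sdev);   /* resume if suspended */
                ret = scsi_ioctl(sdev, cmd, argp);
                scsi_autopm_put_device(sdev);   /* may suspend again */
                return ret;
        }
]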
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 777e5f1e52d1..0cd947f78b5b 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
(btstat == BTSTAT_SUCCESS ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
- cmd->result = (DID_OK << 16) | sdstat;
- if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
- cmd->result |= (DRIVER_SENSE << 24);
+ if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+ cmd->result = (DID_RESET << 16);
+ } else {
+ cmd->result = (DID_OK << 16) | sdstat;
+ if (sdstat == SAM_STAT_CHECK_CONDITION &&
+ cmd->sense_buffer)
+ cmd->result |= (DRIVER_SENSE << 24);
+ }
} else
switch (btstat) {
case BTSTAT_SUCCESS:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a502f1af4a21..ed3114556fda 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1560,9 +1560,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
d->iotlb = niotlb;
for (i = 0; i < d->nvqs; ++i) {
- mutex_lock(&d->vqs[i]->mutex);
- d->vqs[i]->iotlb = niotlb;
- mutex_unlock(&d->vqs[i]->mutex);
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->iotlb = niotlb;
+ __vhost_vq_meta_reset(vq);
+ mutex_unlock(&vq->mutex);
}
vhost_umem_clean(oiotlb);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 46a4484e3da7..c6f78d27947b 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -20,7 +20,7 @@
#include <drm/drm_connector.h> /* For DRM_MODE_PANEL_ORIENTATION_* */
static bool request_mem_succeeded = false;
-static bool nowc = false;
+static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
@@ -68,8 +68,12 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
static void efifb_destroy(struct fb_info *info)
{
- if (info->screen_base)
- iounmap(info->screen_base);
+ if (info->screen_base) {
+ if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+ iounmap(info->screen_base);
+ else
+ memunmap(info->screen_base);
+ }
if (request_mem_succeeded)
release_mem_region(info->apertures->ranges[0].base,
info->apertures->ranges[0].size);
@@ -104,7 +108,7 @@ static int efifb_setup(char *options)
else if (!strncmp(this_opt, "width:", 6))
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
else if (!strcmp(this_opt, "nowc"))
- nowc = true;
+ mem_flags &= ~EFI_MEMORY_WC;
}
}
@@ -164,6 +168,7 @@ static int efifb_probe(struct platform_device *dev)
unsigned int size_remap;
unsigned int size_total;
char *option = NULL;
+ efi_memory_desc_t md;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV;
@@ -272,12 +277,35 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (nowc)
- info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
- else
- info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+ if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+ if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+ (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+ pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+ efifb_fix.smem_start);
+ err = -EIO;
+ goto err_release_fb;
+ }
+ /*
+ * If the UEFI memory map covers the efifb region, we may only
+ * remap it using the attributes the memory map prescribes.
+ */
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= md.attribute;
+ }
+ if (mem_flags & EFI_MEMORY_WC)
+ info->screen_base = ioremap_wc(efifb_fix.smem_start,
+ efifb_fix.smem_len);
+ else if (mem_flags & EFI_MEMORY_UC)
+ info->screen_base = ioremap(efifb_fix.smem_start,
+ efifb_fix.smem_len);
+ else if (mem_flags & EFI_MEMORY_WT)
+ info->screen_base = memremap(efifb_fix.smem_start,
+ efifb_fix.smem_len, MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ info->screen_base = memremap(efifb_fix.smem_start,
+ efifb_fix.smem_len, MEMREMAP_WB);
if (!info->screen_base) {
- pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+ pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
err = -EIO;
goto err_release_fb;
@@ -371,7 +399,10 @@ err_fb_dealoc:
err_groups:
sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
err_unmap:
- iounmap(info->screen_base);
+ if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+ iounmap(info->screen_base);
+ else
+ memunmap(info->screen_base);
err_release_fb:
framebuffer_release(info);
err_release_mem:
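[The efifb rework above derives the mapping type from the UEFI memory map: mem_flags starts as WC|UC, the nowc option can clear WC, and when efi_mem_desc_lookup() finds the framebuffer's region the flags are ANDed with that region's attributes (with WT/WB also permitted). The first available type in WC, UC, WT, WB order is then used. The selection chain, reduced to a sketch:

        #include <linux/efi.h>
        #include <linux/io.h>

        static void __iomem *efifb_map_sketch(unsigned long start,
                                              unsigned int len, u64 mem_flags)
        {
                if (mem_flags & EFI_MEMORY_WC)
                        return ioremap_wc(start, len);
                if (mem_flags & EFI_MEMORY_UC)
                        return ioremap(start, len);
                if (mem_flags & EFI_MEMORY_WT)
                        return (__force void __iomem *)
                                memremap(start, len, MEMREMAP_WT);
                if (mem_flags & EFI_MEMORY_WB)
                        return (__force void __iomem *)
                                memremap(start, len, MEMREMAP_WB);
                return NULL;    /* no permitted mapping type */
        }
]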
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 42e102e2e74a..85ff859d3af5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -859,8 +859,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
static int
v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened)
+ struct file *file, unsigned flags, umode_t mode)
{
int err;
u32 perm;
@@ -917,7 +916,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9inode->writeback_fid = (void *) inode_fid;
}
mutex_unlock(&v9inode->v_mutex);
- err = finish_open(file, dentry, generic_file_open, opened);
+ err = finish_open(file, dentry, generic_file_open);
if (err)
goto error;
@@ -925,7 +924,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(d_inode(dentry), file);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
out:
dput(res);
return err;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 7f6ae21a27b3..4823e1c46999 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -241,8 +241,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
static int
v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t omode,
- int *opened)
+ struct file *file, unsigned flags, umode_t omode)
{
int err = 0;
kgid_t gid;
@@ -352,13 +351,13 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
- err = finish_open(file, dentry, generic_file_open, opened);
+ err = finish_open(file, dentry, generic_file_open);
if (err)
goto err_clunk_old_fid;
file->private_data = ofid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(inode, file);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
out:
v9fs_put_acl(dacl, pacl);
dput(res);
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index c836c425ca94..e91028d4340a 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -287,7 +287,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->mmu_private = inode->i_size;
}
- insert_inode_hash(inode);
+ inode_fake_hash(inode);
out:
return inode;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 71fa525d63a0..7e099a7a4eb1 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -291,6 +291,7 @@ static void destroy_inodecache(void)
static const struct super_operations adfs_sops = {
.alloc_inode = adfs_alloc_inode,
.destroy_inode = adfs_destroy_inode,
+ .drop_inode = generic_delete_inode,
.write_inode = adfs_write_inode,
.put_super = adfs_put_super,
.statfs = adfs_statfs,
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 7d623008157f..855bf2b79fed 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -822,6 +822,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct inode *inode;
+ struct dentry *d;
struct key *key;
int ret;
@@ -862,43 +863,17 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
afs_stat_v(dvnode, n_lookup);
inode = afs_do_lookup(dir, dentry, key);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- inode = afs_try_auto_mntpt(dentry, dir);
- if (!IS_ERR(inode)) {
- key_put(key);
- goto success;
- }
-
- ret = PTR_ERR(inode);
- }
-
- key_put(key);
- if (ret == -ENOENT) {
- d_add(dentry, NULL);
- _leave(" = NULL [negative]");
- return NULL;
- }
- _leave(" = %d [do]", ret);
- return ERR_PTR(ret);
- }
- dentry->d_fsdata = (void *)(unsigned long)dvnode->status.data_version;
-
- /* instantiate the dentry */
key_put(key);
- if (IS_ERR(inode)) {
- _leave(" = %ld", PTR_ERR(inode));
- return ERR_CAST(inode);
+ if (inode == ERR_PTR(-ENOENT)) {
+ inode = afs_try_auto_mntpt(dentry, dir);
+ } else {
+ dentry->d_fsdata =
+ (void *)(unsigned long)dvnode->status.data_version;
}
-
-success:
- d_add(dentry, inode);
- _leave(" = 0 { ino=%lu v=%u }",
- d_inode(dentry)->i_ino,
- d_inode(dentry)->i_generation);
-
- return NULL;
+ d = d_splice_alias(inode, dentry);
+ if (!IS_ERR_OR_NULL(d))
+ d->d_fsdata = dentry->d_fsdata;
+ return d;
}
/*
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 174e843f0633..1cde710a8013 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -83,7 +83,7 @@ struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
out:
_leave("= %d", ret);
- return ERR_PTR(ret);
+ return ret == -ENOENT ? NULL : ERR_PTR(ret);
}
/*
@@ -141,12 +141,6 @@ out_p:
static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct afs_vnode *vnode;
- struct inode *inode;
- int ret;
-
- vnode = AFS_FS_I(dir);
-
_enter("%pd", dentry);
ASSERTCMP(d_inode(dentry), ==, NULL);
@@ -160,22 +154,7 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
memcmp(dentry->d_name.name, "@cell", 5) == 0)
return afs_lookup_atcell(dentry);
- inode = afs_try_auto_mntpt(dentry, dir);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- d_add(dentry, NULL);
- _leave(" = NULL [negative]");
- return NULL;
- }
- _leave(" = %d [do]", ret);
- return ERR_PTR(ret);
- }
-
- d_add(dentry, inode);
- _leave(" = 0 { ino=%lu v=%u }",
- d_inode(dentry)->i_ino, d_inode(dentry)->i_generation);
- return NULL;
+ return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
}
const struct inode_operations afs_dynroot_inode_operations = {
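Both afs lookup paths above now funnel through d_splice_alias(), which accepts a positive inode, NULL (producing a negative dentry) or an ERR_PTR(), so the hand-rolled d_add()/error branching collapses; afs_try_auto_mntpt() returning NULL instead of ERR_PTR(-ENOENT) is what makes that possible. A sketch of the resulting generic lookup shape (example_iget() is a hypothetical helper):

    static struct dentry *example_lookup(struct inode *dir,
                                         struct dentry *dentry,
                                         unsigned int flags)
    {
            /* may return an inode, NULL for "no such entry", or ERR_PTR() */
            struct inode *inode = example_iget(dir, &dentry->d_name);

            /* d_splice_alias() handles all three cases itself */
            return d_splice_alias(inode, dentry);
    }

The hostfs and hpfs lookup conversions later in this diff follow the same pattern.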
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a1b18082991b..183cc5418722 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -648,7 +648,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
trace_afs_notify_call(rxcall, call);
call->need_attention = true;
- u = __atomic_add_unless(&call->usage, 1, 0);
+ u = atomic_fetch_add_unless(&call->usage, 1, 0);
if (u != 0) {
trace_afs_call(call, afs_call_trace_wake, u,
atomic_read(&call->net->nr_outstanding_calls),
diff --git a/fs/aio.c b/fs/aio.c
index 27454594e37a..b9350f3360c6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,6 +5,7 @@
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright 2018 Christoph Hellwig.
*
* See ../COPYING for licensing terms.
*/
@@ -18,6 +19,7 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
+#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
@@ -164,10 +166,21 @@ struct fsync_iocb {
bool datasync;
};
+struct poll_iocb {
+ struct file *file;
+ struct wait_queue_head *head;
+ __poll_t events;
+ bool woken;
+ bool cancelled;
+ struct wait_queue_entry wait;
+ struct work_struct work;
+};
+
struct aio_kiocb {
union {
struct kiocb rw;
struct fsync_iocb fsync;
+ struct poll_iocb poll;
};
struct kioctx *ki_ctx;
@@ -178,6 +191,7 @@ struct aio_kiocb {
struct list_head ki_list; /* the aio core uses this
* for cancellation */
+ refcount_t ki_refcnt;
/*
* If the aio_resfd field of the userspace iocb is not zero,
@@ -202,9 +216,7 @@ static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
- struct qstr this = QSTR_INIT("[aio]", 5);
struct file *file;
- struct path path;
struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -213,31 +225,17 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
inode->i_mapping->private_data = ctx;
inode->i_size = PAGE_SIZE * nr_pages;
- path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
- if (!path.dentry) {
+ file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
+ O_RDWR, &aio_ring_fops);
+ if (IS_ERR(file))
iput(inode);
- return ERR_PTR(-ENOMEM);
- }
- path.mnt = mntget(aio_mnt);
-
- d_instantiate(path.dentry, inode);
- file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
- if (IS_ERR(file)) {
- path_put(&path);
- return file;
- }
-
- file->f_flags = O_RDWR;
return file;
}
static struct dentry *aio_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- static const struct dentry_operations ops = {
- .d_dname = simple_dname,
- };
- struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+ struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
AIO_RING_MAGIC);
if (!IS_ERR(root))
@@ -1015,6 +1013,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
percpu_ref_get(&ctx->reqs);
INIT_LIST_HEAD(&req->ki_list);
+ refcount_set(&req->ki_refcnt, 0);
req->ki_ctx = ctx;
return req;
out_put:
@@ -1049,6 +1048,15 @@ out:
return ret;
}
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+ if (refcount_read(&iocb->ki_refcnt) == 0 ||
+ refcount_dec_and_test(&iocb->ki_refcnt)) {
+ percpu_ref_put(&iocb->ki_ctx->reqs);
+ kmem_cache_free(kiocb_cachep, iocb);
+ }
+}
+
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
@@ -1118,8 +1126,6 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
eventfd_ctx_put(iocb->ki_eventfd);
}
- kmem_cache_free(kiocb_cachep, iocb);
-
/*
* We have to order our ring_info tail store above and test
* of the wait list below outside the wait lock. This is
@@ -1130,8 +1136,7 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
-
- percpu_ref_put(&ctx->reqs);
+ iocb_put(iocb);
}
/* aio_read_events_ring
@@ -1592,6 +1597,182 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
return 0;
}
+static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
+{
+ struct file *file = iocb->poll.file;
+
+ aio_complete(iocb, mangle_poll(mask), 0);
+ fput(file);
+}
+
+static void aio_poll_complete_work(struct work_struct *work)
+{
+ struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+ struct poll_table_struct pt = { ._key = req->events };
+ struct kioctx *ctx = iocb->ki_ctx;
+ __poll_t mask = 0;
+
+ if (!READ_ONCE(req->cancelled))
+ mask = vfs_poll(req->file, &pt) & req->events;
+
+ /*
+ * Note that ->ki_cancel callers also delete iocb from active_reqs after
+ * calling ->ki_cancel. We need the ctx_lock roundtrip here to
+ * synchronize with them. In the cancellation case the list_del_init
+ * itself is not actually needed, but harmless so we keep it in to
+ * avoid further branches in the fast path.
+ */
+ spin_lock_irq(&ctx->ctx_lock);
+ if (!mask && !READ_ONCE(req->cancelled)) {
+ add_wait_queue(req->head, &req->wait);
+ spin_unlock_irq(&ctx->ctx_lock);
+ return;
+ }
+ list_del_init(&iocb->ki_list);
+ spin_unlock_irq(&ctx->ctx_lock);
+
+ aio_poll_complete(iocb, mask);
+}
+
+/* assumes we are called with irqs disabled */
+static int aio_poll_cancel(struct kiocb *iocb)
+{
+ struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
+ struct poll_iocb *req = &aiocb->poll;
+
+ spin_lock(&req->head->lock);
+ WRITE_ONCE(req->cancelled, true);
+ if (!list_empty(&req->wait.entry)) {
+ list_del_init(&req->wait.entry);
+ schedule_work(&aiocb->poll.work);
+ }
+ spin_unlock(&req->head->lock);
+
+ return 0;
+}
+
+static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ void *key)
+{
+ struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+ __poll_t mask = key_to_poll(key);
+
+ req->woken = true;
+
+ /* for instances that support it check for an event match first: */
+ if (mask) {
+ if (!(mask & req->events))
+ return 0;
+
+ /* try to complete the iocb inline if we can: */
+ if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+ list_del(&iocb->ki_list);
+ spin_unlock(&iocb->ki_ctx->ctx_lock);
+
+ list_del_init(&req->wait.entry);
+ aio_poll_complete(iocb, mask);
+ return 1;
+ }
+ }
+
+ list_del_init(&req->wait.entry);
+ schedule_work(&req->work);
+ return 1;
+}
+
+struct aio_poll_table {
+ struct poll_table_struct pt;
+ struct aio_kiocb *iocb;
+ int error;
+};
+
+static void
+aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct poll_table_struct *p)
+{
+ struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
+
+ /* multiple wait queues per file are not supported */
+ if (unlikely(pt->iocb->poll.head)) {
+ pt->error = -EINVAL;
+ return;
+ }
+
+ pt->error = 0;
+ pt->iocb->poll.head = head;
+ add_wait_queue(head, &pt->iocb->poll.wait);
+}
+
+static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+{
+ struct kioctx *ctx = aiocb->ki_ctx;
+ struct poll_iocb *req = &aiocb->poll;
+ struct aio_poll_table apt;
+ __poll_t mask;
+
+ /* reject any unknown events outside the normal event mask. */
+ if ((u16)iocb->aio_buf != iocb->aio_buf)
+ return -EINVAL;
+ /* reject fields that are not defined for poll */
+ if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
+ return -EINVAL;
+
+ INIT_WORK(&req->work, aio_poll_complete_work);
+ req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
+ req->file = fget(iocb->aio_fildes);
+ if (unlikely(!req->file))
+ return -EBADF;
+
+ apt.pt._qproc = aio_poll_queue_proc;
+ apt.pt._key = req->events;
+ apt.iocb = aiocb;
+ apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
+
+ /* initialize the list so that we can do list_empty checks */

+ INIT_LIST_HEAD(&req->wait.entry);
+ init_waitqueue_func_entry(&req->wait, aio_poll_wake);
+
+ /* one for removal from waitqueue, one for this function */
+ refcount_set(&aiocb->ki_refcnt, 2);
+
+ mask = vfs_poll(req->file, &apt.pt) & req->events;
+ if (unlikely(!req->head)) {
+ /* we did not manage to set up a waitqueue, done */
+ goto out;
+ }
+
+ spin_lock_irq(&ctx->ctx_lock);
+ spin_lock(&req->head->lock);
+ if (req->woken) {
+ /* wake_up context handles the rest */
+ mask = 0;
+ apt.error = 0;
+ } else if (mask || apt.error) {
+ /* if we get an error or a mask we are done */
+ WARN_ON_ONCE(list_empty(&req->wait.entry));
+ list_del_init(&req->wait.entry);
+ } else {
+ /* actually waiting for an event */
+ list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+ aiocb->ki_cancel = aio_poll_cancel;
+ }
+ spin_unlock(&req->head->lock);
+ spin_unlock_irq(&ctx->ctx_lock);
+
+out:
+ if (unlikely(apt.error)) {
+ fput(req->file);
+ return apt.error;
+ }
+
+ if (mask)
+ aio_poll_complete(aiocb, mask);
+ iocb_put(aiocb);
+ return 0;
+}
+
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
bool compat)
{
@@ -1665,6 +1846,9 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
case IOCB_CMD_FDSYNC:
ret = aio_fsync(&req->fsync, &iocb, true);
break;
+ case IOCB_CMD_POLL:
+ ret = aio_poll(req, &iocb);
+ break;
default:
pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
ret = -EINVAL;
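The new IOCB_CMD_POLL gives aio a one-shot poll primitive: the requested events travel in iocb->aio_buf, and the poll-encoded result mask comes back in the completion's res field. The two-count ki_refcnt added above keeps the iocb alive across both the waitqueue callback and the submitting context; whichever drops the last reference frees it via iocb_put(). A minimal user-space usage sketch, assuming a kernel with this patch (raw syscalls, error handling trimmed):

    #include <linux/aio_abi.h>
    #include <poll.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            aio_context_t ctx = 0;
            struct iocb cb;
            struct iocb *cbs[1] = { &cb };
            struct io_event ev;

            if (syscall(SYS_io_setup, 1, &ctx) < 0)
                    return 1;

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = STDIN_FILENO;
            cb.aio_lio_opcode = IOCB_CMD_POLL;
            cb.aio_buf = POLLIN;    /* events requested, as for poll(2) */

            if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
                    return 1;

            /* blocks until stdin becomes readable (or hangs up) */
            if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) != 1)
                    return 1;

            syscall(SYS_io_destroy, ctx);
            return 0;
    }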
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3168ee4e77f4..91262c34b797 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -71,8 +71,6 @@ struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
if (IS_ERR(anon_inode_inode))
@@ -82,39 +80,23 @@ struct file *anon_inode_getfile(const char *name,
return ERR_PTR(-ENOENT);
/*
- * Link the inode to a directory entry by creating a unique name
- * using the inode sequence number.
- */
- file = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
- if (!path.dentry)
- goto err_module;
-
- path.mnt = mntget(anon_inode_mnt);
- /*
* We know the anon_inode inode count is always greater than zero,
* so ihold() is safe.
*/
ihold(anon_inode_inode);
-
- d_instantiate(path.dentry, anon_inode_inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ file = alloc_file_pseudo(anon_inode_inode, anon_inode_mnt, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file))
- goto err_dput;
+ goto err;
+
file->f_mapping = anon_inode_inode->i_mapping;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
return file;
-err_dput:
- path_put(&path);
-err_module:
+err:
+ iput(anon_inode_inode);
module_put(fops->owner);
return file;
}
diff --git a/fs/attr.c b/fs/attr.c
index e3d53bf12240..d22e8187477f 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(setattr_prepare);
* inode_newsize_ok - may this inode be truncated to a given size
* @inode: the inode to be truncated
* @offset: the new size to assign to the inode
- * @Returns: 0 on success, -ve errno on failure
*
* inode_newsize_ok must be called with i_mutex held.
*
@@ -130,6 +129,8 @@ EXPORT_SYMBOL(setattr_prepare);
* returned. @inode must be a file (not directory), with appropriate
* permissions to allow truncate (inode_newsize_ok does NOT check these
* conditions).
+ *
+ * Return: 0 on success, -ve errno on failure
*/
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
@@ -205,7 +206,7 @@ EXPORT_SYMBOL(setattr_copy);
/**
 * notify_change - modify attributes of a filesystem object
* @dentry: object affected
- * @iattr: new attributes
+ * @attr: new attributes
* @delegated_inode: returns inode, if the inode is delegated
*
* The caller must hold the i_mutex on the affected object.
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 125e8bbd22a2..8035d2a44561 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -134,7 +134,7 @@ static int bad_inode_update_time(struct inode *inode, struct timespec64 *time,
static int bad_inode_atomic_open(struct inode *inode, struct dentry *dentry,
struct file *file, unsigned int open_flag,
- umode_t create_mode, int *opened)
+ umode_t create_mode)
{
return -EIO;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 816cc921cf36..efae2fb0930a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1751,7 +1751,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
if (regset->core_note_type && regset->get &&
- (!regset->active || regset->active(t->task, regset))) {
+ (!regset->active || regset->active(t->task, regset) > 0)) {
int ret;
size_t size = regset_size(t->task, regset);
void *data = kmalloc(size, GFP_KERNEL);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 4b5fff31ef27..aa4a7a23ff99 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -205,7 +205,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
goto error;
if (fmt->flags & MISC_FMT_OPEN_FILE) {
- interp_file = filp_clone_open(fmt->interp_file);
+ interp_file = file_clone_open(fmt->interp_file);
if (!IS_ERR(interp_file))
deny_write_access(interp_file);
} else {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3f51ddc18f98..9357a19d2bff 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6295,8 +6295,10 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
location->type = BTRFS_INODE_ITEM_KEY;
ret = btrfs_insert_inode_locked(inode);
- if (ret < 0)
+ if (ret < 0) {
+ iput(inode);
goto fail;
+ }
path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
@@ -6355,12 +6357,11 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
return inode;
fail_unlock:
- unlock_new_inode(inode);
+ discard_new_inode(inode);
fail:
if (dir && name)
BTRFS_I(dir)->index_cnt--;
btrfs_free_path(path);
- iput(inode);
return ERR_PTR(ret);
}
@@ -6464,7 +6465,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int err;
- int drop_inode = 0;
u64 objectid;
u64 index = 0;
@@ -6486,6 +6486,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
@@ -6500,31 +6501,24 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
0, index);
- if (err) {
- goto out_unlock_inode;
- } else {
- btrfs_update_inode(trans, root, inode);
- d_instantiate_new(dentry, inode);
- }
+ if (err)
+ goto out_unlock;
+
+ btrfs_update_inode(trans, root, inode);
+ d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- if (drop_inode) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
return err;
-
-out_unlock_inode:
- drop_inode = 1;
- unlock_new_inode(inode);
- goto out_unlock;
-
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -6534,7 +6528,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
- int drop_inode_on_err = 0;
int err;
u64 objectid;
u64 index = 0;
@@ -6557,9 +6550,9 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
- drop_inode_on_err = 1;
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -6572,33 +6565,28 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_update_inode(trans, root, inode);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
0, index);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
- if (err && drop_inode_on_err) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_unlock_inode:
- unlock_new_inode(inode);
- goto out_unlock;
-
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6707,6 +6695,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_fail;
}
@@ -6717,34 +6706,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_fail_inode;
+ goto out_fail;
btrfs_i_size_write(BTRFS_I(inode), 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
- goto out_fail_inode;
+ goto out_fail;
err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
- goto out_fail_inode;
+ goto out_fail;
d_instantiate_new(dentry, inode);
drop_on_err = 0;
out_fail:
btrfs_end_transaction(trans);
- if (drop_on_err) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_fail_inode:
- unlock_new_inode(inode);
- goto out_fail;
}
static noinline int uncompress_inline(struct btrfs_path *path,
@@ -10071,7 +10056,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
struct btrfs_key key;
struct inode *inode = NULL;
int err;
- int drop_inode = 0;
u64 objectid;
u64 index = 0;
int name_len;
@@ -10104,6 +10088,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
objectid, S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
@@ -10120,12 +10105,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
- goto out_unlock_inode;
+ goto out_unlock;
}
key.objectid = btrfs_ino(BTRFS_I(inode));
key.offset = 0;
@@ -10135,7 +10120,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
datasize);
if (err) {
btrfs_free_path(path);
- goto out_unlock_inode;
+ goto out_unlock;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -10167,26 +10152,19 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (!err)
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
BTRFS_I(inode), 0, index);
- if (err) {
- drop_inode = 1;
- goto out_unlock_inode;
- }
+ if (err)
+ goto out_unlock;
d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
- if (drop_inode) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_unlock_inode:
- drop_inode = 1;
- unlock_new_inode(inode);
- goto out_unlock;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -10395,14 +10373,14 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
ret = btrfs_init_inode_security(trans, inode, dir, NULL);
if (ret)
- goto out_inode;
+ goto out;
ret = btrfs_update_inode(trans, root, inode);
if (ret)
- goto out_inode;
+ goto out;
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret)
- goto out_inode;
+ goto out;
/*
* We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -10412,21 +10390,15 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
* d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
*/
set_nlink(inode, 1);
- unlock_new_inode(inode);
d_tmpfile(dentry, inode);
+ unlock_new_inode(inode);
mark_inode_dirty(inode);
-
out:
btrfs_end_transaction(trans);
- if (ret)
- iput(inode);
+ if (ret && inode)
+ discard_new_inode(inode);
btrfs_btree_balance_dirty(fs_info);
return ret;
-
-out_inode:
- unlock_new_inode(inode);
- goto out;
-
}
__attribute__((const))
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ad0bed99b1d5..e2679e8a2535 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -429,8 +429,7 @@ out:
* file or symlink, return 1 so the VFS can retry.
*/
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened)
+ struct file *file, unsigned flags, umode_t mode)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -507,9 +506,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
ceph_init_inode_acls(d_inode(dentry), &acls);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
}
- err = finish_open(file, dentry, ceph_open, opened);
+ err = finish_open(file, dentry, ceph_open);
}
out_req:
if (!req->r_err && req->r_target_inode)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index a7077a0c989f..971328b99ede 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1025,8 +1025,7 @@ extern const struct file_operations ceph_file_fops;
extern int ceph_renew_caps(struct inode *inode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened);
+ struct file *file, unsigned flags, umode_t mode);
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 5f0231803431..f3a78efc3109 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -65,8 +65,7 @@ extern struct inode *cifs_root_iget(struct super_block *);
extern int cifs_create(struct inode *, struct dentry *, umode_t,
bool excl);
extern int cifs_atomic_open(struct inode *, struct dentry *,
- struct file *, unsigned, umode_t,
- int *);
+ struct file *, unsigned, umode_t);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
unsigned int);
extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ddae52bd1993..3713d22b95a7 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -465,8 +465,7 @@ out_err:
int
cifs_atomic_open(struct inode *inode, struct dentry *direntry,
- struct file *file, unsigned oflags, umode_t mode,
- int *opened)
+ struct file *file, unsigned oflags, umode_t mode)
{
int rc;
unsigned int xid;
@@ -539,9 +538,9 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
}
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
- rc = finish_open(file, direntry, generic_file_open, opened);
+ rc = finish_open(file, direntry, generic_file_open);
if (rc) {
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
diff --git a/fs/dcache.c b/fs/dcache.c
index 0e8e5de3c48a..8d2ec4898c2b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -358,14 +358,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
- bool hashed = !d_unhashed(dentry);
- if (hashed)
- raw_write_seqcount_begin(&dentry->d_seq);
+ raw_write_seqcount_begin(&dentry->d_seq);
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
- if (hashed)
- raw_write_seqcount_end(&dentry->d_seq);
+ raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
@@ -732,16 +729,16 @@ static inline bool fast_dput(struct dentry *dentry)
if (dentry->d_lockref.count > 1) {
dentry->d_lockref.count--;
spin_unlock(&dentry->d_lock);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
* If we weren't the last ref, we're done.
*/
if (ret)
- return 1;
+ return true;
/*
* Careful, careful. The reference count went down
@@ -770,7 +767,7 @@ static inline bool fast_dput(struct dentry *dentry)
/* Nothing to do? Dropping the reference was all we needed? */
if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
- return 1;
+ return true;
/*
* Not the fast normal case? Get the lock. We've already decremented
@@ -787,7 +784,7 @@ static inline bool fast_dput(struct dentry *dentry)
*/
if (dentry->d_lockref.count) {
spin_unlock(&dentry->d_lock);
- return 1;
+ return true;
}
/*
@@ -796,7 +793,7 @@ static inline bool fast_dput(struct dentry *dentry)
* set it to 1.
*/
dentry->d_lockref.count = 1;
- return 0;
+ return false;
}
@@ -1892,50 +1889,25 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
+ inode->i_state &= ~I_NEW & ~I_CREATING;
smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
-/**
- * d_instantiate_no_diralias - instantiate a non-aliased dentry
- * @entry: dentry to complete
- * @inode: inode to attach to this dentry
- *
- * Fill in inode information in the entry. If a directory alias is found, then
- * return an error (and drop inode). Together with d_materialise_unique() this
- * guarantees that a directory inode may never have more than one alias.
- */
-int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
-{
- BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
-
- security_d_instantiate(entry, inode);
- spin_lock(&inode->i_lock);
- if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
- spin_unlock(&inode->i_lock);
- iput(inode);
- return -EBUSY;
- }
- __d_instantiate(entry, inode);
- spin_unlock(&inode->i_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(d_instantiate_no_diralias);
-
struct dentry *d_make_root(struct inode *root_inode)
{
struct dentry *res = NULL;
if (root_inode) {
res = d_alloc_anon(root_inode->i_sb);
- if (res)
+ if (res) {
+ res->d_flags |= DCACHE_RCUACCESS;
d_instantiate(res, root_inode);
- else
+ } else {
iput(root_inode);
+ }
}
return res;
}
@@ -2676,33 +2648,6 @@ struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
}
EXPORT_SYMBOL(d_exact_alias);
-/**
- * dentry_update_name_case - update case insensitive dentry with a new name
- * @dentry: dentry to be updated
- * @name: new name
- *
- * Update a case insensitive dentry with new case of name.
- *
- * dentry must have been returned by d_lookup with name @name. Old and new
- * name lengths must match (ie. no d_compare which allows mismatched name
- * lengths).
- *
- * Parent inode i_mutex must be held over d_lookup and into this call (to
- * keep renames and concurrent inserts, and readdir(2) away).
- */
-void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
-{
- BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
- BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
-
- spin_lock(&dentry->d_lock);
- write_seqcount_begin(&dentry->d_seq);
- memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
- write_seqcount_end(&dentry->d_seq);
- spin_unlock(&dentry->d_lock);
-}
-EXPORT_SYMBOL(dentry_update_name_case);
-
static void swap_names(struct dentry *dentry, struct dentry *target)
{
if (unlikely(dname_external(target))) {
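In d_instantiate_new() above (and in unlock_new_inode() later in this diff), the combined mask ~I_NEW & ~I_CREATING clears both flags in a single store, since by De Morgan ~A & ~B == ~(A | B). A quick standalone check, using the bit values from include/linux/fs.h at the time of this series (an assumption worth verifying against your tree):

    #include <assert.h>

    #define I_NEW      (1 << 3)     /* per include/linux/fs.h */
    #define I_CREATING (1 << 15)    /* introduced by this series */

    int main(void)
    {
            unsigned long state = I_NEW | I_CREATING | 0x1;

            state &= ~I_NEW & ~I_CREATING;  /* == ~(I_NEW | I_CREATING) */
            assert(state == 0x1);           /* only the unrelated bit survives */
            return 0;
    }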
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 71fccccf317e..8c6ab6c95727 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -86,7 +86,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
- uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ if (err)
+ goto out;
if (efivar_variable_is_removable(var->var.VendorGuid,
dentry->d_name.name, namelen))
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 6484199b35d1..5c3d7b7e4975 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -611,8 +611,7 @@ fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return ERR_PTR(err);
fail:
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 152453a91877..0c26dcc5d850 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -45,8 +45,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
return 0;
}
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -192,8 +191,7 @@ out:
out_fail:
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput (inode);
+ discard_new_inode(inode);
goto out;
}
@@ -261,8 +259,7 @@ out:
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
out_dir:
inode_dec_link_count(dir);
goto out;
diff --git a/fs/file_table.c b/fs/file_table.c
index 7ec0b3e5f05d..d6eccd04d703 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -51,6 +51,7 @@ static void file_free_rcu(struct rcu_head *head)
static inline void file_free(struct file *f)
{
+ security_file_free(f);
percpu_counter_dec(&nr_files);
call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
@@ -100,9 +101,8 @@ int proc_nr_files(struct ctl_table *table, int write,
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
*/
-struct file *get_empty_filp(void)
+struct file *alloc_empty_file(int flags, const struct cred *cred)
{
- const struct cred *cred = current_cred();
static long old_max;
struct file *f;
int error;
@@ -123,11 +123,10 @@ struct file *get_empty_filp(void)
if (unlikely(!f))
return ERR_PTR(-ENOMEM);
- percpu_counter_inc(&nr_files);
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
- file_free(f);
+ file_free_rcu(&f->f_u.fu_rcuhead);
return ERR_PTR(error);
}
@@ -136,7 +135,10 @@ struct file *get_empty_filp(void)
spin_lock_init(&f->f_lock);
mutex_init(&f->f_pos_lock);
eventpoll_init_file(f);
+ f->f_flags = flags;
+ f->f_mode = OPEN_FMODE(flags);
/* f->f_version: 0 */
+ percpu_counter_inc(&nr_files);
return f;
over:
@@ -152,15 +154,15 @@ over:
* alloc_file - allocate and initialize a 'struct file'
*
* @path: the (dentry, vfsmount) pair for the new file
- * @mode: the mode with which the new file will be opened
+ * @flags: O_... flags with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*/
-struct file *alloc_file(const struct path *path, fmode_t mode,
+static struct file *alloc_file(const struct path *path, int flags,
const struct file_operations *fop)
{
struct file *file;
- file = get_empty_filp();
+ file = alloc_empty_file(flags, current_cred());
if (IS_ERR(file))
return file;
@@ -168,19 +170,56 @@ struct file *alloc_file(const struct path *path, fmode_t mode,
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
- if ((mode & FMODE_READ) &&
+ if ((file->f_mode & FMODE_READ) &&
likely(fop->read || fop->read_iter))
- mode |= FMODE_CAN_READ;
- if ((mode & FMODE_WRITE) &&
+ file->f_mode |= FMODE_CAN_READ;
+ if ((file->f_mode & FMODE_WRITE) &&
likely(fop->write || fop->write_iter))
- mode |= FMODE_CAN_WRITE;
- file->f_mode = mode;
+ file->f_mode |= FMODE_CAN_WRITE;
+ file->f_mode |= FMODE_OPENED;
file->f_op = fop;
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
return file;
}
-EXPORT_SYMBOL(alloc_file);
+
+struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
+ const char *name, int flags,
+ const struct file_operations *fops)
+{
+ static const struct dentry_operations anon_ops = {
+ .d_dname = simple_dname
+ };
+ struct qstr this = QSTR_INIT(name, strlen(name));
+ struct path path;
+ struct file *file;
+
+ path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
+ if (!path.dentry)
+ return ERR_PTR(-ENOMEM);
+ if (!mnt->mnt_sb->s_d_op)
+ d_set_d_op(path.dentry, &anon_ops);
+ path.mnt = mntget(mnt);
+ d_instantiate(path.dentry, inode);
+ file = alloc_file(&path, flags, fops);
+ if (IS_ERR(file)) {
+ ihold(inode);
+ path_put(&path);
+ }
+ return file;
+}
+EXPORT_SYMBOL(alloc_file_pseudo);
+
+struct file *alloc_file_clone(struct file *base, int flags,
+ const struct file_operations *fops)
+{
+ struct file *f = alloc_file(&base->f_path, flags, fops);
+ if (!IS_ERR(f)) {
+ path_get(&f->f_path);
+ f->f_mapping = base->f_mapping;
+ }
+ return f;
+}
/* the real guts of fput() - releasing the last reference to file
*/
@@ -190,6 +229,9 @@ static void __fput(struct file *file)
struct vfsmount *mnt = file->f_path.mnt;
struct inode *inode = file->f_inode;
+ if (unlikely(!(file->f_mode & FMODE_OPENED)))
+ goto out;
+
might_sleep();
fsnotify_close(file);
@@ -207,7 +249,6 @@ static void __fput(struct file *file)
}
if (file->f_op->release)
file->f_op->release(inode, file);
- security_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
!(file->f_mode & FMODE_PATH))) {
cdev_put(inode->i_cdev);
@@ -220,12 +261,10 @@ static void __fput(struct file *file)
put_write_access(inode);
__mnt_drop_write(mnt);
}
- file->f_path.dentry = NULL;
- file->f_path.mnt = NULL;
- file->f_inode = NULL;
- file_free(file);
dput(dentry);
mntput(mnt);
+out:
+ file_free(file);
}
static LLIST_HEAD(delayed_fput_list);
@@ -300,14 +339,6 @@ void __fput_sync(struct file *file)
EXPORT_SYMBOL(fput);
-void put_filp(struct file *file)
-{
- if (atomic_long_dec_and_test(&file->f_count)) {
- security_file_free(file);
- file_free(file);
- }
-}
-
void __init files_init(void)
{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
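With this file_table.c change, alloc_file() becomes static and grows two exported wrappers: alloc_file_pseudo() for anonymous and pseudo-filesystem files (used by the aio, anon_inodes and hugetlbfs conversions in this series) and alloc_file_clone() for sharing an existing file's path. On success alloc_file_pseudo() consumes the inode reference via the instantiated dentry; on failure the caller still owns it. A hedged caller sketch with illustrative example_ names:

    static struct file *example_getfile(struct inode *inode)
    {
            struct file *file;

            file = alloc_file_pseudo(inode, example_mnt, "[example]",
                                     O_RDWR, &example_fops);
            if (IS_ERR(file))
                    iput(inode);    /* error path: the reference stays ours */
            return file;
    }

Note also that __fput() now bails out early for files that never reached FMODE_OPENED, so a failed open tears down only what was actually set up.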
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 56231b31f806..d80aab0d5982 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -399,7 +399,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
*/
static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
int err;
struct inode *inode;
@@ -469,7 +469,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
d_instantiate(entry, inode);
fuse_change_entry_timeout(entry, &outentry);
fuse_invalidate_attr(dir);
- err = finish_open(file, entry, generic_file_open, opened);
+ err = finish_open(file, entry, generic_file_open);
if (err) {
fuse_sync_release(ff, flags);
} else {
@@ -489,7 +489,7 @@ out_err:
static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
@@ -508,12 +508,12 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
goto no_open;
/* Only creates */
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
if (fc->no_create)
goto mknod;
- err = fuse_create_open(dir, entry, file, flags, mode, opened);
+ err = fuse_create_open(dir, entry, file, flags, mode);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
@@ -539,6 +539,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
{
struct fuse_entry_out outarg;
struct inode *inode;
+ struct dentry *d;
int err;
struct fuse_forget_link *forget;
@@ -570,11 +571,17 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
}
kfree(forget);
- err = d_instantiate_no_diralias(entry, inode);
- if (err)
- return err;
+ d_drop(entry);
+ d = d_splice_alias(inode, entry);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
- fuse_change_entry_timeout(entry, &outarg);
+ if (d) {
+ fuse_change_entry_timeout(d, &outarg);
+ dput(d);
+ } else {
+ fuse_change_entry_timeout(entry, &outarg);
+ }
fuse_invalidate_attr(dir);
return 0;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index feda55f67050..648f0ca1ad57 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -580,7 +580,7 @@ static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
struct file *file,
umode_t mode, dev_t dev, const char *symname,
- unsigned int size, int excl, int *opened)
+ unsigned int size, int excl)
{
const struct qstr *name = &dentry->d_name;
struct posix_acl *default_acl, *acl;
@@ -626,7 +626,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = 0;
if (file) {
if (S_ISREG(inode->i_mode))
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ error = finish_open(file, dentry, gfs2_open_common);
else
error = finish_no_open(file, NULL);
}
@@ -767,8 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
if (file) {
- *opened |= FILE_CREATED;
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ file->f_mode |= FMODE_CREATED;
+ error = finish_open(file, dentry, gfs2_open_common);
}
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
@@ -822,7 +822,7 @@ fail:
static int gfs2_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
- return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
}
/**
@@ -830,14 +830,13 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
* @dir: The directory inode
* @dentry: The dentry of the new inode
* @file: File to be opened
- * @opened: atomic_open flags
*
*
* Returns: errno
*/
static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
- struct file *file, int *opened)
+ struct file *file)
{
struct inode *inode;
struct dentry *d;
@@ -866,7 +865,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
return d;
}
if (file && S_ISREG(inode->i_mode))
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ error = finish_open(file, dentry, gfs2_open_common);
gfs2_glock_dq_uninit(&gh);
if (error) {
@@ -879,7 +878,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
unsigned flags)
{
- return __gfs2_lookup(dir, dentry, NULL, NULL);
+ return __gfs2_lookup(dir, dentry, NULL);
}
/**
@@ -1189,7 +1188,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
return -ENAMETOOLONG;
- return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
}
/**
@@ -1204,7 +1203,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
- return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
}
/**
@@ -1219,7 +1218,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
- return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
}
/**
@@ -1229,14 +1228,13 @@ static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
* @file: The proposed new struct file
* @flags: open flags
* @mode: File mode
- * @opened: Flag to say whether the file has been opened or not
*
* Returns: error code or 0 for success
*/
static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
struct dentry *d;
bool excl = !!(flags & O_EXCL);
@@ -1244,13 +1242,13 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
if (!d_in_lookup(dentry))
goto skip_lookup;
- d = __gfs2_lookup(dir, dentry, file, opened);
+ d = __gfs2_lookup(dir, dentry, file);
if (IS_ERR(d))
return PTR_ERR(d);
if (d != NULL)
dentry = d;
if (d_really_is_positive(dentry)) {
- if (!(*opened & FILE_OPENED))
+ if (!(file->f_mode & FMODE_OPENED))
return finish_no_open(file, d);
dput(d);
return 0;
@@ -1262,7 +1260,7 @@ skip_lookup:
if (!(flags & O_CREAT))
return -ENOENT;
- return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened);
+ return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl);
}
/*
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 2a16111d312f..a2dfa1b2a89c 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -541,7 +541,7 @@ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
HFS_I(inode)->rsrc_inode = dir;
HFS_I(dir)->rsrc_inode = inode;
igrab(dir);
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
mark_inode_dirty(inode);
dont_mount(dentry);
out:
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2597b290c2a5..444c7b170359 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -610,33 +610,21 @@ static struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
int err;
inode = hostfs_iget(ino->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
+ if (IS_ERR(inode))
goto out;
- }
err = -ENOMEM;
name = dentry_name(dentry);
- if (name == NULL)
- goto out_put;
-
- err = read_name(inode, name);
-
- __putname(name);
- if (err == -ENOENT) {
+ if (name) {
+ err = read_name(inode, name);
+ __putname(name);
+ }
+ if (err) {
iput(inode);
- inode = NULL;
+ inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
}
- else if (err)
- goto out_put;
-
- d_add(dentry, inode);
- return NULL;
-
- out_put:
- iput(inode);
out:
- return ERR_PTR(err);
+ return d_splice_alias(inode, dentry);
}
static int hostfs_link(struct dentry *to, struct inode *ino,
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index c83ece7facc5..d85230c84ef2 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -244,6 +244,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
result = iget_locked(dir->i_sb, ino);
if (!result) {
hpfs_error(dir->i_sb, "hpfs_lookup: can't get inode");
+ result = ERR_PTR(-ENOMEM);
goto bail1;
}
if (result->i_state & I_NEW) {
@@ -266,6 +267,8 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
if (de->has_acl || de->has_xtd_perm) if (!sb_rdonly(dir->i_sb)) {
hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
+ iput(result);
+ result = ERR_PTR(-EINVAL);
goto bail1;
}
@@ -301,29 +304,17 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
}
}
+bail1:
hpfs_brelse4(&qbh);
/*
* Made it.
*/
- end:
- end_add:
+end:
+end_add:
hpfs_unlock(dir->i_sb);
- d_add(dentry, result);
- return NULL;
-
- /*
- * Didn't.
- */
- bail1:
-
- hpfs_brelse4(&qbh);
-
- /*bail:*/
-
- hpfs_unlock(dir->i_sb);
- return ERR_PTR(-ENOENT);
+ return d_splice_alias(result, dentry);
}
const struct file_operations hpfs_dir_ops =
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 40d4c66c7751..346a146c7617 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1310,10 +1310,6 @@ static int get_hstate_idx(int page_size_log)
return h - hstates;
}
-static const struct dentry_operations anon_ops = {
- .d_dname = simple_dname
-};
-
/*
 * Note that size should be aligned to the proper hugepage size on the caller's
 * side; otherwise hugetlb_reserve_pages() reserves one fewer hugepage than intended.
@@ -1322,19 +1318,18 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
vm_flags_t acctflag, struct user_struct **user,
int creat_flags, int page_size_log)
{
- struct file *file = ERR_PTR(-ENOMEM);
struct inode *inode;
- struct path path;
- struct super_block *sb;
- struct qstr quick_string;
+ struct vfsmount *mnt;
int hstate_idx;
+ struct file *file;
hstate_idx = get_hstate_idx(page_size_log);
if (hstate_idx < 0)
return ERR_PTR(-ENODEV);
*user = NULL;
- if (!hugetlbfs_vfsmount[hstate_idx])
+ mnt = hugetlbfs_vfsmount[hstate_idx];
+ if (!mnt)
return ERR_PTR(-ENOENT);
if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
@@ -1350,45 +1345,28 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
}
}
- sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
- quick_string.name = name;
- quick_string.len = strlen(quick_string.name);
- quick_string.hash = 0;
- path.dentry = d_alloc_pseudo(sb, &quick_string);
- if (!path.dentry)
- goto out_shm_unlock;
-
- d_set_d_op(path.dentry, &anon_ops);
- path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
file = ERR_PTR(-ENOSPC);
- inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
+ inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
- goto out_dentry;
+ goto out;
if (creat_flags == HUGETLB_SHMFS_INODE)
inode->i_flags |= S_PRIVATE;
- file = ERR_PTR(-ENOMEM);
- if (hugetlb_reserve_pages(inode, 0,
- size >> huge_page_shift(hstate_inode(inode)), NULL,
- acctflag))
- goto out_inode;
-
- d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode);
- file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
- &hugetlbfs_file_operations);
- if (IS_ERR(file))
- goto out_dentry; /* inode is already attached */
-
- return file;
+ if (hugetlb_reserve_pages(inode, 0,
+ size >> huge_page_shift(hstate_inode(inode)), NULL,
+ acctflag))
+ file = ERR_PTR(-ENOMEM);
+ else
+ file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
+ &hugetlbfs_file_operations);
+ if (!IS_ERR(file))
+ return file;
-out_inode:
iput(inode);
-out_dentry:
- path_put(&path);
-out_shm_unlock:
+out:
if (*user) {
user_shm_unlock(size, *user);
*user = NULL;
diff --git a/fs/inode.c b/fs/inode.c
index 8c86c809ca17..a06de4454232 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -804,6 +804,10 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
+ if (unlikely(inode->i_state & I_CREATING)) {
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-ESTALE);
+ }
__iget(inode);
spin_unlock(&inode->i_lock);
return inode;
@@ -831,6 +835,10 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
+ if (unlikely(inode->i_state & I_CREATING)) {
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-ESTALE);
+ }
__iget(inode);
spin_unlock(&inode->i_lock);
return inode;
@@ -961,13 +969,26 @@ void unlock_new_inode(struct inode *inode)
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
+ inode->i_state &= ~I_NEW & ~I_CREATING;
smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
+void discard_new_inode(struct inode *inode)
+{
+ lockdep_annotate_inode_mutex_key(inode);
+ spin_lock(&inode->i_lock);
+ WARN_ON(!(inode->i_state & I_NEW));
+ inode->i_state &= ~I_NEW;
+ smp_mb();
+ wake_up_bit(&inode->i_state, __I_NEW);
+ spin_unlock(&inode->i_lock);
+ iput(inode);
+}
+EXPORT_SYMBOL(discard_new_inode);
+
/**
* lock_two_nondirectories - take two i_mutexes on non-directory objects
*
@@ -1029,6 +1050,7 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
+ bool creating = inode->i_state & I_CREATING;
again:
spin_lock(&inode_hash_lock);
@@ -1039,6 +1061,8 @@ again:
* Use the old inode instead of the preallocated one.
*/
spin_unlock(&inode_hash_lock);
+ if (IS_ERR(old))
+ return NULL;
wait_on_inode(old);
if (unlikely(inode_unhashed(old))) {
iput(old);
@@ -1060,6 +1084,8 @@ again:
inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
+ if (!creating)
+ inode_sb_list_add(inode);
unlock:
spin_unlock(&inode_hash_lock);
@@ -1094,12 +1120,13 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
struct inode *inode = ilookup5(sb, hashval, test, data);
if (!inode) {
- struct inode *new = new_inode(sb);
+ struct inode *new = alloc_inode(sb);
if (new) {
+ new->i_state = 0;
inode = inode_insert5(new, hashval, test, set, data);
if (unlikely(inode != new))
- iput(new);
+ destroy_inode(new);
}
}
return inode;
@@ -1128,6 +1155,8 @@ again:
inode = find_inode_fast(sb, head, ino);
spin_unlock(&inode_hash_lock);
if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
@@ -1165,6 +1194,8 @@ again:
*/
spin_unlock(&inode_hash_lock);
destroy_inode(inode);
+ if (IS_ERR(old))
+ return NULL;
inode = old;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
@@ -1282,7 +1313,7 @@ struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
inode = find_inode(sb, head, test, data);
spin_unlock(&inode_hash_lock);
- return inode;
+ return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
@@ -1338,6 +1369,8 @@ again:
spin_unlock(&inode_hash_lock);
if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
@@ -1421,12 +1454,17 @@ int insert_inode_locked(struct inode *inode)
}
if (likely(!old)) {
spin_lock(&inode->i_lock);
- inode->i_state |= I_NEW;
+ inode->i_state |= I_NEW | I_CREATING;
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
}
+ if (unlikely(old->i_state & I_CREATING)) {
+ spin_unlock(&old->i_lock);
+ spin_unlock(&inode_hash_lock);
+ return -EBUSY;
+ }
__iget(old);
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
@@ -1443,7 +1481,10 @@ EXPORT_SYMBOL(insert_inode_locked);
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
- struct inode *old = inode_insert5(inode, hashval, test, NULL, data);
+ struct inode *old;
+
+ inode->i_state |= I_CREATING;
+ old = inode_insert5(inode, hashval, test, NULL, data);
if (old != inode) {
iput(old);
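These inode.c changes define the lifecycle that the ext2, jfs and btrfs hunks elsewhere in this diff rely on: insert_inode_locked() marks a new inode I_NEW | I_CREATING, concurrent hash lookups see ERR_PTR(-ESTALE) instead of the half-built inode, and failed creation paths call the new discard_new_inode() rather than the old unlock_new_inode() + iput() pair. A sketch of a converted create path (the example_ helpers are hypothetical):

    static struct inode *example_create(struct super_block *sb, umode_t mode)
    {
            struct inode *inode = new_inode(sb);
            int err;

            if (!inode)
                    return ERR_PTR(-ENOMEM);

            inode->i_ino = example_alloc_ino(sb);   /* hypothetical allocator */
            err = insert_inode_locked(inode);       /* sets I_NEW | I_CREATING */
            if (err) {
                    iput(inode);
                    return ERR_PTR(err);
            }

            err = example_write_on_disk(inode);     /* hypothetical */
            if (err) {
                    clear_nlink(inode);
                    discard_new_inode(inode);       /* wakes waiters, then iputs */
                    return ERR_PTR(err);
            }

            unlock_new_inode(inode);        /* clears I_NEW and I_CREATING */
            return inode;
    }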
diff --git a/fs/internal.h b/fs/internal.h
index 5645b4ebf494..52a346903748 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -93,7 +93,7 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
/*
* file_table.c
*/
-extern struct file *get_empty_filp(void);
+extern struct file *alloc_empty_file(int, const struct cred *);
/*
* super.c
@@ -125,8 +125,7 @@ int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
int flag);
-extern int open_check_o_direct(struct file *f);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
+extern int vfs_open(const struct path *, struct file *);
/*
* inode.c
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index f36ef68905a7..93e8c590ff5c 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -491,13 +491,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
/* release the page */
release_metapage(mp);
- /*
- * __mark_inode_dirty expects inodes to be hashed. Since we don't
- * want special inodes in the fileset inode space, we make them
- * appear hashed, but do not put on any lists. hlist_del()
- * will work fine and require no locking.
- */
- hlist_add_fake(&ip->i_hash);
+ inode_fake_hash(ip);
return (ip);
}
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 5e9b7bb3aabf..4572b7cf183d 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -61,8 +61,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
inode = new_inode(sb);
if (!inode) {
jfs_warn("ialloc: new_inode returned NULL!");
- rc = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
jfs_inode = JFS_IP(inode);
@@ -70,8 +69,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
rc = diAlloc(parent, S_ISDIR(mode), inode);
if (rc) {
jfs_warn("ialloc: diAlloc returned %d!", rc);
- if (rc == -EIO)
- make_bad_inode(inode);
goto fail_put;
}
@@ -141,9 +138,10 @@ fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
clear_nlink(inode);
- unlock_new_inode(inode);
+ discard_new_inode(inode);
+ return ERR_PTR(rc);
+
fail_put:
iput(inode);
-fail:
return ERR_PTR(rc);
}
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 56c3fcbfe80e..14528c0ffe63 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -175,8 +175,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -309,8 +308,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -1054,8 +1052,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -1441,8 +1438,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
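
The same mechanical conversion recurs throughout jfs (and in udf/ufs further below): every failure path that used to do unlock_new_inode() followed by iput() now calls discard_new_inode(), the required way to drop a still-I_NEW inode under the new I_CREATING rules. The resulting shape of a ->create()-style method, sketched with hypothetical example_* helpers:

static int example_mknod(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = example_new_inode(dir, mode);	/* hypothetical */
	int rc;

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	rc = example_add_entry(dir, dentry, inode);		/* hypothetical */
	if (rc) {
		clear_nlink(inode);
		discard_new_inode(inode);	/* was: unlock_new_inode(); iput(); */
		return rc;
	}
	d_instantiate_new(dentry, inode);
	return 0;
}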
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index f08571433aba..09da5cf14e27 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -581,7 +581,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_ino = 0;
inode->i_size = i_size_read(sb->s_bdev->bd_inode);
inode->i_mapping->a_ops = &jfs_metapage_aops;
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
sbi->direct_inode = inode;
diff --git a/fs/locks.c b/fs/locks.c
index db7b6917d9c5..bc047a7edc47 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -202,10 +202,6 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
* we often hold the flc_lock as well. In certain cases, when reading the fields
* protected by this lock, we can skip acquiring it iff we already hold the
* flc_lock.
- *
- * In particular, adding an entry to the fl_block list requires that you hold
- * both the flc_lock and the blocked_lock_lock (acquired in that order).
- * Deleting an entry from the list however only requires the file_lock_lock.
*/
static DEFINE_SPINLOCK(blocked_lock_lock);
@@ -990,6 +986,7 @@ out:
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
+ trace_flock_lock_inode(inode, request, error);
return error;
}
@@ -2072,6 +2069,13 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
return -1;
if (IS_REMOTELCK(fl))
return fl->fl_pid;
+ /*
+ * If the flock owner process is dead and its pid has already been
+ * freed, the translation below won't work, but we still want to show
+ * the flock owner's pid number in the init pidns.
+ */
+ if (ns == &init_pid_ns)
+ return (pid_t)fl->fl_pid;
rcu_read_lock();
pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
@@ -2626,12 +2630,10 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
fl_pid = locks_translate_pid(fl, proc_pidns);
/*
- * If there isn't a fl_pid don't display who is waiting on
- * the lock if we are called from locks_show, or if we are
- * called from __show_fd_info - skip lock entirely
+ * If the lock owner is dead (and its pid has been freed) or not visible
+ * in the current pidns, zero is shown as the pid value. Check the lock
+ * info from init_pid_ns to get the saved lock pid value.
*/
- if (fl_pid == 0)
- return;
if (fl->fl_file != NULL)
inode = locks_inode(fl->fl_file);
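
Condensed, the resulting translation logic reads roughly as follows — a sketch assembled from the hunks above plus the surrounding function, which the diff does not show in full:

static pid_t example_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr = 0;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;
	if (ns == &init_pid_ns)		/* valid even if the owner already died */
		return (pid_t)fl->fl_pid;

	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	if (pid)
		vnr = pid_nr_ns(pid, ns);	/* 0 if not visible in ns */
	rcu_read_unlock();
	return vnr;
}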
diff --git a/fs/namei.c b/fs/namei.c
index 734cef54fdf8..3cd396277cd3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2028,6 +2028,8 @@ static int link_path_walk(const char *name, struct nameidata *nd)
{
int err;
+ if (IS_ERR(name))
+ return PTR_ERR(name);
while (*name=='/')
name++;
if (!*name)
@@ -2125,12 +2127,15 @@ OK:
}
}
+/* must be paired with terminate_walk() */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
const char *s = nd->name->name;
if (!*s)
flags &= ~LOOKUP_RCU;
+ if (flags & LOOKUP_RCU)
+ rcu_read_lock();
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
@@ -2143,7 +2148,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
- rcu_read_lock();
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
nd->root_seq = nd->seq;
nd->m_seq = read_seqbegin(&mount_lock);
@@ -2159,21 +2163,15 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->m_seq = read_seqbegin(&mount_lock);
if (*s == '/') {
- if (flags & LOOKUP_RCU)
- rcu_read_lock();
set_root(nd);
if (likely(!nd_jump_root(nd)))
return s;
- nd->root.mnt = NULL;
- rcu_read_unlock();
return ERR_PTR(-ECHILD);
} else if (nd->dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
- rcu_read_lock();
-
do {
seq = read_seqcount_begin(&fs->seq);
nd->path = fs->pwd;
@@ -2195,16 +2193,13 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
dentry = f.file->f_path.dentry;
- if (*s) {
- if (!d_can_lookup(dentry)) {
- fdput(f);
- return ERR_PTR(-ENOTDIR);
- }
+ if (*s && unlikely(!d_can_lookup(dentry))) {
+ fdput(f);
+ return ERR_PTR(-ENOTDIR);
}
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
- rcu_read_lock();
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
@@ -2272,24 +2267,15 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
const char *s = path_init(nd, flags);
int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
-
- if (unlikely(flags & LOOKUP_DOWN)) {
+ if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
err = handle_lookup_down(nd);
- if (unlikely(err < 0)) {
- terminate_walk(nd);
- return err;
- }
+ if (unlikely(err < 0))
+ s = ERR_PTR(err);
}
while (!(err = link_path_walk(s, nd))
&& ((err = lookup_last(nd)) > 0)) {
s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- err = PTR_ERR(s);
- break;
- }
}
if (!err)
err = complete_walk(nd);
@@ -2336,10 +2322,7 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
const char *s = path_init(nd, flags);
- int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
- err = link_path_walk(s, nd);
+ int err = link_path_walk(s, nd);
if (!err)
err = complete_walk(nd);
if (!err) {
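
The net effect of the path_init() rework: rcu_read_lock() is taken once, up front, for every LOOKUP_RCU walk; failures come back as ERR_PTR() and are funneled into link_path_walk(), which now accepts them; and all unwinding funnels into terminate_walk(). Callers collapse to the shape of path_parentat() above — sketched here, with the caveat that real callers copy nd->path out before terminating:

static int example_walk(struct nameidata *nd, unsigned flags)
{
	const char *s = path_init(nd, flags);	/* may be ERR_PTR() */
	int err = link_path_walk(s, nd);	/* IS_ERR(s) short-circuits inside */

	if (!err)
		err = complete_walk(nd);
	/* ... extract results from nd here ... */
	terminate_walk(nd);			/* the mandatory pair of path_init() */
	return err;
}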
@@ -2666,15 +2649,10 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
+
while (!(err = link_path_walk(s, nd)) &&
(err = mountpoint_last(nd)) > 0) {
s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- err = PTR_ERR(s);
- break;
- }
}
if (!err) {
*path = nd->path;
@@ -3027,17 +3005,16 @@ static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t m
* Returns 0 if successful. The file will have been created and attached to
* @file by the filesystem calling finish_open().
*
- * Returns 1 if the file was looked up only or didn't need creating. The
- * caller will need to perform the open themselves. @path will have been
- * updated to point to the new dentry. This may be negative.
+ * If the file was looked up only or didn't need creating, FMODE_OPENED won't
+ * be set. The caller will need to perform the open themselves. @path will
+ * have been updated to point to the new dentry. This may be negative.
*
* Returns an error code otherwise.
*/
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
struct path *path, struct file *file,
const struct open_flags *op,
- int open_flag, umode_t mode,
- int *opened)
+ int open_flag, umode_t mode)
{
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
struct inode *dir = nd->path.dentry->d_inode;
@@ -3052,39 +3029,38 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
file->f_path.dentry = DENTRY_NOT_SET;
file->f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file,
- open_to_namei_flags(open_flag),
- mode, opened);
+ open_to_namei_flags(open_flag), mode);
d_lookup_done(dentry);
if (!error) {
- /*
- * We didn't have the inode before the open, so check open
- * permission here.
- */
- int acc_mode = op->acc_mode;
- if (*opened & FILE_CREATED) {
- WARN_ON(!(open_flag & O_CREAT));
- fsnotify_create(dir, dentry);
- acc_mode = 0;
- }
- error = may_open(&file->f_path, acc_mode, open_flag);
- if (WARN_ON(error > 0))
- error = -EINVAL;
- } else if (error > 0) {
- if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
+ if (file->f_mode & FMODE_OPENED) {
+ /*
+ * We didn't have the inode before the open, so check open
+ * permission here.
+ */
+ int acc_mode = op->acc_mode;
+ if (file->f_mode & FMODE_CREATED) {
+ WARN_ON(!(open_flag & O_CREAT));
+ fsnotify_create(dir, dentry);
+ acc_mode = 0;
+ }
+ error = may_open(&file->f_path, acc_mode, open_flag);
+ if (WARN_ON(error > 0))
+ error = -EINVAL;
+ } else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
} else {
if (file->f_path.dentry) {
dput(dentry);
dentry = file->f_path.dentry;
}
- if (*opened & FILE_CREATED)
+ if (file->f_mode & FMODE_CREATED)
fsnotify_create(dir, dentry);
if (unlikely(d_is_negative(dentry))) {
error = -ENOENT;
} else {
path->dentry = dentry;
path->mnt = nd->path.mnt;
- return 1;
+ return 0;
}
}
}
@@ -3095,25 +3071,22 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
/*
* Look up and maybe create and open the last component.
*
- * Must be called with i_mutex held on parent.
- *
- * Returns 0 if the file was successfully atomically created (if necessary) and
- * opened. In this case the file will be returned attached to @file.
- *
- * Returns 1 if the file was not completely opened at this time, though lookups
- * and creations will have been performed and the dentry returned in @path will
- * be positive upon return if O_CREAT was specified. If O_CREAT wasn't
- * specified then a negative dentry may be returned.
+ * Must be called with parent locked (exclusive in O_CREAT case).
*
- * An error code is returned otherwise.
+ * Returns 0 on success, that is, if
+ * the file was successfully atomically created (if necessary) and opened, or
+ * the file was not completely opened at this time, though lookups and
+ * creations were performed.
+ * These cases are distinguished by the presence of FMODE_OPENED in
+ * file->f_mode. In the latter case the dentry returned in @path might
+ * be negative if O_CREAT hadn't been specified.
*
- * FILE_CREATE will be set in @*opened if the dentry was created and will be
- * cleared otherwise prior to returning.
+ * An error code is returned on failure.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
struct file *file,
const struct open_flags *op,
- bool got_write, int *opened)
+ bool got_write)
{
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
@@ -3126,7 +3099,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
if (unlikely(IS_DEADDIR(dir_inode)))
return -ENOENT;
- *opened &= ~FILE_CREATED;
+ file->f_mode &= ~FMODE_CREATED;
dentry = d_lookup(dir, &nd->last);
for (;;) {
if (!dentry) {
@@ -3188,7 +3161,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
if (dir_inode->i_op->atomic_open) {
error = atomic_open(nd, dentry, path, file, op, open_flag,
- mode, opened);
+ mode);
if (unlikely(error == -ENOENT) && create_error)
error = create_error;
return error;
@@ -3211,7 +3184,7 @@ no_open:
/* Negative dentry, just create the file */
if (!dentry->d_inode && (open_flag & O_CREAT)) {
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
if (!dir_inode->i_op->create) {
error = -EACCES;
@@ -3230,7 +3203,7 @@ no_open:
out_no_open:
path->dentry = dentry;
path->mnt = nd->path.mnt;
- return 1;
+ return 0;
out_dput:
dput(dentry);
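
With the return-1 convention gone, an ->atomic_open() instance reports "file actually opened" via FMODE_OPENED and "created" via FMODE_CREATED, returning 0 in both the opened and the looked-up-only cases. A hypothetical instance (example_* names are placeholders; the nfs conversion further below is a real one):

static int example_atomic_open(struct inode *dir, struct dentry *dentry,
			       struct file *file, unsigned open_flags,
			       umode_t mode)
{
	struct dentry *res = example_lookup(dir, dentry, open_flags); /* hypothetical */

	if (IS_ERR(res))
		return PTR_ERR(res);
	if (res || !(open_flags & O_CREAT))
		return finish_no_open(file, res);	/* now returns 0, not 1 */

	/* create, then open; finish_open() -> do_dentry_open() sets FMODE_OPENED */
	file->f_mode |= FMODE_CREATED;			/* was: *opened |= FILE_CREATED */
	return finish_open(file, dentry, example_open_cb); /* hypothetical callback */
}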
@@ -3241,8 +3214,7 @@ out_dput:
* Handle the last step of open()
*/
static int do_last(struct nameidata *nd,
- struct file *file, const struct open_flags *op,
- int *opened)
+ struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
@@ -3308,17 +3280,17 @@ static int do_last(struct nameidata *nd,
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
- error = lookup_open(nd, &path, file, op, got_write, opened);
+ error = lookup_open(nd, &path, file, op, got_write);
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
inode_unlock_shared(dir->d_inode);
- if (error <= 0) {
- if (error)
- goto out;
+ if (error)
+ goto out;
- if ((*opened & FILE_CREATED) ||
+ if (file->f_mode & FMODE_OPENED) {
+ if ((file->f_mode & FMODE_CREATED) ||
!S_ISREG(file_inode(file)->i_mode))
will_truncate = false;
@@ -3326,7 +3298,7 @@ static int do_last(struct nameidata *nd,
goto opened;
}
- if (*opened & FILE_CREATED) {
+ if (file->f_mode & FMODE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
will_truncate = false;
@@ -3395,20 +3367,15 @@ finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
goto out;
- BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
- error = vfs_open(&nd->path, file, current_cred());
+ BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
+ error = vfs_open(&nd->path, file);
if (error)
goto out;
- *opened |= FILE_OPENED;
opened:
- error = open_check_o_direct(file);
- if (!error)
- error = ima_file_check(file, op->acc_mode, *opened);
+ error = ima_file_check(file, op->acc_mode);
if (!error && will_truncate)
error = handle_truncate(file);
out:
- if (unlikely(error) && (*opened & FILE_OPENED))
- fput(file);
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
@@ -3458,7 +3425,7 @@ EXPORT_SYMBOL(vfs_tmpfile);
static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
- struct file *file, int *opened)
+ struct file *file)
{
struct dentry *child;
struct path path;
@@ -3480,12 +3447,7 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
if (error)
goto out2;
file->f_path.mnt = path.mnt;
- error = finish_open(file, child, NULL, opened);
- if (error)
- goto out2;
- error = open_check_o_direct(file);
- if (error)
- fput(file);
+ error = finish_open(file, child, NULL);
out2:
mnt_drop_write(path.mnt);
out:
@@ -3499,7 +3461,7 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
int error = path_lookupat(nd, flags, &path);
if (!error) {
audit_inode(nd->name, path.dentry, 0);
- error = vfs_open(&path, file, current_cred());
+ error = vfs_open(&path, file);
path_put(&path);
}
return error;
@@ -3508,59 +3470,40 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
static struct file *path_openat(struct nameidata *nd,
const struct open_flags *op, unsigned flags)
{
- const char *s;
struct file *file;
- int opened = 0;
int error;
- file = get_empty_filp();
+ file = alloc_empty_file(op->open_flag, current_cred());
if (IS_ERR(file))
return file;
- file->f_flags = op->open_flag;
-
if (unlikely(file->f_flags & __O_TMPFILE)) {
- error = do_tmpfile(nd, flags, op, file, &opened);
- goto out2;
- }
-
- if (unlikely(file->f_flags & O_PATH)) {
+ error = do_tmpfile(nd, flags, op, file);
+ } else if (unlikely(file->f_flags & O_PATH)) {
error = do_o_path(nd, flags, file);
- if (!error)
- opened |= FILE_OPENED;
- goto out2;
- }
-
- s = path_init(nd, flags);
- if (IS_ERR(s)) {
- put_filp(file);
- return ERR_CAST(s);
- }
- while (!(error = link_path_walk(s, nd)) &&
- (error = do_last(nd, file, op, &opened)) > 0) {
- nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
- s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- break;
+ } else {
+ const char *s = path_init(nd, flags);
+ while (!(error = link_path_walk(s, nd)) &&
+ (error = do_last(nd, file, op)) > 0) {
+ nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
+ s = trailing_symlink(nd);
}
+ terminate_walk(nd);
}
- terminate_walk(nd);
-out2:
- if (!(opened & FILE_OPENED)) {
- BUG_ON(!error);
- put_filp(file);
+ if (likely(!error)) {
+ if (likely(file->f_mode & FMODE_OPENED))
+ return file;
+ WARN_ON(1);
+ error = -EINVAL;
}
- if (unlikely(error)) {
- if (error == -EOPENSTALE) {
- if (flags & LOOKUP_RCU)
- error = -ECHILD;
- else
- error = -ESTALE;
- }
- file = ERR_PTR(error);
+ fput(file);
+ if (error == -EOPENSTALE) {
+ if (flags & LOOKUP_RCU)
+ error = -ECHILD;
+ else
+ error = -ESTALE;
}
- return file;
+ return ERR_PTR(error);
}
struct file *do_filp_open(int dfd, struct filename *pathname,
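
The rewrite leaves path_openat() with one invariant: after alloc_empty_file(), fput() is the sole destructor on every failure path, since do_dentry_open() sets FMODE_OPENED only once ->release()-style teardown is actually warranted. Schematically — a sketch, with example_walk_and_open() standing in for the three branches above:

static struct file *example_openat(struct nameidata *nd,
				   const struct open_flags *op, unsigned flags)
{
	struct file *file = alloc_empty_file(op->open_flag, current_cred());
	int error;

	if (IS_ERR(file))
		return file;
	error = example_walk_and_open(nd, file, op, flags);	/* hypothetical */
	if (likely(!error) && (file->f_mode & FMODE_OPENED))
		return file;
	fput(file);		/* one destructor for every failure mode */
	return ERR_PTR(error ?: -EINVAL);
}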
@@ -4712,29 +4655,6 @@ out:
return len;
}
-/*
- * A helper for ->readlink(). This should be used *ONLY* for symlinks that
- * have ->get_link() not calling nd_jump_link(). Using (or not using) it
- * for any given inode is up to filesystem.
- */
-static int generic_readlink(struct dentry *dentry, char __user *buffer,
- int buflen)
-{
- DEFINE_DELAYED_CALL(done);
- struct inode *inode = d_inode(dentry);
- const char *link = inode->i_link;
- int res;
-
- if (!link) {
- link = inode->i_op->get_link(dentry, inode, &done);
- if (IS_ERR(link))
- return PTR_ERR(link);
- }
- res = readlink_copy(buffer, buflen, link);
- do_delayed_call(&done);
- return res;
-}
-
/**
* vfs_readlink - copy symlink body into userspace buffer
* @dentry: dentry on which to get symbolic link
@@ -4748,6 +4668,9 @@ static int generic_readlink(struct dentry *dentry, char __user *buffer,
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct inode *inode = d_inode(dentry);
+ DEFINE_DELAYED_CALL(done);
+ const char *link;
+ int res;
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
@@ -4761,7 +4684,15 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
spin_unlock(&inode->i_lock);
}
- return generic_readlink(dentry, buffer, buflen);
+ link = inode->i_link;
+ if (!link) {
+ link = inode->i_op->get_link(dentry, inode, &done);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ }
+ res = readlink_copy(buffer, buflen, link);
+ do_delayed_call(&done);
+ return res;
}
EXPORT_SYMBOL(vfs_readlink);
diff --git a/fs/namespace.c b/fs/namespace.c
index 8ddd14806799..bd2f4c68506a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -659,12 +659,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
+ smp_mb(); // see mntput_no_expire()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
mnt_add_count(mnt, -1);
return 1;
}
+ lock_mount_hash();
+ if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+ mnt_add_count(mnt, -1);
+ unlock_mount_hash();
+ return 1;
+ }
+ unlock_mount_hash();
+ /* caller will mntput() */
return -1;
}
@@ -1195,12 +1204,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
rcu_read_lock();
- mnt_add_count(mnt, -1);
- if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+ if (likely(READ_ONCE(mnt->mnt_ns))) {
+ /*
+ * Since we don't do lock_mount_hash() here,
+ * ->mnt_ns can change under us. However, if it's
+ * non-NULL, then there's a reference that won't
+ * be dropped until after an RCU delay done after
+ * turning ->mnt_ns NULL. So if we observe it
+ * non-NULL under rcu_read_lock(), the reference
+ * we are dropping is not the final one.
+ */
+ mnt_add_count(mnt, -1);
rcu_read_unlock();
return;
}
lock_mount_hash();
+ /*
+ * make sure that if __legitimize_mnt() has not seen us grab
+ * mount_lock, we'll see their refcount increment here.
+ */
+ smp_mb();
+ mnt_add_count(mnt, -1);
if (mnt_get_count(mnt)) {
rcu_read_unlock();
unlock_mount_hash();
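
The two smp_mb() calls added above pair with each other. In the CPU-column style of Documentation/memory-barriers.txt, the intended argument runs as follows (a sketch of the reasoning, not authoritative):

	CPU 1 (__legitimize_mnt)		CPU 2 (mntput_no_expire)
	------------------------		------------------------
	mnt_add_count(mnt, 1);			lock_mount_hash(); /* writes mount_lock */
	smp_mb();				smp_mb();
	read_seqretry(&mount_lock, seq);	mnt_add_count(mnt, -1);
						mnt_get_count(mnt);

Either CPU 1 observes the mount_lock write and backs off (the 1/-1 returns), or CPU 2's mnt_get_count() observes the increment and skips freeing the mount; the barriers close the window in which both sides miss each other.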
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7a9c14426855..d7f158c3efc8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1434,12 +1434,11 @@ static int do_open(struct inode *inode, struct file *filp)
static int nfs_finish_open(struct nfs_open_context *ctx,
struct dentry *dentry,
- struct file *file, unsigned open_flags,
- int *opened)
+ struct file *file, unsigned open_flags)
{
int err;
- err = finish_open(file, dentry, do_open, opened);
+ err = finish_open(file, dentry, do_open);
if (err)
goto out;
if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
@@ -1452,7 +1451,7 @@ out:
int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
@@ -1461,6 +1460,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct inode *inode;
unsigned int lookup_flags = 0;
bool switched = false;
+ int created = 0;
int err;
/* Expect a negative dentry */
@@ -1521,7 +1521,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
goto out;
trace_nfs_atomic_open_enter(dir, ctx, open_flags);
- inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
+ if (created)
+ file->f_mode |= FMODE_CREATED;
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
@@ -1546,7 +1548,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
goto out;
}
- err = nfs_finish_open(ctx, ctx->dentry, file, open_flags, opened);
+ err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
out:
@@ -1641,6 +1643,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct dentry *parent = dget_parent(dentry);
struct inode *dir = d_inode(parent);
struct inode *inode;
+ struct dentry *d;
int error = -EACCES;
d_drop(dentry);
@@ -1662,10 +1665,12 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
goto out_error;
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
- error = PTR_ERR(inode);
- if (IS_ERR(inode))
+ d = d_splice_alias(inode, dentry);
+ if (IS_ERR(d)) {
+ error = PTR_ERR(d);
goto out_error;
- d_add(dentry, inode);
+ }
+ dput(d);
out:
dput(parent);
return 0;
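
The nfs_instantiate() hunk is an instance of a general d_splice_alias() conversion: the helper consumes the inode reference even on failure, so the error path needs no iput(), and the returned dentry (possibly a preexisting alias, possibly NULL) just needs a dput(). As a pattern (sketch; dput(NULL) is a no-op):

static int example_instantiate(struct dentry *dentry, struct inode *inode)
{
	struct dentry *d = d_splice_alias(inode, dentry);

	if (IS_ERR(d))
		return PTR_ERR(d);	/* inode reference already consumed */
	dput(d);			/* drop the extra ref; NULL-safe */
	return 0;
}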
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 137e18abb7e7..51beb6e38c90 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -258,7 +258,7 @@ extern const struct dentry_operations nfs4_dentry_operations;
/* dir.c */
int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
- unsigned, umode_t, int *);
+ unsigned, umode_t);
/* super.c */
extern struct file_system_type nfs4_fs_type;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f6c4ccd693f4..b790976d3913 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2951,7 +2951,7 @@ static int _nfs4_do_open(struct inode *dir,
}
}
if (opened && opendata->file_created)
- *opened |= FILE_CREATED;
+ *opened = 1;
if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
*ctx_th = opendata->f_attr.mdsthreshold;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b0555d7d8200..55a099e47ba2 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -763,7 +763,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
goto out_nfserr;
}
- host_err = ima_file_check(file, may_flags, 0);
+ host_err = ima_file_check(file, may_flags);
if (host_err) {
fput(file);
goto out_nfserr;
diff --git a/fs/open.c b/fs/open.c
index d0e955b558ad..d98e19239bb7 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -724,27 +724,13 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
return ksys_fchown(fd, user, group);
}
-int open_check_o_direct(struct file *f)
-{
- /* NB: we're sure to have correct a_ops only after f_op->open */
- if (f->f_flags & O_DIRECT) {
- if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
- return -EINVAL;
- }
- return 0;
-}
-
static int do_dentry_open(struct file *f,
struct inode *inode,
- int (*open)(struct inode *, struct file *),
- const struct cred *cred)
+ int (*open)(struct inode *, struct file *))
{
static const struct file_operations empty_fops = {};
int error;
- f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
- FMODE_PREAD | FMODE_PWRITE;
-
path_get(&f->f_path);
f->f_inode = inode;
f->f_mapping = inode->i_mapping;
@@ -753,7 +739,7 @@ static int do_dentry_open(struct file *f,
f->f_wb_err = filemap_sample_wb_err(f->f_mapping);
if (unlikely(f->f_flags & O_PATH)) {
- f->f_mode = FMODE_PATH;
+ f->f_mode = FMODE_PATH | FMODE_OPENED;
f->f_op = &empty_fops;
return 0;
}
@@ -780,7 +766,7 @@ static int do_dentry_open(struct file *f,
goto cleanup_all;
}
- error = security_file_open(f, cred);
+ error = security_file_open(f);
if (error)
goto cleanup_all;
@@ -788,6 +774,8 @@ static int do_dentry_open(struct file *f,
if (error)
goto cleanup_all;
+ /* normally all 3 are set; ->open() can clear them if needed */
+ f->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
if (!open)
open = f->f_op->open;
if (open) {
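
Since FMODE_LSEEK/FMODE_PREAD/FMODE_PWRITE are now set by the VFS immediately before ->open() runs, an instance that wants a non-seekable file clears them from inside ->open(); the stock helper already does exactly that:

/* hypothetical driver ->open(); nonseekable_open() clears
 * FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE on the file */
static int example_open(struct inode *inode, struct file *file)
{
	return nonseekable_open(inode, file);
}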
@@ -795,6 +783,7 @@ static int do_dentry_open(struct file *f,
if (error)
goto cleanup_all;
}
+ f->f_mode |= FMODE_OPENED;
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(inode);
if ((f->f_mode & FMODE_READ) &&
@@ -809,9 +798,16 @@ static int do_dentry_open(struct file *f,
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
+ /* NB: we're sure to have correct a_ops only after f_op->open */
+ if (f->f_flags & O_DIRECT) {
+ if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
+ return -EINVAL;
+ }
return 0;
cleanup_all:
+ if (WARN_ON_ONCE(error > 0))
+ error = -EINVAL;
fops_put(f->f_op);
if (f->f_mode & FMODE_WRITER) {
put_write_access(inode);
@@ -847,19 +843,12 @@ cleanup_file:
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened)
+ int (*open)(struct inode *, struct file *))
{
- int error;
- BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
+ BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
file->f_path.dentry = dentry;
- error = do_dentry_open(file, d_backing_inode(dentry), open,
- current_cred());
- if (!error)
- *opened |= FILE_OPENED;
-
- return error;
+ return do_dentry_open(file, d_backing_inode(dentry), open);
}
EXPORT_SYMBOL(finish_open);
@@ -874,13 +863,13 @@ EXPORT_SYMBOL(finish_open);
* NB: unlike finish_open() this function does consume the dentry reference and
* the caller need not dput() it.
*
- * Returns "1" which must be the return value of ->atomic_open() after having
+ * Returns "0" which must be the return value of ->atomic_open() after having
* called this function.
*/
int finish_no_open(struct file *file, struct dentry *dentry)
{
file->f_path.dentry = dentry;
- return 1;
+ return 0;
}
EXPORT_SYMBOL(finish_no_open);
@@ -896,8 +885,7 @@ EXPORT_SYMBOL(file_path);
* @file: newly allocated file with f_flag initialized
* @cred: credentials to use
*/
-int vfs_open(const struct path *path, struct file *file,
- const struct cred *cred)
+int vfs_open(const struct path *path, struct file *file)
{
struct dentry *dentry = d_real(path->dentry, NULL, file->f_flags, 0);
@@ -905,7 +893,7 @@ int vfs_open(const struct path *path, struct file *file,
return PTR_ERR(dentry);
file->f_path = *path;
- return do_dentry_open(file, d_backing_inode(dentry), NULL, cred);
+ return do_dentry_open(file, d_backing_inode(dentry), NULL);
}
struct file *dentry_open(const struct path *path, int flags,
@@ -919,19 +907,11 @@ struct file *dentry_open(const struct path *path, int flags,
/* We must always pass in a valid mount pointer. */
BUG_ON(!path->mnt);
- f = get_empty_filp();
+ f = alloc_empty_file(flags, cred);
if (!IS_ERR(f)) {
- f->f_flags = flags;
- error = vfs_open(path, f, cred);
- if (!error) {
- /* from now on we need fput() to dispose of f */
- error = open_check_o_direct(f);
- if (error) {
- fput(f);
- f = ERR_PTR(error);
- }
- } else {
- put_filp(f);
+ error = vfs_open(path, f);
+ if (error) {
+ fput(f);
f = ERR_PTR(error);
}
}
@@ -1063,26 +1043,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(file_open_root);
-struct file *filp_clone_open(struct file *oldfile)
-{
- struct file *file;
- int retval;
-
- file = get_empty_filp();
- if (IS_ERR(file))
- return file;
-
- file->f_flags = oldfile->f_flags;
- retval = vfs_open(&oldfile->f_path, file, oldfile->f_cred);
- if (retval) {
- put_filp(file);
- return ERR_PTR(retval);
- }
-
- return file;
-}
-EXPORT_SYMBOL(filp_clone_open);
-
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
diff --git a/fs/pipe.c b/fs/pipe.c
index 39d6f431da83..bdc5d3c0977d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -741,54 +741,33 @@ fail_inode:
int create_pipe_files(struct file **res, int flags)
{
- int err;
struct inode *inode = get_pipe_inode();
struct file *f;
- struct path path;
if (!inode)
return -ENFILE;
- err = -ENOMEM;
- path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
- if (!path.dentry)
- goto err_inode;
- path.mnt = mntget(pipe_mnt);
-
- d_instantiate(path.dentry, inode);
-
- f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
+ f = alloc_file_pseudo(inode, pipe_mnt, "",
+ O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
+ &pipefifo_fops);
if (IS_ERR(f)) {
- err = PTR_ERR(f);
- goto err_dentry;
+ free_pipe_info(inode->i_pipe);
+ iput(inode);
+ return PTR_ERR(f);
}
- f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
f->private_data = inode->i_pipe;
- res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
+ res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
+ &pipefifo_fops);
if (IS_ERR(res[0])) {
- err = PTR_ERR(res[0]);
- goto err_file;
+ put_pipe_info(inode, inode->i_pipe);
+ fput(f);
+ return PTR_ERR(res[0]);
}
-
- path_get(&path);
res[0]->private_data = inode->i_pipe;
- res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
res[1] = f;
return 0;
-
-err_file:
- put_filp(f);
-err_dentry:
- free_pipe_info(inode->i_pipe);
- path_put(&path);
- return err;
-
-err_inode:
- free_pipe_info(inode->i_pipe);
- iput(inode);
- return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
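
create_pipe_files() now leans on two helpers from earlier in this series: alloc_file_pseudo(), which builds the pseudo dentry/path internally (on success the new file owns the inode reference, so plain fput() tears everything down), and alloc_file_clone(), which shares f_path with an existing file. The pseudo-file pattern, condensed with hypothetical example_* names:

static struct file *example_pseudo_file(struct inode *inode,
					struct vfsmount *mnt, void *state)
{
	struct file *f = alloc_file_pseudo(inode, mnt, "", O_RDWR, &example_fops);

	if (IS_ERR(f))
		iput(inode);		/* allocation failed: inode ref is still ours */
	else
		f->private_data = state;
	return f;
}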
diff --git a/fs/timerfd.c b/fs/timerfd.c
index cdad49da3ff7..d69ad801eb80 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -66,7 +66,7 @@ static void timerfd_triggered(struct timerfd_ctx *ctx)
spin_lock_irqsave(&ctx->wqh.lock, flags);
ctx->expired = 1;
ctx->ticks++;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
@@ -107,7 +107,7 @@ void timerfd_clock_was_set(void)
if (ctx->moffs != moffs) {
ctx->moffs = KTIME_MAX;
ctx->ticks++;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
}
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
@@ -345,7 +345,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
spin_lock_irq(&ctx->wqh.lock);
if (!timerfd_canceled(ctx)) {
ctx->ticks = ticks;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
} else
ret = -ECANCELED;
spin_unlock_irq(&ctx->wqh.lock);
@@ -533,8 +533,8 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
}
SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct itimerspec __user *, utmr,
- struct itimerspec __user *, otmr)
+ const struct __kernel_itimerspec __user *, utmr,
+ struct __kernel_itimerspec __user *, otmr)
{
struct itimerspec64 new, old;
int ret;
@@ -550,7 +550,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
return ret;
}
-SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
+SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *, otmr)
{
struct itimerspec64 kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
@@ -559,7 +559,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
return put_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
const struct compat_itimerspec __user *, utmr,
struct compat_itimerspec __user *, otmr)
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 06f37ddd2997..58cc2414992b 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -602,8 +602,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (unlikely(!fi)) {
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -694,8 +693,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
if (!fi) {
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
set_nlink(inode, 2);
@@ -713,8 +711,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (!fi) {
clear_nlink(inode);
mark_inode_dirty(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -1041,8 +1038,7 @@ out:
out_no_entry:
up_write(&iinfo->i_data_sem);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index e1ef0f0a1353..02c0a4be4212 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -343,8 +343,7 @@ cg_found:
fail_remove_inode:
mutex_unlock(&sbi->s_lock);
clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
UFSD("EXIT (FAILED): err %d\n", err);
return ERR_PTR(err);
failed:
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index d5f43ba76c59..9ef40f100415 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -43,8 +43,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
return 0;
}
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -142,8 +141,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
out_fail:
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -198,8 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput (inode);
+ discard_new_inode(inode);
out_dir:
inode_dec_link_count(dir);
return err;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 0fa29f39d658..3a75de777843 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -1253,7 +1253,7 @@ xfs_setup_inode(
inode_sb_list_add(inode);
/* make the inode look hashed for the writeback code */
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index ec07f23678ea..0d4b1d3dbc1e 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
}
#endif
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#ifdef arch_atomic_fetch_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kasan_check_write(v, sizeof(*v));
- return __arch_atomic_add_unless(v, a, u);
+ return arch_atomic_fetch_add_unless(v, a, u);
}
+#endif
-
-static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
+ return arch_atomic64_fetch_add_unless(v, a, u);
}
+#endif
+#ifdef arch_atomic_inc
+#define atomic_inc atomic_inc
static __always_inline void atomic_inc(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_inc(v);
}
+#endif
+#ifdef arch_atomic64_inc
+#define atomic64_inc atomic64_inc
static __always_inline void atomic64_inc(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
+#endif
+#ifdef arch_atomic_dec
+#define atomic_dec atomic_dec
static __always_inline void atomic_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_dec(v);
}
+#endif
+#ifdef arch_atomic64_dec
+#define atomic64_dec atomic64_dec
static __always_inline void atomic64_dec(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
+#endif
static __always_inline void atomic_add(int i, atomic_t *v)
{
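
This is the shape repeated through the rest of the file: each instrumented wrapper is emitted only when the architecture supplies the corresponding arch_ op, and it #defines its own name so <linux/atomic.h> can detect the op's presence and generate a generic fallback otherwise. For a hypothetical op "frob":

#ifdef arch_atomic_frob
#define atomic_frob atomic_frob	/* tells <linux/atomic.h>: no fallback needed */
static __always_inline void atomic_frob(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_frob(v);
}
#endif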
@@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
arch_atomic64_xor(i, v);
}
+#ifdef arch_atomic_inc_return
+#define atomic_inc_return atomic_inc_return
static __always_inline int atomic_inc_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
+#endif
+#ifdef arch_atomic64_inc_return
+#define atomic64_inc_return atomic64_inc_return
static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
+#endif
+#ifdef arch_atomic_dec_return
+#define atomic_dec_return atomic_dec_return
static __always_inline int atomic_dec_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
+#endif
+#ifdef arch_atomic64_dec_return
+#define atomic64_dec_return atomic64_dec_return
static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
+#endif
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
+#endif
+#ifdef arch_atomic64_dec_if_positive
+#define atomic64_dec_if_positive atomic64_dec_if_positive
static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
+#endif
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
+#endif
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
+#endif
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
@@ -325,152 +372,96 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
return arch_atomic64_fetch_xor(i, v);
}
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
+#endif
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
+#endif
-static __always_inline unsigned long
-cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
+#define xchg(ptr, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, (new)); \
+})
#define cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
- (unsigned long)(new), sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define sync_cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define cmpxchg_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64(ptr, old, new);
-}
-
#define cmpxchg64(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64_local(ptr, old, new);
-}
-
#define cmpxchg64_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
})
-/*
- * Originally we had the following code here:
- * __typeof__(p1) ____p1 = (p1);
- * kasan_check_write(____p1, 2 * sizeof(*____p1));
- * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
- * But it leads to compilation failures (see gcc issue 72873).
- * So for now it's left non-instrumented.
- * There are few callers of cmpxchg_double(), so it's not critical.
- */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
- arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
-({ \
- arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
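
Common to all the rewritten macros above: a typeof temporary evaluates the pointer expression exactly once, hands kasan_check_write() the precise object size, and — unlike the removed *_size() trampolines — keeps the full pointee type instead of laundering values through unsigned long. One consequence, sketched:

static u32 example_claim(u32 *table, unsigned int *idx)
{
	/* the pointer argument is evaluated exactly once inside cmpxchg() */
	return cmpxchg(&table[(*idx)++], 0, 1);
}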
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index abe6dd9ca2a8..13324aa828eb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
#include <linux/irqflags.h>
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
@@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v)
atomic_sub_return(i, v);
}
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-#endif
-
#endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 8d28eb010d0d..97b28b7f1f29 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
*/
#ifndef _ASM_GENERIC_ATOMIC64_H
#define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
typedef struct {
long long counter;
@@ -50,18 +51,10 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP
extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 04deffaf5f7d..dd90c9792909 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,189 +2,67 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
-#include <asm/types.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_SMP
-#include <asm/spinlock.h>
-#include <asm/cache.h> /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- local_irq_save(f); \
- arch_spin_lock(s); \
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- arch_spin_unlock(s); \
- local_irq_restore(f); \
-} while(0)
-
-
-#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
-#endif
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
/*
- * NMI events can occur at any time, including when interrupts have been
- * disabled by *_irqsave(). So you can get NMI events occurring while a
- * *_bit function is holding a spin lock. If the NMI handler also wants
- * to do bit manipulation (and they do) then you can get a deadlock
- * between the original caller of *_bit() and the NMI handler.
- *
- * by Keith Owens
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
*/
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p |= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p &= ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p ^= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old | mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
- return (old & mask) != 0;
+ old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old & ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (!(READ_ONCE(*p) & mask))
+ return 0;
- return (old & mask) != 0;
+ old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old ^ mask;
- _atomic_spin_unlock_irqrestore(p, flags);
- return (old & mask) != 0;
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 67ab280ad134..3ae021368f48 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -2,6 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
@@ -11,7 +15,20 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -20,11 +37,11 @@
*
* This operation is atomic and provides release barrier semantics.
*/
-#define clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
@@ -37,11 +54,38 @@ do { \
*
* See for example x86's implementation.
*/
-#define __clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ unsigned long old;
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+ p += BIT_WORD(nr);
+ old = READ_ONCE(*p);
+ old &= ~BIT_MASK(nr);
+ atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
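Together these primitives are enough to build a simple bit lock, which is essentially what <linux/bit_spinlock.h> provides; the negative-byte variant lets the filemap code clear PG_locked and test PG_waiters in a single RMW. A minimal bit-lock sketch, assuming a caller-owned lock word:

#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

static unsigned long lock_word;

static void my_bit_lock(unsigned int nr)
{
	/* acquire semantics once the old bit value was 0 */
	while (test_and_set_bit_lock(nr, &lock_word))
		cpu_relax();
}

static void my_bit_unlock(unsigned int nr)
{
	/* release semantics; pairs with the acquire above */
	clear_bit_unlock(nr, &lock_word);
}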
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f59639afaa39..b081794ba135 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 3063125197ad..e811ef7b8350 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -303,4 +303,14 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_migrate_finish(mm) do {} while (0)
+/*
+ * Used to flush the TLB when page tables are removed; lazy
+ * TLB mode may cause a CPU to retain intermediate translations
+ * pointing to about-to-be-freed page table memory.
+ */
+#ifndef HAVE_TLB_FLUSH_REMOVE_TABLES
+#define tlb_flush_remove_tables(mm) do {} while (0)
+#define tlb_flush_remove_tables_local(mm) do {} while (0)
+#endif
+
#endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 01ce3997cb42..1e8e88bdaf09 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -36,40 +38,46 @@
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
*/
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_acquire_fence(); \
__ret; \
})
-#endif
-#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
- smp_mb__before_atomic(); \
+ __atomic_release_fence(); \
op##_relaxed(args); \
})
-#endif
-#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
- smp_mb__before_atomic(); \
+ __atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_post_full_fence(); \
__ret; \
})
-#endif
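With the fences factored out like this, an acquire operation is mechanically the relaxed operation followed by the acquire fence. An illustration only, showing roughly what __atomic_op_acquire(atomic_add_return, ...) expands to when the architecture does not override the fence:

static inline int my_add_return_acquire(int i, atomic_t *v)
{
	int ret = atomic_add_return_relaxed(i, v);

	__atomic_acquire_fence();	/* smp_mb__after_atomic() by default */
	return ret;
}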
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
@@ -95,11 +103,23 @@
#endif
#endif /* atomic_add_return_relaxed */
+#ifndef atomic_inc
+#define atomic_inc(v) atomic_add(1, (v))
+#endif
+
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+#else /* atomic_inc_return */
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
+#endif /* atomic_inc_return */
#else /* atomic_inc_return_relaxed */
@@ -143,11 +163,23 @@
#endif
#endif /* atomic_sub_return_relaxed */
+#ifndef atomic_dec
+#define atomic_dec(v) atomic_sub(1, (v))
+#endif
+
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
+#else /* atomic_dec_return */
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
+#endif /* atomic_dec_return */
#else /* atomic_dec_return_relaxed */
@@ -328,12 +360,22 @@
#endif
#endif /* atomic_fetch_and_relaxed */
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_andnot
+#define atomic_andnot(i, v) atomic_and(~(int)(i), (v))
+#endif
+
#ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
+
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v))
+#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v))
+#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v))
+#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v))
+#else /* atomic_fetch_andnot */
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
#else /* atomic_fetch_andnot_relaxed */
@@ -352,7 +394,6 @@
__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
/* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_relaxed
@@ -520,112 +561,140 @@
#endif /* xchg_relaxed */
/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#endif
+
+/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
*/
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
- return __atomic_add_unless(v, a, u) != u;
+ return atomic_fetch_add_unless(v, a, u) != u;
}
/**
* atomic_inc_not_zero - increment unless the number is zero
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
*/
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
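atomic_fetch_add_unless() and the atomic_inc_not_zero() wrapper are the building blocks of the classic lookup pattern that takes a reference only while the object is still live. A sketch with a hypothetical object type:

struct obj {
	atomic_t refs;
};

static struct obj *obj_tryget(struct obj *o)
{
	/* fails permanently once refs has dropped to zero */
	return atomic_inc_not_zero(&o->refs) ? o : NULL;
}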
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_inc_and_test
+static inline bool atomic_inc_and_test(atomic_t *v)
{
- return atomic_fetch_and_relaxed(~i, v);
+ return atomic_inc_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic_dec_and_test
+static inline bool atomic_dec_and_test(atomic_t *v)
{
- return atomic_fetch_and_acquire(~i, v);
+ return atomic_dec_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_sub_and_test
+static inline bool atomic_sub_and_test(int i, atomic_t *v)
{
- return atomic_fetch_and_release(~i, v);
+ return atomic_sub_return(i, v) == 0;
}
#endif
/**
- * atomic_inc_not_zero_hint - increment if not null
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
* @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
*
- * Returns: 0 if increment was not done, 1 otherwise.
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when the
+ * result is greater than or equal to zero.
*/
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+#ifndef atomic_add_negative
+static inline bool atomic_add_negative(int i, atomic_t *v)
{
- int val, c = hint;
-
- /* sanity test, should be removed by compiler if hint is a constant */
- if (!hint)
- return atomic_inc_not_zero(v);
-
- do {
- val = atomic_cmpxchg(v, c, c + 1);
- if (val == c)
- return 1;
- c = val;
- } while (c);
-
- return 0;
+ return atomic_add_return(i, v) < 0;
}
#endif
#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *v)
{
- int v, v1;
- for (v = 0; v >= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v + 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
}
#endif
#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *v)
{
- int v, v1;
- for (v = 0; v <= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v - 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
}
#endif
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
+ int dec, c = atomic_read(v);
+
+ do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
+ } while (!atomic_try_cmpxchg(v, &c, dec));
+
return dec;
}
#endif
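All of the loops above share the atomic_try_cmpxchg() idiom: on failure the helper stores the freshly observed value back into the local copy, so each retry re-tests the precondition without an explicit atomic_read(). The shape of the idiom, as a hypothetical dec-if-above-floor helper:

static inline bool my_dec_if_above(atomic_t *v, int floor)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c <= floor))	/* precondition, re-checked each round */
			return false;
	} while (!atomic_try_cmpxchg(v, &c, c - 1));

	return true;
}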
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_add_return_relaxed */
+#ifndef atomic64_inc
+#define atomic64_inc(v) atomic64_add(1, (v))
+#endif
+
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#else /* atomic64_inc_return */
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
+#endif /* atomic64_inc_return */
#else /* atomic64_inc_return_relaxed */
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_sub_return_relaxed */
+#ifndef atomic64_dec
+#define atomic64_dec(v) atomic64_sub(1, (v))
+#endif
+
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#else /* atomic64_dec_return */
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
+#endif /* atomic64_dec_return */
#else /* atomic64_dec_return_relaxed */
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_fetch_and_relaxed */
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_andnot
+#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v))
+#endif
+
#ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v))
+#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v))
+#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v))
+#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v))
+#else /* atomic64_fetch_andnot */
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
#else /* atomic64_fetch_andnot_relaxed */
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
/* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- atomic64_and(~i, v);
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
}
+#endif
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- return atomic64_fetch_and(~i, v);
+ return atomic64_fetch_add_unless(v, a, u) != u;
}
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#endif
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_inc_and_test
+static inline bool atomic64_inc_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_relaxed(~i, v);
+ return atomic64_inc_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic64_dec_and_test
+static inline bool atomic64_dec_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_acquire(~i, v);
+ return atomic64_dec_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_sub_and_test
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
+#endif
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when the
+ * result is greater than or equal to zero.
+ */
+#ifndef atomic64_add_negative
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
{
- return atomic64_fetch_and_release(~i, v);
+ return atomic64_add_return(i, v) < 0;
+}
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool atomic64_inc_unless_negative(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool atomic64_dec_unless_positive(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#endif
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * @v was not decremented.
+ */
+#ifndef atomic64_dec_if_positive
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long dec, c = atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
}
#endif
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..af419012d77d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -2,29 +2,9 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
+#include <linux/bits.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
- (((~0ULL) - (1ULL << (l)) + 1) & \
- (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
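Splitting these definitions into <linux/bits.h> lets low-level headers pick up BIT() and GENMASK() without dragging in the rest of <linux/bitops.h>. A field-extraction sketch using GENMASK(); the register layout is made up for illustration:

#include <linux/bits.h>

#define MY_REG_FIELD	GENMASK(7, 4)	/* bits 7..4, i.e. mask 0xf0 */

static inline unsigned long my_reg_field_get(unsigned long reg)
{
	return (reg & MY_REG_FIELD) >> 4;
}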
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 7dff1963c185..308918928767 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index c68acc47da57..df45ee8413d6 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -115,11 +115,6 @@ typedef compat_ulong_t compat_aio_context_t;
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
struct compat_utimbuf {
compat_time_t actime;
compat_time_t modtime;
@@ -300,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
extern int compat_put_timespec(const struct timespec *, void __user *);
extern int compat_get_timeval(struct timeval *, const void __user *);
extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
struct compat_iovec {
compat_uptr_t iov_base;
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
index 31f2774f1994..e70bfd1d2c3f 100644
--- a/include/linux/compat_time.h
+++ b/include/linux/compat_time.h
@@ -17,7 +17,16 @@ struct compat_timeval {
s32 tv_usec;
};
+struct compat_itimerspec {
+ struct compat_timespec it_interval;
+ struct compat_timespec it_value;
+};
+
extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+extern int get_compat_itimerspec64(struct itimerspec64 *its,
+ const struct compat_itimerspec __user *uits);
+extern int put_compat_itimerspec64(const struct itimerspec64 *its,
+ struct compat_itimerspec __user *uits);
#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a97a63eef59f..3233fbe23594 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -30,7 +30,7 @@ struct cpu {
};
extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8796ba387152..4cf06a64bc02 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -164,6 +164,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 66c6e17e61e5..d32957b423d5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -227,7 +227,6 @@ extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -271,8 +270,6 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, const struct qstr *);
-
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 10b2654d549b..a0aa00cc909d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -44,4 +44,12 @@ static inline void arch_sync_dma_for_cpu(struct device *dev,
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(struct device *dev);
+#else
+static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 56add823f190..401e4b254e30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
void *flush;
} efi_file_handle_t;
+typedef struct {
+ u64 revision;
+ u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+ u64 revision;
+ u64 open_volume;
+} efi_file_io_interface_64_t;
+
typedef struct _efi_file_io_interface {
u64 revision;
int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
-extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
-static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
extern int efi_tpm_eventlog_init(void);
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/file.h b/include/linux/file.h
index 279720db984a..6b2fb032416c 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -17,9 +17,12 @@ extern void fput(struct file *);
struct file_operations;
struct vfsmount;
struct dentry;
+struct inode;
struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
- const struct file_operations *fop);
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+ const struct file_operations *);
static inline void fput_light(struct file *file, int fput_needed)
{
@@ -78,7 +81,6 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
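alloc_file_pseudo() folds the dentry and path setup that callers of the removed alloc_file() had to open-code for pseudo filesystems (sockets, anon inodes and the like) into one call. A hedged sketch; the my_* names are illustrative:

static struct file *my_pseudo_file(struct inode *inode, struct vfsmount *mnt,
				   const struct file_operations *fops)
{
	/* was: d_alloc_pseudo() + d_instantiate() + alloc_file() */
	return alloc_file_pseudo(inode, mnt, "my-pseudo", O_RDWR, fops);
}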
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 805bf22898cf..1ec33fd0423f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -148,6 +148,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Has write method(s) */
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_OPENED ((__force fmode_t)0x80000)
+#define FMODE_CREATED ((__force fmode_t)0x100000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -275,6 +278,7 @@ struct writeback_control;
/*
* Write life time hint values.
+ * Stored in struct inode as u8.
*/
enum rw_hint {
WRITE_LIFE_NOT_SET = 0,
@@ -609,8 +613,8 @@ struct inode {
struct timespec64 i_ctime;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
- unsigned int i_blkbits;
- enum rw_hint i_write_hint;
+ u8 i_blkbits;
+ u8 i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -685,6 +689,17 @@ static inline int inode_unhashed(struct inode *inode)
}
/*
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want special inodes in the fileset inode space, we make them
+ * appear hashed, but do not put them on any lists. hlist_del()
+ * will work fine and require no locking.
+ */
+static inline void inode_fake_hash(struct inode *inode)
+{
+ hlist_add_fake(&inode->i_hash);
+}
+
+/*
* inode->i_mutex nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
@@ -1776,7 +1791,7 @@ struct inode_operations {
int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
+ umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
} ____cacheline_aligned;
@@ -2014,6 +2029,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
* and work dirs among overlayfs mounts.
*
+ * I_CREATING New object's inode in the middle of setting up.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2034,7 +2051,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define __I_DIRTY_TIME_EXPIRED 12
#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
+#define I_OVL_INUSE (1 << 14)
+#define I_CREATING (1 << 15)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2420,7 +2438,10 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
-extern struct file *filp_clone_open(struct file *);
+static inline struct file *file_clone_open(struct file *file)
+{
+ return dentry_open(&file->f_path, file->f_flags, file->f_cred);
+}
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
@@ -2428,13 +2449,8 @@ extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
-enum {
- FILE_CREATED = 1,
- FILE_OPENED = 2
-};
extern int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened);
+ int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
/* fs/ioctl.c */
@@ -2622,8 +2638,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
-extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
- loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
@@ -2918,6 +2932,7 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
+extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
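With the FILE_CREATED/FILE_OPENED enum gone, an ->atomic_open() instance reports what happened through file->f_mode rather than the removed int *opened argument. A hedged sketch of the new convention; the myfs_* helpers are hypothetical:

static struct inode *myfs_lookup_or_create(struct inode *dir,
					   struct dentry *dentry,
					   unsigned open_flag,
					   umode_t create_mode,
					   bool *created);	/* hypothetical */

static int myfs_atomic_open(struct inode *dir, struct dentry *dentry,
			    struct file *file, unsigned open_flag,
			    umode_t create_mode)
{
	bool created = false;
	struct inode *inode;

	inode = myfs_lookup_or_create(dir, dentry, open_flag,
				      create_mode, &created);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (created)
		file->f_mode |= FMODE_CREATED;	/* was: *opened |= FILE_CREATED */

	d_instantiate(dentry, inode);
	return finish_open(file, dentry, NULL);	/* no int *opened any more */
}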
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e4647e0eb60..d9ba3fc363b7 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -16,7 +16,7 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask, int opened);
+extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
@@ -34,7 +34,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_file_check(struct file *file, int mask, int opened)
+static inline int ima_file_check(struct file *file, int mask)
{
return 0;
}
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index cbb872c1b607..9d2ea3e907d0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,6 +73,7 @@
#define GICD_TYPER_MBIS (1U << 16)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
phys_addr_t phys_base;
} __percpu *rdist;
struct page *prop_page;
- int id_bits;
u64 flags;
+ u32 gicd_typer;
bool has_vlpis;
bool has_direct_lpi;
};
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9440a2fc8893..e909413e4e38 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -63,7 +63,6 @@ struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
@@ -101,12 +100,6 @@ struct kprobe {
*/
kprobe_fault_handler_t fault_handler;
- /*
- * ... called if breakpoint trap occurs in probe handler.
- * Return 1 if it handled break, otherwise kernel will see it.
- */
- kprobe_break_handler_t break_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -155,24 +148,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
}
/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
- struct kprobe kp;
- void *entry; /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler) (handler)
-
-/*
* Function-return probe -
* Note:
* User needs to provide a handler function, and initialize maxactive.
@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
-static inline void jprobe_return(void)
-{
-}
static inline int register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
return -ENOSYS;
}
#endif /* CONFIG_KPROBES */
-static inline int register_jprobe(struct jprobe *p)
-{
- return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
- return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
{
return enable_kprobe(&rp->kp);
}
-static inline int disable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
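With jprobes removed, the migration path is a plain kprobe whose pre_handler pulls whatever it needs out of pt_regs instead of mirroring the probed function's prototype. A minimal sketch; the target symbol is illustrative:

#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("%s hit at %px\n", p->symbol_name,
		(void *)instruction_pointer(regs));
	return 0;	/* resume via single-step of the original insn */
}

static struct kprobe my_probe = {
	.symbol_name	= "do_sys_open",	/* illustrative target */
	.pre_handler	= my_pre_handler,
};

/* register_kprobe(&my_probe) / unregister_kprobe(&my_probe) as before */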
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5b9fddbaac41..b2bb44f87f5a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt))
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt) (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+ return kt;
+}
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f1131c8dd54..a8ee106b865d 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1569,7 +1569,7 @@ union security_list_options {
int (*file_send_sigiotask)(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file, const struct cred *cred);
+ int (*file_open)(struct file *file);
int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
void (*task_free)(struct task_struct *task);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 99ce070e7dcb..efdc24dd9e97 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -335,176 +335,183 @@ struct core_state {
struct kioctx_table;
struct mm_struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ struct {
+ struct vm_area_struct *mmap; /* list of VMAs */
+ struct rb_root mm_rb;
+ u32 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
- unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
#endif
- unsigned long mmap_base; /* base of mmap area */
- unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base addresses for compatible mmap() */
- unsigned long mmap_compat_base;
- unsigned long mmap_compat_legacy_base;
+ /* Base addresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
#endif
- unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
- pgd_t * pgd;
-
- /**
- * @mm_users: The number of users including userspace.
- *
- * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
- * to 0 (i.e. when the task exits and there are no other temporary
- * reference holders), we also release a reference on @mm_count
- * (which may then free the &struct mm_struct if @mm_count also
- * drops to 0).
- */
- atomic_t mm_users;
-
- /**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
- *
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
- */
- atomic_t mm_count;
+ unsigned long task_size; /* size of task vm space */
+ unsigned long highest_vm_end; /* highest vma end address */
+ pgd_t * pgd;
+
+ /**
+ * @mm_users: The number of users including userspace.
+ *
+ * Use mmget()/mmget_not_zero()/mmput() to modify. When this
+ * drops to 0 (i.e. when the task exits and there are no other
+ * temporary reference holders), we also release a reference on
+ * @mm_count (which may then free the &struct mm_struct if
+ * @mm_count also drops to 0).
+ */
+ atomic_t mm_users;
+
+ /**
+ * @mm_count: The number of references to &struct mm_struct
+ * (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
+ * &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
#ifdef CONFIG_MMU
- atomic_long_t pgtables_bytes; /* PTE page table pages */
+ atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
- int map_count; /* number of VMAs */
+ int map_count; /* number of VMAs */
- spinlock_t page_table_lock; /* Protects page tables and some counters */
- struct rw_semaphore mmap_sem;
+ spinlock_t page_table_lock; /* Protects page tables and some
+ * counters
+ */
+ struct rw_semaphore mmap_sem;
- struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
+ struct list_head mmlist; /* List of maybe swapped mm's. These
+ * are globally strung together off
+ * init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
- unsigned long hiwater_rss; /* High-watermark of RSS usage */
- unsigned long hiwater_vm; /* High-water virtual memory usage */
+ unsigned long hiwater_rss; /* High-watermark of RSS usage */
+ unsigned long hiwater_vm; /* High-water virtual memory usage */
- unsigned long total_vm; /* Total pages mapped */
- unsigned long locked_vm; /* Pages that have PG_mlocked set */
- unsigned long pinned_vm; /* Refcount permanently increased */
- unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
- unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
- unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ unsigned long total_vm; /* Total pages mapped */
+ unsigned long locked_vm; /* Pages that have PG_mlocked set */
+ unsigned long pinned_vm; /* Refcount permanently increased */
+ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+ unsigned long stack_vm; /* VM_STACK */
+ unsigned long def_flags;
- spinlock_t arg_lock; /* protect the below fields */
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
+ spinlock_t arg_lock; /* protect the below fields */
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- /*
- * Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- struct mm_rss_stat rss_stat;
-
- struct linux_binfmt *binfmt;
+ /*
+ * Special counters, in some configurations protected by the
+ * page_table_lock, in other configurations by being atomic.
+ */
+ struct mm_rss_stat rss_stat;
- cpumask_var_t cpu_vm_mask_var;
+ struct linux_binfmt *binfmt;
- /* Architecture-specific MM context */
- mm_context_t context;
+ /* Architecture-specific MM context */
+ mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access the bits */
+ unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
+ struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
- atomic_t membarrier_state;
+ atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
- spinlock_t ioctx_lock;
- struct kioctx_table __rcu *ioctx_table;
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
- /*
- * "owner" points to a task that is regarded as the canonical
- * user/owner of this mm. All of the following must be true in
- * order for it to be changed:
- *
- * current == mm->owner
- * current->mm != mm
- * new_owner->mm == mm
- * new_owner->alloc_lock is held
- */
- struct task_struct __rcu *owner;
+ /*
+ * "owner" points to a task that is regarded as the canonical
+ * user/owner of this mm. All of the following must be true in
+ * order for it to be changed:
+ *
+ * current == mm->owner
+ * current->mm != mm
+ * new_owner->mm == mm
+ * new_owner->alloc_lock is held
+ */
+ struct task_struct __rcu *owner;
#endif
- struct user_namespace *user_ns;
+ struct user_namespace *user_ns;
- /* store ref to file /proc/<pid>/exe symlink points to */
- struct file __rcu *exe_file;
+ /* store ref to file /proc/<pid>/exe symlink points to */
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page_table_lock */
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- struct cpumask cpumask_allocation;
+ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
- /*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and migrate
- * pages to new nodes if necessary.
- */
- unsigned long numa_next_scan;
+ /*
+ * numa_next_scan is the next time that the PTEs will be marked
+ * pte_numa. NUMA hinting faults will gather statistics and
+ * migrate pages to new nodes if necessary.
+ */
+ unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
- unsigned long numa_scan_offset;
+ /* Restart point for scanning and setting pte_numa */
+ unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
- int numa_scan_seq;
+ /* numa_scan_seq prevents two threads setting pte_numa */
+ int numa_scan_seq;
#endif
- /*
- * An operation with batched TLB flushing is going on. Anything that
- * can move process memory needs to flush the TLB when moving a
- * PROT_NONE or PROT_NUMA mapped page.
- */
- atomic_t tlb_flush_pending;
+ /*
+ * An operation with batched TLB flushing is going on. Anything
+ * that can move process memory needs to flush the TLB when
+ * moving a PROT_NONE or PROT_NUMA mapped page.
+ */
+ atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
- /* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
#endif
- struct uprobes_state uprobes_state;
+ struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
- atomic_long_t hugetlb_usage;
+ atomic_long_t hugetlb_usage;
#endif
- struct work_struct async_put_work;
+ struct work_struct async_put_work;
#if IS_ENABLED(CONFIG_HMM)
- /* HMM needs to track a few things per mm */
- struct hmm *hmm;
+ /* HMM needs to track a few things per mm */
+ struct hmm *hmm;
#endif
-} __randomize_layout;
+ } __randomize_layout;
+
+ /*
+ * The mm_cpumask needs to be at the end of mm_struct, because it
+ * is dynamically sized based on nr_cpu_ids.
+ */
+ unsigned long cpu_bitmap[];
+};
extern struct mm_struct init_mm;
+/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- mm->cpu_vm_mask_var = &mm->cpumask_allocation;
-#endif
- cpumask_clear(mm->cpu_vm_mask_var);
+ unsigned long cpu_bitmap = (unsigned long)mm;
+
+ cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return mm->cpu_vm_mask_var;
+ return (struct cpumask *)&mm->cpu_bitmap;
}
struct mmu_gather;
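Because cpu_bitmap[] is now a flexible array sized at runtime by nr_cpu_ids, any allocation of struct mm_struct must append room for it; the mm_struct slab cache is sized along these lines (a sketch, not the exact fork.c code):

static size_t my_mm_alloc_size(void)
{
	/* struct mm_struct plus the runtime-sized cpumask tail */
	return sizeof(struct mm_struct) + cpumask_size();
}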
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b8d868d23e79..08f9247e9827 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
-#else
+
+extern int lockup_detector_online_cpu(unsigned int cpu);
+extern int lockup_detector_offline_cpu(unsigned int cpu);
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
-#endif
+
+#define lockup_detector_online_cpu NULL
+#define lockup_detector_offline_cpu NULL
+#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
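These hooks pair with the CPUHP_AP_WATCHDOG_ONLINE state added to cpuhotplug.h above; the watchdog code can wire them up with a single cpuhp_setup_state() call, roughly as below. When CONFIG_SOFTLOCKUP_DETECTOR is off, the NULL hook macros are accepted by the cpuhp core:

static int __init my_watchdog_hp_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_WATCHDOG_ONLINE, "watchdog:online",
				 lockup_detector_online_cpu,
				 lockup_detector_offline_cpu);
}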
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 87f6db437e4a..53c500f0ca79 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -490,7 +490,7 @@ struct perf_addr_filters_head {
};
/**
- * enum perf_event_state - the states of a event
+ * enum perf_event_state - the states of an event:
*/
enum perf_event_state {
PERF_EVENT_STATE_DEAD = -4,
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index c85704fcdbd2..ee7e987ea1b4 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -95,8 +95,8 @@ struct k_itimer {
clockid_t it_clock;
timer_t it_id;
int it_active;
- int it_overrun;
- int it_overrun_last;
+ s64 it_overrun;
+ s64 it_overrun_last;
int it_requeue_pending;
int it_sigev_notify;
ktime_t it_interval;
diff --git a/include/linux/pti.h b/include/linux/pti.h
index 0174883a935a..1a941efcaa62 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -6,6 +6,7 @@
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
+static inline void pti_finalize(void) { }
#endif
#endif
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 36df6ccbc874..4786c2235b98 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position, which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu(), except
+ * that this one starts after the given position while that one starts
+ * at the given position.
*/
#define list_for_each_entry_continue_rcu(pos, head, member) \
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
*
* Iterate over the tail of a list starting from a given position,
* which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu(), except
+ * that this one starts at the given position while that one starts at
+ * the position after it.
*/
#define list_for_each_entry_from_rcu(pos, head, member) \
for (; &(pos)->member != (head); \
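A sketch of resuming a walk with list_for_each_entry_from_rcu(); per the comment above, the caller must still be inside the RCU read-side critical section in which pos was obtained, or must hold a reference that pins pos in the list. The item type is hypothetical:

struct item {
	struct list_head node;
	int key;
};

static struct item *find_from(struct item *pos, struct list_head *head, int key)
{
	/* caller holds rcu_read_lock(); pos is already a valid member */
	list_for_each_entry_from_rcu(pos, head, node) {
		if (pos->key == key)
			return pos;
	}
	return NULL;
}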
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 65163aa0bb04..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
} while (0)
/*
- * Note a voluntary context switch for RCU-tasks benefit. This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for the benefit of RCU-tasks.
+ * This is a macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- rcu_note_voluntary_context_switch_lite(t); \
+ rcu_tasks_qs(t); \
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_tasks_qs(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#define call_rcu_tasks call_rcu_sched
#define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
*/
#define cond_resched_tasks_rcu_qs() \
do { \
- if (!cond_resched()) \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
+ cond_resched(); \
} while (0)
/*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
- * kill_dependency(). It could be used as follows:
- * ``
+ * kill_dependency(). It could be used as follows::
+ *
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
- *``
*/
#define rcu_pointer_handoff(p) (p)
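As a hedged sketch of where the reordered cond_resched_tasks_rcu_qs() above is meant to sit (loop and helpers hypothetical), a long-running kernel loop now reports the RCU-tasks quiescent state even when cond_resched() declines to reschedule:

	static void scan_all_items(struct list_head *items)
	{
		struct item *p;

		list_for_each_entry(p, items, list) {
			do_unit_of_work(p);		/* hypothetical helper */
			cond_resched_tasks_rcu_qs();	/* QS first, then maybe
							 * reschedule */
		}
	}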
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
#define rcu_note_context_switch(preempt) \
do { \
rcu_sched_qs(); \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
} while (0)
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a685da2c4522..e28cce21bad6 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -3,9 +3,10 @@
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
+
+struct mutex;
/**
* struct refcount_t - variant of atomic_t specialized for reference counts
@@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
+extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
+extern void refcount_add_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
+extern void refcount_inc_checked(refcount_t *r);
+
+extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
+extern void refcount_dec_checked(refcount_t *r);
+
#ifdef CONFIG_REFCOUNT_FULL
-extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
-extern void refcount_add(unsigned int i, refcount_t *r);
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+#define refcount_add_not_zero refcount_add_not_zero_checked
+#define refcount_add refcount_add_checked
+
+#define refcount_inc_not_zero refcount_inc_not_zero_checked
+#define refcount_inc refcount_inc_checked
+
+#define refcount_sub_and_test refcount_sub_and_test_checked
-extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+#define refcount_dec_and_test refcount_dec_and_test_checked
+#define refcount_dec refcount_dec_checked
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
#else
# ifdef CONFIG_ARCH_HAS_REFCOUNT
# include <asm/refcount.h>
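The net effect of the hunk above is that the *_checked variants are always built, and CONFIG_REFCOUNT_FULL merely aliases the usual names to them. A minimal sketch of the classic lookup pattern these APIs serve (object and field names hypothetical):

	static struct obj *obj_get(struct obj *o)
	{
		/* Acquire a reference only if the object is still live. */
		if (o && !refcount_inc_not_zero(&o->ref))
			return NULL;
		return o;
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->ref))
			kfree(o);
	}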
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 43731fe51c97..dac5086e3815 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -167,8 +167,8 @@ struct task_group;
* need_sleep = false;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state() executes a full memory barrier before accessing the
+ * task state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -1017,7 +1017,6 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct list_head numa_entry;
struct numa_group *numa_group;
/*
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1c1a1512ec55..913488d828cb 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-extern __read_mostly unsigned int sysctl_sched_time_avg;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 411b52e424e1..abe28d5cb3f4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -9,17 +9,16 @@
#define LINUX_SCHED_CLOCK
#ifdef CONFIG_GENERIC_SCHED_CLOCK
-extern void sched_clock_postinit(void);
+extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
#else
-static inline void sched_clock_postinit(void) { }
+static inline void generic_sched_clock_init(void) { }
static inline void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
- ;
}
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 63030c85ee19..88d30fc975e7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -309,7 +309,7 @@ void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
-int security_file_open(struct file *file, const struct cred *cred);
+int security_file_open(struct file *file);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -858,8 +858,7 @@ static inline int security_file_receive(struct file *file)
return 0;
}
-static inline int security_file_open(struct file *file,
- const struct cred *cred)
+static inline int security_file_open(struct file *file)
{
return 0;
}
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index c174844cf663..d0884b525001 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -25,8 +25,6 @@ struct smpboot_thread_data;
* parked (cpu offline)
* @unpark: Optional unpark function, called when the thread is
* unparked (cpu online)
- * @cpumask: Internal state. To update which threads are unparked,
- * call smpboot_update_cpumask_percpu_thread().
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@@ -40,23 +38,12 @@ struct smp_hotplug_thread {
void (*cleanup)(unsigned int cpu, bool online);
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
- cpumask_var_t cpumask;
bool selfparking;
const char *thread_comm;
};
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *cpumask);
-
-static inline int
-smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
-{
- return smpboot_register_percpu_thread_cpumask(plug_thread,
- cpu_possible_mask);
-}
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *);
#endif
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index fd57888d4942..3190997df9ca 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -114,29 +114,48 @@ do { \
#endif /*arch_spin_is_contended*/
/*
- * This barrier must provide two things:
+ * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
+ * between program-order earlier lock acquisitions and program-order later
+ * memory accesses.
*
- * - it must guarantee a STORE before the spin_lock() is ordered against a
- * LOAD after it, see the comments at its two usage sites.
+ * This guarantees that the following two properties hold:
*
- * - it must ensure the critical section is RCsc.
+ * 1) Given the snippet:
*
- * The latter is important for cases where we observe values written by other
- * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ * { X = 0; Y = 0; }
*
- * CPU0 CPU1 CPU2
+ * CPU0 CPU1
*
- * for (;;) {
- * if (READ_ONCE(X))
- * break;
- * }
- * X=1
- * <sched-out>
- * <sched-in>
- * r = X;
+ * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1);
+ * spin_lock(S); smp_mb();
+ * smp_mb__after_spinlock(); r1 = READ_ONCE(X);
+ * r0 = READ_ONCE(Y);
+ * spin_unlock(S);
*
- * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
- * we get migrated and CPU2 sees X==0.
+ * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
+ * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
+ * preceding the call to smp_mb__after_spinlock() in __schedule() and in
+ * try_to_wake_up().
+ *
+ * 2) Given the snippet:
+ *
+ * { X = 0; Y = 0; }
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y);
+ * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb();
+ * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X);
+ * WRITE_ONCE(Y, 1);
+ * spin_unlock(S);
+ *
+ * it is forbidden that CPU0's critical section executes before CPU1's
+ * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
+ * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
+ * preceding the calls to smp_rmb() in try_to_wake_up() for similar
+ * snippets but "projected" onto two CPUs.
+ *
+ * Property (2) upgrades the lock to an RCsc lock.
*
* Since most load-store architectures implement ACQUIRE with an smp_mb() after
* the LL/SC loop, they need no further barriers. Similarly all our TSO
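For reference, a compressed sketch of the wakeup-side shape the comment above has in mind, modeled loosely on try_to_wake_up() (variable names illustrative):

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	smp_mb__after_spinlock();	/* pairs with the waiter's smp_mb():
					 * the earlier @cond store cannot be
					 * reordered past the state load */
	if (!(READ_ONCE(p->state) & state))
		goto unlock;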
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 91494d7e8e41..3e72a291c401 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
return retval;
}
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+ int retval;
+
+ retval = __srcu_read_lock(sp);
+ return retval;
+}
+
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
* @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__srcu_read_unlock(sp, idx);
}
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+ __srcu_read_unlock(sp, idx);
+}
+
/**
* smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
*
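A sketch of the intended tracing-side usage, where neither function tracing nor lockdep may recurse into the reader (the SRCU domain name here is illustrative):

	int idx;

	idx = srcu_read_lock_notrace(&my_srcu);
	/* invoke the probe; nothing in here may re-enter tracing */
	srcu_read_unlock_notrace(&my_srcu, idx);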
diff --git a/include/linux/swait.h b/include/linux/swait.h
index bf8cb0dee23c..73e06e9986d4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
* wait-queues, but the semantics are actually completely different, and
* every single user we have ever had has been buggy (or pointless).
*
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
* "wake_up()" does, and has led to problems. In other cases, it has
* been fine, because there's only ever one waiter (kvm), but in that
 * case the whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
* sleeper state.
*
- * - the exclusive mode; because this requires preserving the list order
- * and this is hard.
+ * - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ * exclusive.
*
* - custom wake callback functions; because you cannot give any guarantees
* about random code. This also allows swait to be used in RT, such that
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
* CPU0 - waker CPU1 - waiter
*
* for (;;) {
- * @cond = true; prepare_to_swait(&wq_head, &wait, state);
+ * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state);
* smp_mb(); // smp_mb() from set_current_state()
* if (swait_active(wq_head)) if (@cond)
* wake_up(wq_head); break;
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
return swait_active(wq);
}
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
-extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
#define ___swait_event(wq, condition, state, ret, cmd) \
({ \
+ __label__ __out; \
struct swait_queue __wait; \
long __ret = ret; \
\
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
- break; \
+ goto __out; \
} \
\
cmd; \
} \
finish_swait(&wq, &__wait); \
- __ret; \
+__out: __ret; \
})
#define __swait_event(wq, condition) \
(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
schedule())
-#define swait_event(wq, condition) \
+#define swait_event_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -208,7 +208,7 @@ do { \
TASK_UNINTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_timeout(wq, condition, timeout) \
+#define swait_event_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -220,7 +220,7 @@ do { \
___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
schedule())
-#define swait_event_interruptible(wq, condition) \
+#define swait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
@@ -233,7 +233,7 @@ do { \
TASK_INTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -246,7 +246,7 @@ do { \
(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
/**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
@@ -257,7 +257,7 @@ do { \
* condition and doesn't want to contribute to system load. Signals are
* ignored.
*/
-#define swait_event_idle(wq, condition) \
+#define swait_event_idle_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -270,7 +270,7 @@ do { \
__ret = schedule_timeout(__ret))
/**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do { \
* or the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed.
*/
-#define swait_event_idle_timeout(wq, condition, timeout) \
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
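With every swait user now exclusive, waiter and waker pair up as in this minimal sketch (queue and flag are illustrative):

	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
	static bool done;

	/* waiter */
	swait_event_exclusive(my_wq, READ_ONCE(done));

	/* waker */
	WRITE_ONCE(done, true);
	swake_up_one(&my_wq);	/* wakes exactly one exclusive waiter */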
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 5c1a0933768e..ebb2f24027e8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -506,9 +506,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
/* fs/timerfd.c */
asmlinkage long sys_timerfd_create(int clockid, int flags);
asmlinkage long sys_timerfd_settime(int ufd, int flags,
- const struct itimerspec __user *utmr,
- struct itimerspec __user *otmr);
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+ const struct __kernel_itimerspec __user *utmr,
+ struct __kernel_itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -573,10 +573,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
asmlinkage long sys_timer_gettime(timer_t timer_id,
- struct itimerspec __user *setting);
+ struct __kernel_itimerspec __user *setting);
asmlinkage long sys_timer_getoverrun(timer_t timer_id);
asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
- const struct itimerspec __user *new_setting,
+ const struct __kernel_itimerspec __user *new_setting,
struct itimerspec __user *old_setting);
asmlinkage long sys_timer_delete(timer_t timer_id);
asmlinkage long sys_clock_settime(clockid_t which_clock,
diff --git a/include/linux/time.h b/include/linux/time.h
index aed74463592d..27d83fd2ae61 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts,
int put_timespec64(const struct timespec64 *ts,
struct __kernel_timespec __user *uts);
int get_itimerspec64(struct itimerspec64 *it,
- const struct itimerspec __user *uit);
+ const struct __kernel_itimerspec __user *uit);
int put_itimerspec64(const struct itimerspec64 *it,
- struct itimerspec __user *uit);
+ struct __kernel_itimerspec __user *uit);
extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 0a7b2f79cec7..05634afba0db 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,6 +12,7 @@ typedef __u64 timeu64_t;
*/
#ifndef CONFIG_64BIT_TIME
#define __kernel_timespec timespec
+#define __kernel_itimerspec itimerspec
#endif
#include <uapi/linux/time.h>
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 86bc2026efce..e79861418fd7 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -177,7 +177,7 @@ static inline time64_t ktime_get_clocktai_seconds(void)
extern bool timekeeping_rtc_skipsuspend(void);
extern bool timekeeping_rtc_skipresume(void);
-extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
+extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
/*
* struct system_time_snapshot - simultaneous raw/real time capture with
@@ -243,7 +243,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock64(struct timespec64 *ts);
+void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
+ struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
/*
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 66272862070b..61dfd93b6ee4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -64,6 +64,8 @@ struct torture_random_state {
long trs_count;
};
#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+ DEFINE_PER_CPU(struct torture_random_state, name)
unsigned long torture_random(struct torture_random_state *trsp);
/* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
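A small sketch of how the new per-CPU variant might be used from a torture kthread pinned to its CPU (names hypothetical):

	static DEFINE_TORTURE_RANDOM_PERCPU(my_rand);

	static void my_torture_op(void)
	{
		struct torture_random_state *trsp = this_cpu_ptr(&my_rand);

		if (!(torture_random(trsp) % 100))
			do_rare_disturbance();	/* hypothetical */
	}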
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 9324ac2d9ff2..43913ae79f64 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -64,7 +64,8 @@ struct vsock_sock {
struct list_head pending_links;
struct list_head accept_queue;
bool rejected;
- struct delayed_work dwork;
+ struct delayed_work connect_work;
+ struct delayed_work pending_work;
struct delayed_work close_work;
bool close_work_scheduled;
u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
diff --git a/include/net/llc.h b/include/net/llc.h
index dc35f25eb679..890a87318014 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
refcount_inc(&sap->refcnt);
}
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+ return refcount_inc_not_zero(&sap->refcnt);
+}
+
void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
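The new helper lets RCU-side lookups take a reference only while the SAP is still live, along these lines (lookup helper illustrative):

	rcu_read_lock_bh();
	sap = __llc_sap_find(sap_value);	/* assumed lookup helper */
	if (sap && !llc_sap_hold_safe(sap))
		sap = NULL;			/* lost the race with release */
	rcu_read_unlock_bh();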
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index d1faf3597b9d..68b17c116907 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -112,8 +112,11 @@ DEFINE_EVENT(filelock_lock, locks_remove_posix,
TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
TP_ARGS(inode, fl, ret));
-DECLARE_EVENT_CLASS(filelock_lease,
+DEFINE_EVENT(filelock_lock, flock_lock_inode,
+ TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
+ TP_ARGS(inode, fl, ret));
+DECLARE_EVENT_CLASS(filelock_lease,
TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl),
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5936aac357ab..a8d07feff6a0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
* "cpuqs": CPU passes through a quiescent state.
* "cpuonl": CPU comes online.
* "cpuofl": CPU goes offline.
+ * "cpuofl-bgp": CPU goes offline while blocking a grace period.
* "reqwait": GP kthread sleeps waiting for grace-period request.
* "reqwaitsig": GP kthread awakened by signal from reqwait state.
* "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
*/
TRACE_EVENT(rcu_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
- TP_ARGS(rcuname, gpnum, gpevent),
+ TP_ARGS(rcuname, gp_seq, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->gpevent = gpevent;
),
TP_printk("%s %lu %s",
- __entry->rcuname, __entry->gpnum, __entry->gpevent)
+ __entry->rcuname, __entry->gp_seq, __entry->gpevent)
);
/*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
*
* "Startleaf": Request a grace period based on leaf-node data.
* "Prestarted": Someone beat us to the request
- * "Startedleaf": Leaf-node start proved sufficient.
- * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedleaf": Leaf node marked for future GP.
+ * "Startedleafroot": All nodes from leaf to root marked for future GP.
* "Startedroot": Requested a nocb grace period based on root-node data.
* "NoGPkthread": The RCU grace-period kthread has not yet started.
* "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
*/
TRACE_EVENT(rcu_future_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
- unsigned long c, u8 level, int grplo, int grphi,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
+ unsigned long gp_seq_req, u8 level, int grplo, int grphi,
const char *gpevent),
- TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+ TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
- __field(unsigned long, completed)
- __field(unsigned long, c)
+ __field(unsigned long, gp_seq)
+ __field(unsigned long, gp_seq_req)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
- __entry->completed = completed;
- __entry->c = c;
+ __entry->gp_seq = gp_seq;
+ __entry->gp_seq_req = gp_seq_req;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %lu %lu %u %d %d %s",
- __entry->rcuname, __entry->gpnum, __entry->completed,
- __entry->c, __entry->level, __entry->grplo, __entry->grphi,
- __entry->gpevent)
+ TP_printk("%s %lu %lu %u %d %d %s",
+ __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->gpevent)
);
/*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
*/
TRACE_EVENT(rcu_grace_period_init,
- TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
int grplo, int grphi, unsigned long qsmask),
- TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+ TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
),
TP_printk("%s %lu %u %d %d %lx",
- __entry->rcuname, __entry->gpnum, __entry->level,
+ __entry->rcuname, __entry->gp_seq, __entry->level,
__entry->grplo, __entry->grphi, __entry->qsmask)
);
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
*/
TRACE_EVENT(rcu_preempt_task,
- TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+ TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
- TP_ARGS(rcuname, pid, gpnum),
+ TP_ARGS(rcuname, pid, gp_seq),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
TP_printk("%s %lu %d",
- __entry->rcuname, __entry->gpnum, __entry->pid)
+ __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
*/
TRACE_EVENT(rcu_unlock_preempted_task,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
- TP_ARGS(rcuname, gpnum, pid),
+ TP_ARGS(rcuname, gp_seq, pid),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+ TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
*/
TRACE_EVENT(rcu_quiescent_state_report,
- TP_PROTO(const char *rcuname, unsigned long gpnum,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
unsigned long mask, unsigned long qsmask,
u8 level, int grplo, int grphi, int gp_tasks),
- TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+ TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
__field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->mask = mask;
__entry->qsmask = qsmask;
__entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
),
TP_printk("%s %lu %lx>%lx %u %d %d %u",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->mask, __entry->qsmask, __entry->level,
__entry->grplo, __entry->grphi, __entry->gp_tasks)
);
/*
* Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number that
- * was blocked by the CPU, the CPU itself, and the type of quiescent state,
- * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
- * when kicking a CPU that has been in dyntick-idle mode for too long, or
- * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
+ * CPU got a quiescent state via its rcu_qs_ctr.
*/
TRACE_EVENT(rcu_fqs,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
- TP_ARGS(rcuname, gpnum, cpu, qsevent),
+ TP_ARGS(rcuname, gp_seq, cpu, qsevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, cpu)
__field(const char *, qsevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->cpu = cpu;
__entry->qsevent = qsevent;
),
TP_printk("%s %lu %d %s",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->cpu, __entry->qsevent)
);
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
#else /* #ifdef CONFIG_RCU_TRACE */
-#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
level, grplo, grphi, event) \
do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
qsmask) do { } while (0)
#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
do { } while (0)
#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
do { } while (0)
#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
grplo, grphi, gp_tasks) do { } \
while (0)
-#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index d4593a6062ef..ce43d340f010 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -38,10 +38,8 @@ enum {
IOCB_CMD_PWRITE = 1,
IOCB_CMD_FSYNC = 2,
IOCB_CMD_FDSYNC = 3,
- /* These two are experimental.
- * IOCB_CMD_PREADX = 4,
- * IOCB_CMD_POLL = 5,
- */
+ /* 4 was the experimental IOCB_CMD_PREADX */
+ IOCB_CMD_POLL = 5,
IOCB_CMD_NOOP = 6,
IOCB_CMD_PREADV = 7,
IOCB_CMD_PWRITEV = 8,
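From userspace the newly exposed opcode is submitted like any other AIO command; a hedged sketch using the raw syscall, assuming an io_context already set up via io_setup() and with error handling omitted:

	#include <linux/aio_abi.h>
	#include <poll.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	struct iocb cb = {
		.aio_lio_opcode	= IOCB_CMD_POLL,
		.aio_fildes	= fd,
		.aio_buf	= POLLIN,	/* events of interest */
	};
	struct iocb *list[1] = { &cb };

	syscall(SYS_io_submit, ctx, 1, list);	/* completes once fd is readable */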
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 4e12c423b9fe..c5358e0ae7c5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -422,6 +422,8 @@ typedef struct elf64_shdr {
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
+#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
+#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b6270a3b38e9..b955b986b341 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -949,6 +949,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index fcf936656493..6b56a2208be7 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -49,6 +49,13 @@ struct __kernel_timespec {
};
#endif
+#ifndef __kernel_itimerspec
+struct __kernel_itimerspec {
+ struct __kernel_timespec it_interval; /* timer period */
+ struct __kernel_timespec it_value; /* timer expiration */
+};
+#endif
+
/*
* legacy timeval structure, only embedded in structures that
* traditionally used 'timeval' to pass time intervals (not absolute
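For context, userspace keeps its usual shape; a sketch of arming a timerfd, where the C library's struct itimerspec lines up with __kernel_itimerspec on 64-bit-time configurations:

	struct itimerspec its = {
		.it_value    = { .tv_sec = 1 },	/* first expiry in 1s */
		.it_interval = { .tv_sec = 1 },	/* then every second */
	};

	timerfd_settime(fd, 0, &its, NULL);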
diff --git a/init/Kconfig b/init/Kconfig
index 041f3a022122..8b1ab81ecda8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -125,10 +125,13 @@ config HAVE_KERNEL_LZO
config HAVE_KERNEL_LZ4
bool
+config HAVE_KERNEL_UNCOMPRESSED
+ bool
+
choice
prompt "Kernel compression mode"
default KERNEL_GZIP
- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4
+ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_UNCOMPRESSED
help
The linux kernel is a kind of self-extracting executable.
Several compression algorithms are available, which differ
@@ -207,6 +210,16 @@ config KERNEL_LZ4
is about 8% bigger than LZO. But the decompression speed is
faster than LZO.
+config KERNEL_UNCOMPRESSED
+ bool "None"
+ depends on HAVE_KERNEL_UNCOMPRESSED
+ help
+	  Produce an uncompressed kernel image. This option is usually not what
+	  you want. It is useful for debugging the kernel in slow simulation
+	  environments, where decompressing and moving the kernel is awfully
+	  slow. This option allows the early boot code to skip the decompressor
+	  and jump right to the uncompressed kernel image.
+
endchoice
config DEFAULT_HOSTNAME
diff --git a/init/main.c b/init/main.c
index 3b4ada11ed52..38c68b593d0d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -79,7 +79,7 @@
#include <linux/pti.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
-#include <linux/sched_clock.h>
+#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/context_tracking.h>
@@ -561,8 +561,8 @@ asmlinkage __visible void __init start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
- boot_cpu_state_init();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+ boot_cpu_hotplug_init();
build_all_zonelists(NULL);
page_alloc_init();
@@ -642,7 +642,6 @@ asmlinkage __visible void __init start_kernel(void)
softirq_init();
timekeeping_init();
time_init();
- sched_clock_postinit();
printk_safe_init();
perf_event_init();
profile_init();
@@ -697,6 +696,7 @@ asmlinkage __visible void __init start_kernel(void)
acpi_early_init();
if (late_time_init)
late_time_init();
+ sched_clock_init();
calibrate_delay();
pid_idr_init();
anon_vma_init();
@@ -1065,6 +1065,13 @@ static int __ref kernel_init(void *unused)
jump_label_invalidate_initmem();
free_initmem();
mark_readonly();
+
+ /*
+ * Kernel mappings are now finalized - update the userspace page-table
+ * to finalize PTI.
+ */
+ pti_finalize();
+
system_state = SYSTEM_RUNNING;
numa_default_policy();
diff --git a/ipc/shm.c b/ipc/shm.c
index fefa00d310fb..d8a38b3be5dd 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1366,15 +1366,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
struct shmid_kernel *shp;
unsigned long addr = (unsigned long)shmaddr;
unsigned long size;
- struct file *file;
+ struct file *file, *base;
int err;
unsigned long flags = MAP_SHARED;
unsigned long prot;
int acc_mode;
struct ipc_namespace *ns;
struct shm_file_data *sfd;
- struct path path;
- fmode_t f_mode;
+ int f_flags;
unsigned long populate = 0;
err = -EINVAL;
@@ -1407,11 +1406,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
if (shmflg & SHM_RDONLY) {
prot = PROT_READ;
acc_mode = S_IRUGO;
- f_mode = FMODE_READ;
+ f_flags = O_RDONLY;
} else {
prot = PROT_READ | PROT_WRITE;
acc_mode = S_IRUGO | S_IWUGO;
- f_mode = FMODE_READ | FMODE_WRITE;
+ f_flags = O_RDWR;
}
if (shmflg & SHM_EXEC) {
prot |= PROT_EXEC;
@@ -1447,46 +1446,44 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
goto out_unlock;
}
- path = shp->shm_file->f_path;
- path_get(&path);
+ /*
+ * We need to take a reference to the real shm file to prevent the
+ * pointer from becoming stale in cases where the lifetime of the outer
+ * file extends beyond that of the shm segment. It's not usually
+ * possible, but it can happen during remap_file_pages() emulation as
+ * that unmaps the memory, then does ->mmap() via file reference only.
+ * We'll deny the ->mmap() if the shm segment was since removed, but to
+ * detect shm ID reuse we need to compare the file pointers.
+ */
+ base = get_file(shp->shm_file);
shp->shm_nattch++;
- size = i_size_read(d_inode(path.dentry));
+ size = i_size_read(file_inode(base));
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
err = -ENOMEM;
sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
if (!sfd) {
- path_put(&path);
+ fput(base);
goto out_nattch;
}
- file = alloc_file(&path, f_mode,
- is_file_hugepages(shp->shm_file) ?
+ file = alloc_file_clone(base, f_flags,
+ is_file_hugepages(base) ?
&shm_file_operations_huge :
&shm_file_operations);
err = PTR_ERR(file);
if (IS_ERR(file)) {
kfree(sfd);
- path_put(&path);
+ fput(base);
goto out_nattch;
}
- file->private_data = sfd;
- file->f_mapping = shp->shm_file->f_mapping;
sfd->id = shp->shm_perm.id;
sfd->ns = get_ipc_ns(ns);
- /*
- * We need to take a reference to the real shm file to prevent the
- * pointer from becoming stale in cases where the lifetime of the outer
- * file extends beyond that of the shm segment. It's not usually
- * possible, but it can happen during remap_file_pages() emulation as
- * that unmaps the memory, then does ->mmap() via file reference only.
- * We'll deny the ->mmap() if the shm segment was since removed, but to
- * detect shm ID reuse we need to compare the file pointers.
- */
- sfd->file = get_file(shp->shm_file);
+ sfd->file = base;
sfd->vm_ops = NULL;
+ file->private_data = sfd;
err = security_mmap_file(file, prot, flags);
if (err)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0918d180f08..46f5f29605d4 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
};
static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
- struct xdp_bulk_queue *bq);
+ struct xdp_bulk_queue *bq, bool in_napi_ctx);
static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
{
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
/* No concurrent bq_enqueue can run at this point */
- bq_flush_to_queue(rcpu, bq);
+ bq_flush_to_queue(rcpu, bq, false);
}
free_percpu(rcpu->bulkq);
/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
};
static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
- struct xdp_bulk_queue *bq)
+ struct xdp_bulk_queue *bq, bool in_napi_ctx)
{
unsigned int processed = 0, drops = 0;
const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
err = __ptr_ring_produce(q, xdpf);
if (err) {
drops++;
- xdp_return_frame_rx_napi(xdpf);
+ if (likely(in_napi_ctx))
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
}
processed++;
}
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
- bq_flush_to_queue(rcpu, bq);
+ bq_flush_to_queue(rcpu, bq, true);
	/* Notice, xdp_buff/page MUST be queued here, long enough for
	 * the driver code invoking us to finish, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
/* Flush all frames in bulkq to real queue */
bq = this_cpu_ptr(rcpu->bulkq);
- bq_flush_to_queue(rcpu, bq);
+ bq_flush_to_queue(rcpu, bq, true);
	/* If already running, costs spin_lock_irqsave + smp_mb */
wake_up_process(rcpu->kthread);
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1e3bf3..750d45edae79 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
}
static int bq_xmit_all(struct bpf_dtab_netdev *obj,
- struct xdp_bulk_queue *bq, u32 flags)
+ struct xdp_bulk_queue *bq, u32 flags,
+ bool in_napi_ctx)
{
struct net_device *dev = obj->dev;
int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
struct xdp_frame *xdpf = bq->q[i];
/* RX path under NAPI protection, can return frames faster */
- xdp_return_frame_rx_napi(xdpf);
+ if (likely(in_napi_ctx))
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
drops++;
}
goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
__clear_bit(bit, bitmap);
bq = this_cpu_ptr(dev->bulkq);
- bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+ bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
}
}
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
- bq_xmit_all(obj, bq, 0);
+ bq_xmit_all(obj, bq, 0, true);
/* Ingress dev_rx will be the same for all xdp_frame's in
* bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
__clear_bit(dev->bit, bitmap);
bq = per_cpu_ptr(dev->bulkq, cpu);
- bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+ bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
}
}
}
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98fb7938beea..c4d75c52b4fc 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
while (msg_data_left(msg)) {
- struct sk_msg_buff *m;
+ struct sk_msg_buff *m = NULL;
bool enospc = false;
int copy;
if (sk->sk_err) {
- err = sk->sk_err;
+ err = -sk->sk_err;
goto out_err;
}
@@ -1116,8 +1116,11 @@ wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
err = sk_stream_wait_memory(sk, &timeo);
- if (err)
+ if (err) {
+ if (m && m != psock->cork)
+ free_start_sg(sk, m);
goto out_err;
+ }
}
out_err:
if (err < 0)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a31a1ba0f8ea..b41c6cf2eb88 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -575,7 +575,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
{
int refold;
- refold = __atomic_add_unless(&map->refcnt, 1, 0);
+ refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
if (refold >= BPF_MAX_REFCNT) {
__bpf_map_put(map, false);
@@ -1144,7 +1144,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
int refold;
- refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
+ refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
if (refold >= BPF_MAX_REFCNT) {
__bpf_prog_put(prog, false);
diff --git a/kernel/compat.c b/kernel/compat.c
index 702aa846ddac..8e40efc2928a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -324,35 +324,6 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
return ret;
}
-/* Todo: Delete these extern declarations when get/put_compat_itimerspec64()
- * are moved to kernel/time/time.c .
- */
-extern int __compat_get_timespec64(struct timespec64 *ts64,
- const struct compat_timespec __user *cts);
-extern int __compat_put_timespec64(const struct timespec64 *ts64,
- struct compat_timespec __user *cts);
-
-int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits)
-{
-
- if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_get_timespec64(&its->it_value, &uits->it_value))
- return -EFAULT;
- return 0;
-}
-EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
-
-int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits)
-{
- if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_put_timespec64(&its->it_value, &uits->it_value))
- return -EFAULT;
- return 0;
-}
-EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
-
/*
* We currently only need the following fields from the sigevent
* structure: sigev_value, sigev_signo, sig_notify and (sometimes
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0db8938fbb23..dd8634dde1ae 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1274,7 +1274,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
* otherwise a RCU stall occurs.
*/
[CPUHP_TIMERS_PREPARE] = {
- .name = "timers:dead",
+ .name = "timers:prepare",
.startup.single = timers_prepare_cpu,
.teardown.single = timers_dead_cpu,
},
@@ -1344,6 +1344,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.startup.single = perf_event_init_cpu,
.teardown.single = perf_event_exit_cpu,
},
+ [CPUHP_AP_WATCHDOG_ONLINE] = {
+ .name = "lockup_detector:online",
+ .startup.single = lockup_detector_online_cpu,
+ .teardown.single = lockup_detector_offline_cpu,
+ },
[CPUHP_AP_WORKQUEUE_ONLINE] = {
.name = "workqueue:online",
.startup.single = workqueue_online_cpu,
@@ -2010,7 +2015,7 @@ void __init boot_cpu_init(void)
/*
* Must be called _AFTER_ setting up the per_cpu areas
*/
-void __init boot_cpu_state_init(void)
+void __init boot_cpu_hotplug_init(void)
{
per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}
diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c
index 79e9a757387f..031fe235d958 100644
--- a/kernel/dma/noncoherent.c
+++ b/kernel/dma/noncoherent.c
@@ -49,11 +49,13 @@ static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+ arch_sync_dma_for_cpu_all(dev);
}
static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
@@ -64,6 +66,7 @@ static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
for_each_sg(sgl, sg, nents, i)
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu_all(dev);
}
static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
@@ -89,7 +92,8 @@ const struct dma_map_ops dma_noncoherent_ops = {
.sync_sg_for_device = dma_noncoherent_sync_sg_for_device,
.map_page = dma_noncoherent_map_page,
.map_sg = dma_noncoherent_map_sg,
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
.sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu,
.sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu,
.unmap_page = dma_noncoherent_unmap_page,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eec2d5fb676b..f6ea33a9f904 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1656,7 +1656,7 @@ perf_event_groups_next(struct perf_event *event)
typeof(*event), group_node))
/*
- * Add a event from the lists for its context.
+ * Add an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
@@ -1844,7 +1844,7 @@ static void perf_group_attach(struct perf_event *event)
}
/*
- * Remove a event from the lists for its context.
+ * Remove an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
@@ -2148,7 +2148,7 @@ static void __perf_event_disable(struct perf_event *event,
}
/*
- * Disable a event.
+ * Disable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
@@ -2677,7 +2677,7 @@ static void __perf_event_enable(struct perf_event *event,
}
/*
- * Enable a event.
+ * Enable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
@@ -2755,7 +2755,7 @@ static int __perf_event_stop(void *info)
* events will refuse to restart because of rb::aux_mmap_count==0,
* see comments in perf_aux_output_begin().
*
- * Since this is happening on a event-local CPU, no trace is lost
+ * Since this is happening on an event-local CPU, no trace is lost
* while restarting.
*/
if (sd->restart)
@@ -4827,7 +4827,7 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
int ret;
/*
- * Return end-of-file for a read on a event that is in
+ * Return end-of-file for a read on an event that is in
* error state (i.e. because it was pinned but it couldn't be
* scheduled on to the CPU at some point).
*/
@@ -5273,11 +5273,11 @@ unlock:
}
EXPORT_SYMBOL_GPL(perf_event_update_userpage);
-static int perf_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
{
struct perf_event *event = vmf->vma->vm_file->private_data;
struct ring_buffer *rb;
- int ret = VM_FAULT_SIGBUS;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
if (vmf->pgoff == 0)
@@ -9904,7 +9904,7 @@ enabled:
}
/*
- * Allocate and initialize a event structure
+ * Allocate and initialize an event structure
*/
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
@@ -11235,7 +11235,7 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
}
/*
- * Inherit a event from parent task to child task.
+ * Inherit an event from parent task to child task.
*
* Returns:
* - valid pointer on success
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 6e28d2866be5..b3814fce5ecb 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -345,13 +345,13 @@ void release_bp_slot(struct perf_event *bp)
mutex_unlock(&nr_bp_mutex);
}
-static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
int err;
__release_bp_slot(bp, old_type);
- err = __reserve_bp_slot(bp, bp->attr.bp_type);
+ err = __reserve_bp_slot(bp, new_type);
if (err) {
/*
* Reserve the old_type slot back in case
@@ -367,12 +367,12 @@ static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
return err;
}
-static int modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
int ret;
mutex_lock(&nr_bp_mutex);
- ret = __modify_bp_slot(bp, old_type);
+ ret = __modify_bp_slot(bp, old_type, new_type);
mutex_unlock(&nr_bp_mutex);
return ret;
}
@@ -400,16 +400,18 @@ int dbg_release_bp_slot(struct perf_event *bp)
return 0;
}
-static int validate_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- int ret;
+ int err;
- ret = arch_validate_hwbkpt_settings(bp);
- if (ret)
- return ret;
+ err = hw_breakpoint_arch_parse(bp, attr, hw);
+ if (err)
+ return err;
- if (arch_check_bp_in_kernelspace(bp)) {
- if (bp->attr.exclude_kernel)
+ if (arch_check_bp_in_kernelspace(hw)) {
+ if (attr->exclude_kernel)
return -EINVAL;
/*
* Don't let unprivileged users set a breakpoint in the trap
@@ -424,19 +426,22 @@ static int validate_hw_breakpoint(struct perf_event *bp)
int register_perf_hw_breakpoint(struct perf_event *bp)
{
- int ret;
-
- ret = reserve_bp_slot(bp);
- if (ret)
- return ret;
+ struct arch_hw_breakpoint hw;
+ int err;
- ret = validate_hw_breakpoint(bp);
+ err = reserve_bp_slot(bp);
+ if (err)
+ return err;
- /* if arch_validate_hwbkpt_settings() fails then release bp slot */
- if (ret)
+ err = hw_breakpoint_parse(bp, &bp->attr, &hw);
+ if (err) {
release_bp_slot(bp);
+ return err;
+ }
- return ret;
+ bp->hw.info = hw;
+
+ return 0;
}
/**
@@ -456,35 +461,44 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
+static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
+ struct perf_event_attr *from)
+{
+ to->bp_addr = from->bp_addr;
+ to->bp_type = from->bp_type;
+ to->bp_len = from->bp_len;
+ to->disabled = from->disabled;
+}
+
int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
bool check)
{
- u64 old_addr = bp->attr.bp_addr;
- u64 old_len = bp->attr.bp_len;
- int old_type = bp->attr.bp_type;
- bool modify = attr->bp_type != old_type;
- int err = 0;
+ struct arch_hw_breakpoint hw;
+ int err;
- bp->attr.bp_addr = attr->bp_addr;
- bp->attr.bp_type = attr->bp_type;
- bp->attr.bp_len = attr->bp_len;
+ err = hw_breakpoint_parse(bp, attr, &hw);
+ if (err)
+ return err;
- if (check && memcmp(&bp->attr, attr, sizeof(*attr)))
- return -EINVAL;
+ if (check) {
+ struct perf_event_attr old_attr;
- err = validate_hw_breakpoint(bp);
- if (!err && modify)
- err = modify_bp_slot(bp, old_type);
+ old_attr = bp->attr;
+ hw_breakpoint_copy_attr(&old_attr, attr);
+ if (memcmp(&old_attr, attr, sizeof(*attr)))
+ return -EINVAL;
+ }
- if (err) {
- bp->attr.bp_addr = old_addr;
- bp->attr.bp_type = old_type;
- bp->attr.bp_len = old_len;
- return err;
+ if (bp->attr.bp_type != attr->bp_type) {
+ err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
+ if (err)
+ return err;
}
- bp->attr.disabled = attr->disabled;
+ hw_breakpoint_copy_attr(&bp->attr, attr);
+ bp->hw.info = hw;
+
return 0;
}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ccc579a7d32e..aed1ba569954 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -918,7 +918,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
EXPORT_SYMBOL_GPL(uprobe_register);
/*
- * uprobe_apply - unregister a already registered probe.
+ * uprobe_apply - unregister an already registered probe.
* @inode: the file in which the probe has to be removed.
* @offset: offset from the start of the file.
* @uc: consumer which wants to add more or remove some breakpoints
@@ -947,7 +947,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
}
/*
- * uprobe_unregister - unregister a already registered probe.
+ * uprobe_unregister - unregister an already registered probe.
* @inode: the file in which the probe has to be removed.
* @offset: offset from the start of the file.
* @uc: identify which probe if multiple probes are colocated.
@@ -1403,7 +1403,7 @@ static struct return_instance *free_ret_instance(struct return_instance *ri)
/*
* Called with no locks held.
- * Called in context of a exiting or a exec-ing thread.
+ * Called in context of an exiting or an exec-ing thread.
*/
void uprobe_free_utask(struct task_struct *t)
{
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 5349c91c2298..bc80a4e268c0 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -184,9 +184,6 @@ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
if (should_fail(&fei_fault_attr, 1)) {
regs_set_return_value(regs, attr->retval);
override_function_with_return(regs);
- /* Kprobe specific fixup */
- reset_current_kprobe();
- preempt_enable_no_resched();
return 1;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 1b27babc4c78..9d8d0e016fc6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2276,6 +2276,8 @@ static void sighand_ctor(void *data)
void __init proc_caches_init(void)
{
+ unsigned int mm_size;
+
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -2292,15 +2294,16 @@ void __init proc_caches_init(void)
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
+
/*
- * FIXME! The "sizeof(struct mm_struct)" currently includes the
- * whole struct cpumask for the OFFSTACK case. We could change
- * this to *only* allocate as much of it as required by the
- * maximum number of CPU's we can ever have. The cpumask_allocation
- * is at the end of the structure, exactly for that reason.
+ * The mm_cpumask is located at the end of mm_struct, and is
+ * dynamically sized based on the maximum CPU number this system
+ * can have, taking hotplug into account (nr_cpu_ids).
*/
+ mm_size = sizeof(struct mm_struct) + cpumask_size();
+
mm_cachep = kmem_cache_create_usercopy("mm_struct",
- sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
+ mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
offsetof(struct mm_struct, saved_auxv),
sizeof_field(struct mm_struct, saved_auxv),
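
Aside: the sizing arithmetic above in standalone form. cpumask_size() here is a stand-in that, like the kernel helper under CONFIG_CPUMASK_OFFSTACK, returns the bytes needed for a bitmap of nr_cpu_ids bits; the figures are illustrative only.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Stand-in for the kernel's cpumask_size(): bytes for nr_cpu_ids bits. */
static size_t cpumask_size(unsigned int nr_cpu_ids)
{
	return BITS_TO_LONGS(nr_cpu_ids) * sizeof(unsigned long);
}

int main(void)
{
	size_t base = 1024;	/* pretend sizeof(struct mm_struct) */

	/* 64 possible CPUs: one extra long, 8 bytes on 64-bit. */
	printf("mm_size = %zu\n", base + cpumask_size(64));	/* 1032 */
	/* 4096 possible CPUs: 512 extra bytes, scaled to the machine. */
	printf("mm_size = %zu\n", base + cpumask_size(4096));	/* 1536 */
	return 0;
}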
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index c6766f326072..5f3e2baefca9 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -134,7 +134,6 @@ config GENERIC_IRQ_DEBUGFS
endmenu
config GENERIC_IRQ_MULTI_HANDLER
- depends on !MULTI_IRQ_HANDLER
bool
help
Allow to specify the low level IRQ handler at run time.
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index afc7f902d74a..578d0e5f1b5b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -443,6 +443,7 @@ static void free_desc(unsigned int irq)
* We free the descriptor, masks and stat fields via RCU. That
* allows demultiplex interrupts to do rcu based management of
* the child interrupts.
+ * This also allows us to use rcu in kstat_irqs_usr().
*/
call_rcu(&desc->rcu, delayed_free_desc);
}
@@ -928,17 +929,17 @@ unsigned int kstat_irqs(unsigned int irq)
* kstat_irqs_usr - Get the statistics for an interrupt
* @irq: The interrupt number
*
- * Returns the sum of interrupt counts on all cpus since boot for
- * @irq. Contrary to kstat_irqs() this can be called from any
- * preemptible context. It's protected against concurrent removal of
- * an interrupt descriptor when sparse irqs are enabled.
+ * Returns the sum of interrupt counts on all cpus since boot for @irq.
+ * Contrary to kstat_irqs() this can be called from any context.
+ * It uses rcu since a concurrent removal of an interrupt descriptor
+ * observes an rcu grace period before delayed_free_desc()/irq_kobj_release().
*/
unsigned int kstat_irqs_usr(unsigned int irq)
{
unsigned int sum;
- irq_lock_sparse();
+ rcu_read_lock();
sum = kstat_irqs(irq);
- irq_unlock_sparse();
+ rcu_read_unlock();
return sum;
}
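
Aside: a kernel-style sketch (hypothetical struct and functions, not the irqdesc code itself) of the pairing this hunk relies on: readers dereference under rcu_read_lock(), and the remover defers the free through call_rcu(), so a reader can never see freed memory.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_desc {
	unsigned int stat;
	struct rcu_head rcu;
};

static struct foo_desc __rcu *foo_slot;

static unsigned int foo_read_stat(void)
{
	struct foo_desc *d;
	unsigned int v = 0;

	rcu_read_lock();			/* works from any context */
	d = rcu_dereference(foo_slot);
	if (d)
		v = d->stat;
	rcu_read_unlock();
	return v;
}

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo_desc, rcu));
}

static void foo_remove(void)
{
	struct foo_desc *d = rcu_dereference_protected(foo_slot, 1);

	rcu_assign_pointer(foo_slot, NULL);
	call_rcu(&d->rcu, foo_free_rcu);	/* freed only after a grace period */
}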
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9a8b7ba9aa88..fb86146037a7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
static int irq_wait_for_interrupt(struct irqaction *action)
{
- set_current_state(TASK_INTERRUPTIBLE);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
+ if (kthread_should_stop()) {
+ /* may need to run one last time */
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ __set_current_state(TASK_RUNNING);
+ return -1;
+ }
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return 0;
}
schedule();
- set_current_state(TASK_INTERRUPTIBLE);
}
- __set_current_state(TASK_RUNNING);
- return -1;
}
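
Aside: the loop shape introduced above, reduced to a kernel-style template with hypothetical flag and struct names. The ordering matters: set TASK_INTERRUPTIBLE before testing the wake conditions so a wakeup arriving between the test and schedule() is not lost, and honor one final pending run even when a stop has been requested.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#define MY_RUNTHREAD	0			/* hypothetical flag bit */

struct my_action { unsigned long flags; };	/* hypothetical */

static int wait_for_work(struct my_action *act)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* before the tests */

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			/* one last run if work snuck in before the stop */
			return test_and_clear_bit(MY_RUNTHREAD,
						  &act->flags) ? 0 : -1;
		}
		if (test_and_clear_bit(MY_RUNTHREAD, &act->flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();	/* sleep until the next wakeup */
	}
}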
/*
@@ -1024,11 +1031,8 @@ static int irq_thread(void *data)
/*
* This is the regular exit path. __free_irq() is stopping the
* thread via kthread_stop() after calling
- * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
- * oneshot mask bit can be set. We cannot verify that as we
- * cannot touch the oneshot mask at this point anymore as
- * __setup_irq() might have given out currents thread_mask
- * again.
+ * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+ * oneshot mask bit can be set.
*/
task_work_cancel(current, irq_thread_dtor);
return 0;
@@ -1251,8 +1255,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* Protects against a concurrent __free_irq() call which might wait
- * for synchronize_irq() to complete without holding the optional
- * chip bus lock and desc->lock.
+ * for synchronize_hardirq() to complete without holding the optional
+ * chip bus lock and desc->lock. Also protects against handing out
+ * a recycled oneshot thread_mask bit while it's still in use by
+ * its previous owner.
*/
mutex_lock(&desc->request_mutex);
@@ -1571,9 +1577,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
- if (!desc)
- return NULL;
-
mutex_lock(&desc->request_mutex);
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1620,11 +1623,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
/*
* Drop bus_lock here so the changes which were done in the chip
* callbacks above are synced out to the irq chips which hang
- * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+ * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
*
* Aside of that the bus_lock can also be taken from the threaded
* handler in irq_finalize_oneshot() which results in a deadlock
- * because synchronize_irq() would wait forever for the thread to
+ * because kthread_stop() would wait forever for the thread to
* complete, which is blocked on the bus lock.
*
* The still held desc->request_mutex() protects against a
@@ -1636,7 +1639,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
unregister_handler_proc(irq, action);
/* Make sure it's not being used on another CPU: */
- synchronize_irq(irq);
+ synchronize_hardirq(irq);
#ifdef CONFIG_DEBUG_SHIRQ
/*
@@ -1645,7 +1648,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
* is so by doing an extra call to the handler ....
*
* ( We do this after actually deregistering it, to make sure that a
- * 'real' IRQ doesn't run in * parallel with our fake. )
+ * 'real' IRQ doesn't run in parallel with our fake. )
*/
if (action->flags & IRQF_SHARED) {
local_irq_save(flags);
@@ -1654,6 +1657,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
}
#endif
+ /*
+ * The action has already been removed above, but the thread writes
+	 * its oneshot mask bit when it completes. However, request_mutex
+	 * is held across this, which prevents __setup_irq() from handing
+	 * out the same bit to a newly requested action.
+ */
if (action->thread) {
kthread_stop(action->thread);
put_task_struct(action->thread);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 37eda10f5c36..da9addb8d655 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -475,22 +475,24 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- irq_lock_sparse();
+ rcu_read_lock();
desc = irq_to_desc(i);
if (!desc)
goto outsparse;
- raw_spin_lock_irqsave(&desc->lock, flags);
- for_each_online_cpu(j)
- any_count |= kstat_irqs_cpu(i, j);
- action = desc->action;
- if ((!action || irq_desc_is_chained(desc)) && !any_count)
- goto out;
+ if (desc->kstat_irqs)
+ for_each_online_cpu(j)
+ any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+
+ if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
+ goto outsparse;
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, "%10u ", desc->kstat_irqs ?
+ *per_cpu_ptr(desc->kstat_irqs, j) : 0);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->irq_data.chip) {
if (desc->irq_data.chip->irq_print_chip)
desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -511,6 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (desc->name)
seq_printf(p, "-%-8s", desc->name);
+ action = desc->action;
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
@@ -518,10 +521,9 @@ int show_interrupts(struct seq_file *p, void *v)
}
seq_putc(p, '\n');
-out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
- irq_unlock_sparse();
+ rcu_read_unlock();
return 0;
}
#endif
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ea619021d901..ab257be4d924 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -627,8 +627,8 @@ static void optimize_kprobe(struct kprobe *p)
(kprobe_disabled(p) || kprobes_all_disarmed))
return;
- /* Both of break_handler and post_handler are not supported. */
- if (p->break_handler || p->post_handler)
+	/* kprobes with post_handler cannot be optimized */
+ if (p->post_handler)
return;
op = container_of(p, struct optimized_kprobe, kp);
@@ -710,9 +710,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
* there is still a relative jump) and disabled.
*/
op = container_of(ap, struct optimized_kprobe, kp);
- if (unlikely(list_empty(&op->list)))
- printk(KERN_WARNING "Warning: found a stray unused "
- "aggrprobe@%p\n", ap->addr);
+ WARN_ON_ONCE(list_empty(&op->list));
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
@@ -985,7 +983,8 @@ static int arm_kprobe_ftrace(struct kprobe *p)
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 0, 0);
if (ret) {
- pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
+ p->addr, ret);
return ret;
}
@@ -1025,7 +1024,8 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 1, 0);
- WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
+ p->addr, ret);
return ret;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
@@ -1116,20 +1116,6 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
}
NOKPROBE_SYMBOL(aggr_fault_handler);
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe *cur = __this_cpu_read(kprobe_instance);
- int ret = 0;
-
- if (cur && cur->break_handler) {
- if (cur->break_handler(cur, regs))
- ret = 1;
- }
- reset_kprobe_instance();
- return ret;
-}
-NOKPROBE_SYMBOL(aggr_break_handler);
-
/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
@@ -1270,24 +1256,15 @@ static void cleanup_rp_inst(struct kretprobe *rp)
}
NOKPROBE_SYMBOL(cleanup_rp_inst);
-/*
-* Add the new probe to ap->list. Fail if this is the
-* second jprobe at the address - two jprobes can't coexist
-*/
+/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
- if (p->break_handler || p->post_handler)
+ if (p->post_handler)
unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
- if (p->break_handler) {
- if (ap->break_handler)
- return -EEXIST;
- list_add_tail_rcu(&p->list, &ap->list);
- ap->break_handler = aggr_break_handler;
- } else
- list_add_rcu(&p->list, &ap->list);
+ list_add_rcu(&p->list, &ap->list);
if (p->post_handler && !ap->post_handler)
ap->post_handler = aggr_post_handler;
@@ -1310,8 +1287,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
/* We don't care the kprobe which has gone. */
if (p->post_handler && !kprobe_gone(p))
ap->post_handler = aggr_post_handler;
- if (p->break_handler && !kprobe_gone(p))
- ap->break_handler = aggr_break_handler;
INIT_LIST_HEAD(&ap->list);
INIT_HLIST_NODE(&ap->hlist);
@@ -1706,8 +1681,6 @@ static int __unregister_kprobe_top(struct kprobe *p)
goto disarmed;
else {
/* If disabling probe has special handlers, update aggrprobe */
- if (p->break_handler && !kprobe_gone(p))
- ap->break_handler = NULL;
if (p->post_handler && !kprobe_gone(p)) {
list_for_each_entry_rcu(list_p, &ap->list, list) {
if ((list_p != p) && (list_p->post_handler))
@@ -1812,77 +1785,6 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
}
-#if 0
-int register_jprobes(struct jprobe **jps, int num)
-{
- int ret = 0, i;
-
- if (num <= 0)
- return -EINVAL;
-
- for (i = 0; i < num; i++) {
- ret = register_jprobe(jps[i]);
-
- if (ret < 0) {
- if (i > 0)
- unregister_jprobes(jps, i);
- break;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(register_jprobes);
-
-int register_jprobe(struct jprobe *jp)
-{
- unsigned long addr, offset;
- struct kprobe *kp = &jp->kp;
-
- /*
- * Verify probepoint as well as the jprobe handler are
- * valid function entry points.
- */
- addr = arch_deref_entry_point(jp->entry);
-
- if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
- kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
- kp->pre_handler = setjmp_pre_handler;
- kp->break_handler = longjmp_break_handler;
- return register_kprobe(kp);
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(register_jprobe);
-
-void unregister_jprobe(struct jprobe *jp)
-{
- unregister_jprobes(&jp, 1);
-}
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-
-void unregister_jprobes(struct jprobe **jps, int num)
-{
- int i;
-
- if (num <= 0)
- return;
- mutex_lock(&kprobe_mutex);
- for (i = 0; i < num; i++)
- if (__unregister_kprobe_top(&jps[i]->kp) < 0)
- jps[i]->kp.addr = NULL;
- mutex_unlock(&kprobe_mutex);
-
- synchronize_sched();
- for (i = 0; i < num; i++) {
- if (jps[i]->kp.addr)
- __unregister_kprobe_bottom(&jps[i]->kp);
- }
-}
-EXPORT_SYMBOL_GPL(unregister_jprobes);
-#endif
-
#ifdef CONFIG_KRETPROBES
/*
* This kprobe pre_handler is registered with every kretprobe. When probe
@@ -1982,7 +1884,6 @@ int register_kretprobe(struct kretprobe *rp)
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
rp->kp.fault_handler = NULL;
- rp->kp.break_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0) {
@@ -2105,7 +2006,6 @@ static void kill_kprobe(struct kprobe *p)
list_for_each_entry_rcu(kp, &p->list, list)
kp->flags |= KPROBE_FLAG_GONE;
p->post_handler = NULL;
- p->break_handler = NULL;
kill_optimized_kprobe(p);
}
/*
@@ -2169,11 +2069,12 @@ out:
}
EXPORT_SYMBOL_GPL(enable_kprobe);
+/* Caller must NOT call this in the usual path; it is only for critical cases */
void dump_kprobe(struct kprobe *kp)
{
- printk(KERN_WARNING "Dumping kprobe:\n");
- printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
- kp->symbol_name, kp->addr, kp->offset);
+ pr_err("Dumping kprobe:\n");
+ pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
+ kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);
@@ -2196,11 +2097,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
entry = arch_deref_entry_point((void *)*iter);
if (!kernel_text_address(entry) ||
- !kallsyms_lookup_size_offset(entry, &size, &offset)) {
- pr_err("Failed to find blacklist at %p\n",
- (void *)entry);
+ !kallsyms_lookup_size_offset(entry, &size, &offset))
continue;
- }
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
@@ -2326,21 +2224,23 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
const char *sym, int offset, char *modname, struct kprobe *pp)
{
char *kprobe_type;
+ void *addr = p->addr;
if (p->pre_handler == pre_handler_kretprobe)
kprobe_type = "r";
- else if (p->pre_handler == setjmp_pre_handler)
- kprobe_type = "j";
else
kprobe_type = "k";
+ if (!kallsyms_show_value())
+ addr = NULL;
+
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s ",
- p->addr, kprobe_type, sym, offset,
+ seq_printf(pi, "%px %s %s+0x%x %s ",
+ addr, kprobe_type, sym, offset,
(modname ? modname : " "));
- else
- seq_printf(pi, "%p %s %p ",
- p->addr, kprobe_type, p->addr);
+ else /* try to use %pS */
+ seq_printf(pi, "%px %s %pS ",
+ addr, kprobe_type, p->addr);
if (!pp)
pp = p;
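
Aside: a kernel-style sketch of the exposure gating used here, assuming only that kallsyms_show_value() reports whether the kptr_restrict policy permits raw addresses. %px prints the unmodified pointer and %pS the symbol+offset form, so the raw form is swapped for NULL when it must stay hidden.

#include <linux/kallsyms.h>
#include <linux/seq_file.h>

static void show_probe_addr(struct seq_file *m, void *addr)
{
	/* Raw address only for privileged viewers; the symbol form is safe. */
	seq_printf(m, "%px %pS\n",
		   kallsyms_show_value() ? addr : NULL, addr);
}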
@@ -2428,8 +2328,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
struct kprobe_blacklist_entry *ent =
list_entry(v, struct kprobe_blacklist_entry, list);
- seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
- (void *)ent->end_addr, (void *)ent->start_addr);
+ /*
+	 * If /proc/kallsyms is not showing kernel addresses, we won't
+ * show them here either.
+ */
+ if (!kallsyms_show_value())
+ seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
+ (void *)ent->start_addr);
+ else
+ seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
+ (void *)ent->end_addr, (void *)ent->start_addr);
return 0;
}
@@ -2611,7 +2519,7 @@ static int __init debugfs_kprobe_init(void)
if (!dir)
return -ENOMEM;
- file = debugfs_create_file("list", 0444, dir, NULL,
+ file = debugfs_create_file("list", 0400, dir, NULL,
&debugfs_kprobes_operations);
if (!file)
goto error;
@@ -2621,7 +2529,7 @@ static int __init debugfs_kprobe_init(void)
if (!file)
goto error;
- file = debugfs_create_file("blacklist", 0444, dir, NULL,
+ file = debugfs_create_file("blacklist", 0400, dir, NULL,
&debugfs_kprobe_blacklist_ops);
if (!file)
goto error;
@@ -2637,6 +2545,3 @@ late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
module_init(init_kprobes);
-
-/* defined in arch/.../kernel/kprobes.c */
-EXPORT_SYMBOL_GPL(jprobe_return);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 486dedbd9af5..087d18d771b5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -190,7 +190,7 @@ static void __kthread_parkme(struct kthread *self)
if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
break;
- complete_all(&self->parked);
+ complete(&self->parked);
schedule();
}
__set_current_state(TASK_RUNNING);
@@ -471,7 +471,6 @@ void kthread_unpark(struct task_struct *k)
if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
__kthread_bind(k, kthread->cpu, TASK_PARKED);
- reinit_completion(&kthread->parked);
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
/*
* __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
@@ -499,6 +498,9 @@ int kthread_park(struct task_struct *k)
if (WARN_ON(k->flags & PF_EXITING))
return -ENOSYS;
+ if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
+ return -EBUSY;
+
set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
if (k != current) {
wake_up_process(k);
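
Aside: a kernel-style sketch of the completion semantics behind these two kthread hunks, with hypothetical names. complete() adds a single token that one wait_for_completion() consumes, so a park/unpark cycle needs no reinit; complete_all() would instead latch the completion done until reinit_completion().

#include <linux/completion.h>

static DECLARE_COMPLETION(parked);

static void signal_parked(void)
{
	complete(&parked);		/* one token, one waiter */
}

static void wait_parked(void)
{
	wait_for_completion(&parked);	/* consumes the token... */
	/* ...so the next wait_parked() blocks again, no reinit needed */
}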
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8402b3349dca..57bef4fbfb31 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -21,6 +21,9 @@
* Davidlohr Bueso <dave@stgolabs.net>
* Based on kernel/rcu/torture.c.
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
@@ -57,7 +60,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
"Enable verbose debugging printk()s");
static char *torture_type = "spin_lock";
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 87331565e505..70178f6ffdc4 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -92,7 +92,7 @@ static void s2idle_enter(void)
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
/* Make the current CPU wait so it can enter the idle loop too. */
- swait_event(s2idle_wait_head,
+ swait_event_exclusive(s2idle_wait_head,
s2idle_state == S2IDLE_STATE_WAKE);
cpuidle_pause();
@@ -160,7 +160,7 @@ void s2idle_wake(void)
raw_spin_lock_irqsave(&s2idle_lock, flags);
if (s2idle_state > S2IDLE_STATE_NONE) {
s2idle_state = S2IDLE_STATE_WAKE;
- swake_up(&s2idle_wait_head);
+ swake_up_one(&s2idle_wait_head);
}
raw_spin_unlock_irqrestore(&s2idle_lock, flags);
}
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 40cea6735c2d..4d04683c31b2 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -91,7 +91,17 @@ static inline void rcu_seq_end(unsigned long *sp)
WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
-/* Take a snapshot of the update side's sequence number. */
+/*
+ * rcu_seq_snap - Take a snapshot of the update side's sequence number.
+ *
+ * This function returns the earliest value of the grace-period sequence number
+ * that will indicate that a full grace period has elapsed since the current
+ * time. Once the grace-period sequence number has reached this value, it will
+ * be safe to invoke all callbacks that have been registered prior to the
+ * current time. This value is the current grace-period number plus two to the
+ * power of the number of low-order bits reserved for state, then rounded up to
+ * the next value in which the state bits are all zero.
+ */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
unsigned long s;
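
Aside: the rounding the new comment describes, as a standalone check. The body of rcu_seq_snap() falls outside this hunk; the helper below mirrors the comment's description, assuming the kernel's two low-order state bits (RCU_SEQ_CTR_SHIFT of 2, so one grace period advances the counter by 4) and omitting the memory barrier.

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* current value + 2^state-bits, rounded up to the next state==0 value */
static unsigned long seq_snap(unsigned long sp)
{
	return (sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

int main(void)
{
	/* Idle (state bits 0): the very next full GP suffices. */
	printf("%lu\n", seq_snap(8));	/* 12: GP 2 done, wait for GP 3 */
	/* Mid-GP (state bits != 0): the current GP must finish first. */
	printf("%lu\n", seq_snap(9));	/* 16: finish GP 3, then all of GP 4 */
	return 0;
}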
@@ -108,6 +118,15 @@ static inline unsigned long rcu_seq_current(unsigned long *sp)
}
/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not the
+ * corresponding update-side operation has started.
+ */
+static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
+{
+ return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
+}
+
+/*
* Given a snapshot from rcu_seq_snap(), determine whether or not a
* full update-side operation has occurred.
*/
@@ -117,6 +136,45 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
}
/*
+ * Has a grace period completed since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
+{
+ return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
+}
+
+/*
+ * Has a grace period started since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
+{
+ return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
+ new);
+}
+
+/*
+ * Roughly how many full grace periods have elapsed between the collection
+ * of the two specified grace periods?
+ */
+static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
+{
+ unsigned long rnd_diff;
+
+ if (old == new)
+ return 0;
+ /*
+ * Compute the number of grace periods (still shifted up), plus
+	 * one if either new or old is not an exact grace-period boundary.
+ */
+ rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
+ ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
+ ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
+ if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
+ return 1; /* Definitely no grace period has elapsed. */
+ return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
+}
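
Aside: the same helper evaluated at a few points, again assuming two state bits (and a plain <= where the kernel uses the wraparound-safe ULONG_CMP_GE). Because each snapshot may have been taken anywhere inside a grace period, the count is deliberately rough and rounds up rather than under-report.

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (rnd_diff <= RCU_SEQ_STATE_MASK)	/* ULONG_CMP_GE in the kernel */
		return 1;
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}

int main(void)
{
	printf("%lu\n", seq_diff(8, 8));	/* 0: identical snapshots */
	printf("%lu\n", seq_diff(9, 8));	/* 1: no full GP yet */
	printf("%lu\n", seq_diff(12, 8));	/* 2: about one GP, rounded up */
	printf("%lu\n", seq_diff(24, 8));	/* 5: about four GPs, rounded up */
	return 0;
}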
+
+/*
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
* by call_rcu() and rcu callback execution, and are therefore not part of the
* RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
@@ -276,6 +334,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
+/* Is this rcu_node the last leaf? */
+#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+
/*
* Do a full breadth-first scan of the rcu_node structures for the
* specified rcu_state structure.
@@ -405,8 +466,7 @@ enum rcutorture_type {
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
- unsigned long *gpnum, unsigned long *completed);
-void rcutorture_record_test_transition(void);
+ unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
@@ -415,15 +475,11 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
- int *flags,
- unsigned long *gpnum,
- unsigned long *completed)
+ int *flags, unsigned long *gp_seq)
{
*flags = 0;
- *gpnum = 0;
- *completed = 0;
+ *gp_seq = 0;
}
-static inline void rcutorture_record_test_transition(void) { }
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -441,31 +497,26 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
struct srcu_struct *sp, int *flags,
- unsigned long *gpnum,
- unsigned long *completed)
+ unsigned long *gp_seq)
{
if (test_type != SRCU_FLAVOR)
return;
*flags = 0;
- *completed = sp->srcu_idx;
- *gpnum = *completed;
+ *gp_seq = sp->srcu_idx;
}
#elif defined(CONFIG_TREE_SRCU)
void srcutorture_get_gp_data(enum rcutorture_type test_type,
struct srcu_struct *sp, int *flags,
- unsigned long *gpnum, unsigned long *completed);
+ unsigned long *gp_seq);
#endif
#ifdef CONFIG_TINY_RCU
-static inline unsigned long rcu_batches_started(void) { return 0; }
-static inline unsigned long rcu_batches_started_bh(void) { return 0; }
-static inline unsigned long rcu_batches_started_sched(void) { return 0; }
-static inline unsigned long rcu_batches_completed(void) { return 0; }
-static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
-static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
+static inline unsigned long rcu_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
@@ -474,19 +525,16 @@ static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
+static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
#else /* #ifdef CONFIG_TINY_RCU */
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_get_gp_seq(void);
+unsigned long rcu_bh_get_gp_seq(void);
+unsigned long rcu_sched_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
+int rcu_get_gp_kthreads_prio(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index e232846516b3..34244523550e 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -19,6 +19,9 @@
*
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -88,7 +91,7 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, !IS_ENABLED(MODULE),
"Shutdown at end of performance tests.");
-torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
static char *perf_type = "rcu";
@@ -135,8 +138,8 @@ struct rcu_perf_ops {
void (*cleanup)(void);
int (*readlock)(void);
void (*readunlock)(int idx);
- unsigned long (*started)(void);
- unsigned long (*completed)(void);
+ unsigned long (*get_gp_seq)(void);
+ unsigned long (*gp_diff)(unsigned long new, unsigned long old);
unsigned long (*exp_completed)(void);
void (*async)(struct rcu_head *head, rcu_callback_t func);
void (*gp_barrier)(void);
@@ -176,8 +179,8 @@ static struct rcu_perf_ops rcu_ops = {
.init = rcu_sync_perf_init,
.readlock = rcu_perf_read_lock,
.readunlock = rcu_perf_read_unlock,
- .started = rcu_batches_started,
- .completed = rcu_batches_completed,
+ .get_gp_seq = rcu_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.exp_completed = rcu_exp_batches_completed,
.async = call_rcu,
.gp_barrier = rcu_barrier,
@@ -206,8 +209,8 @@ static struct rcu_perf_ops rcu_bh_ops = {
.init = rcu_sync_perf_init,
.readlock = rcu_bh_perf_read_lock,
.readunlock = rcu_bh_perf_read_unlock,
- .started = rcu_batches_started_bh,
- .completed = rcu_batches_completed_bh,
+ .get_gp_seq = rcu_bh_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.exp_completed = rcu_exp_batches_completed_sched,
.async = call_rcu_bh,
.gp_barrier = rcu_barrier_bh,
@@ -263,8 +266,8 @@ static struct rcu_perf_ops srcu_ops = {
.init = rcu_sync_perf_init,
.readlock = srcu_perf_read_lock,
.readunlock = srcu_perf_read_unlock,
- .started = NULL,
- .completed = srcu_perf_completed,
+ .get_gp_seq = srcu_perf_completed,
+ .gp_diff = rcu_seq_diff,
.exp_completed = srcu_perf_completed,
.async = srcu_call_rcu,
.gp_barrier = srcu_rcu_barrier,
@@ -292,8 +295,8 @@ static struct rcu_perf_ops srcud_ops = {
.cleanup = srcu_sync_perf_cleanup,
.readlock = srcu_perf_read_lock,
.readunlock = srcu_perf_read_unlock,
- .started = NULL,
- .completed = srcu_perf_completed,
+ .get_gp_seq = srcu_perf_completed,
+ .gp_diff = rcu_seq_diff,
.exp_completed = srcu_perf_completed,
.async = srcu_call_rcu,
.gp_barrier = srcu_rcu_barrier,
@@ -322,8 +325,8 @@ static struct rcu_perf_ops sched_ops = {
.init = rcu_sync_perf_init,
.readlock = sched_perf_read_lock,
.readunlock = sched_perf_read_unlock,
- .started = rcu_batches_started_sched,
- .completed = rcu_batches_completed_sched,
+ .get_gp_seq = rcu_sched_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.exp_completed = rcu_exp_batches_completed_sched,
.async = call_rcu_sched,
.gp_barrier = rcu_barrier_sched,
@@ -350,8 +353,8 @@ static struct rcu_perf_ops tasks_ops = {
.init = rcu_sync_perf_init,
.readlock = tasks_perf_read_lock,
.readunlock = tasks_perf_read_unlock,
- .started = rcu_no_completed,
- .completed = rcu_no_completed,
+ .get_gp_seq = rcu_no_completed,
+ .gp_diff = rcu_seq_diff,
.async = call_rcu_tasks,
.gp_barrier = rcu_barrier_tasks,
.sync = synchronize_rcu_tasks,
@@ -359,9 +362,11 @@ static struct rcu_perf_ops tasks_ops = {
.name = "tasks"
};
-static bool __maybe_unused torturing_tasks(void)
+static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
- return cur_ops == &tasks_ops;
+ if (!cur_ops->gp_diff)
+ return new - old;
+ return cur_ops->gp_diff(new, old);
}
/*
@@ -444,8 +449,7 @@ rcu_perf_writer(void *arg)
b_rcu_perf_writer_started =
cur_ops->exp_completed() / 2;
} else {
- b_rcu_perf_writer_started =
- cur_ops->completed();
+ b_rcu_perf_writer_started = cur_ops->get_gp_seq();
}
}
@@ -502,7 +506,7 @@ retry:
cur_ops->exp_completed() / 2;
} else {
b_rcu_perf_writer_finished =
- cur_ops->completed();
+ cur_ops->get_gp_seq();
}
if (shutdown) {
smp_mb(); /* Assign before wake. */
@@ -527,7 +531,7 @@ retry:
return 0;
}
-static inline void
+static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
pr_alert("%s" PERF_FLAG
@@ -582,8 +586,8 @@ rcu_perf_cleanup(void)
t_rcu_perf_writer_finished -
t_rcu_perf_writer_started,
ngps,
- b_rcu_perf_writer_finished -
- b_rcu_perf_writer_started);
+ rcuperf_seq_diff(b_rcu_perf_writer_finished,
+ b_rcu_perf_writer_started));
for (i = 0; i < nrealwriters; i++) {
if (!writer_durations)
break;
@@ -671,12 +675,11 @@ rcu_perf_init(void)
break;
}
if (i == ARRAY_SIZE(perf_ops)) {
- pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
- perf_type);
+ pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
pr_alert("rcu-perf types:");
for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
- pr_alert(" %s", perf_ops[i]->name);
- pr_alert("\n");
+ pr_cont(" %s", perf_ops[i]->name);
+ pr_cont("\n");
firsterr = -EINVAL;
goto unwind;
}
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 42fcb7f05fac..c596c6f1e457 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -22,6 +22,9 @@
*
* See also: Documentation/RCU/torture.txt
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -52,6 +55,7 @@
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
+#include <linux/sched/sysctl.h>
#include "rcu.h"
@@ -59,6 +63,19 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
+/* Bits for ->extendables field, extendables param, and related definitions. */
+#define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */
+#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
+#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */
+#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */
+#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */
+#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */
+#define RCUTORTURE_RDR_NBITS 4 /* Number of bits defined above. */
+#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
+ RCUTORTURE_RDR_PREEMPT)
+#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
+					/* Must be a power of two minus one. */
+
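
Aside: a quick standalone decode showing how the two halves of the readstate word defined above fit together; idx stands for whatever cur_ops->readlock() returned (0 or 1 for SRCU).

#include <stdio.h>

#define RCUTORTURE_RDR_SHIFT	8
#define RCUTORTURE_RDR_MASK	((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	0x1
#define RCUTORTURE_RDR_RCU	0x8

int main(void)
{
	int idx = 1;	/* pretend srcu_read_lock() returned index 1 */
	int readstate = (idx << RCUTORTURE_RDR_SHIFT) |
			RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RCU;

	printf("flags %#x idx %d\n",
	       readstate & RCUTORTURE_RDR_MASK,		/* 0x9 */
	       readstate >> RCUTORTURE_RDR_SHIFT);	/* 1 */
	return 0;
}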
torture_param(int, cbflood_inter_holdoff, HZ,
"Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
@@ -66,6 +83,8 @@ torture_param(int, cbflood_intra_holdoff, 1,
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
"# callbacks per burst in flood");
+torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
+ "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
"Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
@@ -84,7 +103,7 @@ torture_param(int, object_debug, 0,
"Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
- "Time between CPU hotplugs (s), 0=disable");
+ "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -101,7 +120,7 @@ torture_param(int, test_boost_interval, 7,
"Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
"Test support for tickless idle CPUs");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
"Enable verbose debugging printk()s");
static char *torture_type = "rcu";
@@ -148,9 +167,9 @@ static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
+static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
-static long n_barrier_successes;
+static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;
@@ -261,8 +280,8 @@ struct rcu_torture_ops {
int (*readlock)(void);
void (*read_delay)(struct torture_random_state *rrsp);
void (*readunlock)(int idx);
- unsigned long (*started)(void);
- unsigned long (*completed)(void);
+ unsigned long (*get_gp_seq)(void);
+ unsigned long (*gp_diff)(unsigned long new, unsigned long old);
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*exp_sync)(void);
@@ -274,6 +293,8 @@ struct rcu_torture_ops {
void (*stats)(void);
int irq_capable;
int can_boost;
+ int extendables;
+ int ext_irq_conflict;
const char *name;
};
@@ -302,10 +323,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
* force_quiescent_state. */
if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
- started = cur_ops->completed();
+ started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
mdelay(longdelay_ms);
- completed = cur_ops->completed();
+ completed = cur_ops->get_gp_seq();
do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
started, completed);
}
@@ -397,8 +418,8 @@ static struct rcu_torture_ops rcu_ops = {
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
- .started = rcu_batches_started,
- .completed = rcu_batches_completed,
+ .get_gp_seq = rcu_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
.exp_sync = synchronize_rcu_expedited,
@@ -439,8 +460,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
- .started = rcu_batches_started_bh,
- .completed = rcu_batches_completed_bh,
+ .get_gp_seq = rcu_bh_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_bh_torture_deferred_free,
.sync = synchronize_rcu_bh,
.exp_sync = synchronize_rcu_bh_expedited,
@@ -449,6 +470,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
+ .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
+ .ext_irq_conflict = RCUTORTURE_RDR_RCU,
.name = "rcu_bh"
};
@@ -483,8 +506,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
- .started = rcu_no_completed,
- .completed = rcu_no_completed,
+ .get_gp_seq = rcu_no_completed,
.deferred_free = rcu_busted_torture_deferred_free,
.sync = synchronize_rcu_busted,
.exp_sync = synchronize_rcu_busted,
@@ -572,8 +594,7 @@ static struct rcu_torture_ops srcu_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
- .started = NULL,
- .completed = srcu_torture_completed,
+ .get_gp_seq = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
@@ -610,8 +631,7 @@ static struct rcu_torture_ops srcud_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
- .started = NULL,
- .completed = srcu_torture_completed,
+ .get_gp_seq = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
@@ -622,6 +642,26 @@ static struct rcu_torture_ops srcud_ops = {
.name = "srcud"
};
+/* As above, but broken due to inappropriate reader extension. */
+static struct rcu_torture_ops busted_srcud_ops = {
+ .ttype = SRCU_FLAVOR,
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = srcu_torture_read_unlock,
+ .get_gp_seq = srcu_torture_completed,
+ .deferred_free = srcu_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .exp_sync = srcu_torture_synchronize_expedited,
+ .call = srcu_torture_call,
+ .cb_barrier = srcu_torture_barrier,
+ .stats = srcu_torture_stats,
+ .irq_capable = 1,
+ .extendables = RCUTORTURE_MAX_EXTEND,
+ .name = "busted_srcud"
+};
+
/*
* Definitions for sched torture testing.
*/
@@ -648,8 +688,8 @@ static struct rcu_torture_ops sched_ops = {
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
- .started = rcu_batches_started_sched,
- .completed = rcu_batches_completed_sched,
+ .get_gp_seq = rcu_sched_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = synchronize_sched,
.exp_sync = synchronize_sched_expedited,
@@ -660,6 +700,7 @@ static struct rcu_torture_ops sched_ops = {
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
+ .extendables = RCUTORTURE_MAX_EXTEND,
.name = "sched"
};
@@ -687,8 +728,7 @@ static struct rcu_torture_ops tasks_ops = {
.readlock = tasks_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = tasks_torture_read_unlock,
- .started = rcu_no_completed,
- .completed = rcu_no_completed,
+ .get_gp_seq = rcu_no_completed,
.deferred_free = rcu_tasks_torture_deferred_free,
.sync = synchronize_rcu_tasks,
.exp_sync = synchronize_rcu_tasks,
@@ -700,6 +740,13 @@ static struct rcu_torture_ops tasks_ops = {
.name = "tasks"
};
+static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
+{
+ if (!cur_ops->gp_diff)
+ return new - old;
+ return cur_ops->gp_diff(new, old);
+}
+
static bool __maybe_unused torturing_tasks(void)
{
return cur_ops == &tasks_ops;
@@ -726,6 +773,44 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
smp_store_release(&rbip->inflight, 0);
}
+static int old_rt_runtime = -1;
+
+static void rcu_torture_disable_rt_throttle(void)
+{
+ /*
+ * Disable RT throttling so that rcutorture's boost threads don't get
+	 * throttled. This is only possible if rcutorture is built-in;
+	 * otherwise the user must do it manually by setting the
+	 * sched_rt_period_us and sched_rt_runtime sysctls.
+ */
+ if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
+ return;
+
+ old_rt_runtime = sysctl_sched_rt_runtime;
+ sysctl_sched_rt_runtime = -1;
+}
+
+static void rcu_torture_enable_rt_throttle(void)
+{
+ if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
+ return;
+
+ sysctl_sched_rt_runtime = old_rt_runtime;
+ old_rt_runtime = -1;
+}
+
+static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
+{
+ if (end - start > test_boost_duration * HZ - HZ / 2) {
+ VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
+ n_rcu_torture_boost_failure++;
+
+ return true; /* failed */
+ }
+
+ return false; /* passed */
+}
+
static int rcu_torture_boost(void *arg)
{
unsigned long call_rcu_time;
@@ -746,6 +831,21 @@ static int rcu_torture_boost(void *arg)
init_rcu_head_on_stack(&rbi.rcu);
/* Each pass through the following loop does one boost-test cycle. */
do {
+		/* Track whether the test already failed in this test interval. */
+ bool failed = false;
+
+ /* Increment n_rcu_torture_boosts once per boost-test */
+ while (!kthread_should_stop()) {
+ if (mutex_trylock(&boost_mutex)) {
+ n_rcu_torture_boosts++;
+ mutex_unlock(&boost_mutex);
+ break;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+ if (kthread_should_stop())
+ goto checkwait;
+
/* Wait for the next test interval. */
oldstarttime = boost_starttime;
while (ULONG_CMP_LT(jiffies, oldstarttime)) {
@@ -764,11 +864,10 @@ static int rcu_torture_boost(void *arg)
/* RCU core before ->inflight = 1. */
smp_store_release(&rbi.inflight, 1);
call_rcu(&rbi.rcu, rcu_torture_boost_cb);
- if (jiffies - call_rcu_time >
- test_boost_duration * HZ - HZ / 2) {
- VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
- n_rcu_torture_boost_failure++;
- }
+ /* Check if the boost test failed */
+ failed = failed ||
+ rcu_torture_boost_failed(call_rcu_time,
+ jiffies);
call_rcu_time = jiffies;
}
stutter_wait("rcu_torture_boost");
@@ -777,6 +876,14 @@ static int rcu_torture_boost(void *arg)
}
/*
+	 * If boost never happened, then inflight will always be 1; in
+	 * that case the boost check in the loop above never ran, so do
+	 * one more here.
+ */
+ if (!failed && smp_load_acquire(&rbi.inflight))
+ rcu_torture_boost_failed(call_rcu_time, jiffies);
+
+ /*
* Set the start time of the next test interval.
* Yes, this is vulnerable to long delays, but such
* delays simply cause a false negative for the next
@@ -788,7 +895,6 @@ static int rcu_torture_boost(void *arg)
if (mutex_trylock(&boost_mutex)) {
boost_starttime = jiffies +
test_boost_interval * HZ;
- n_rcu_torture_boosts++;
mutex_unlock(&boost_mutex);
break;
}
@@ -1010,7 +1116,7 @@ rcu_torture_writer(void *arg)
break;
}
}
- rcutorture_record_progress(++rcu_torture_current_version);
+ rcu_torture_current_version++;
/* Cycle through nesting levels of rcu_expedite_gp() calls. */
if (can_expedite &&
!(torture_random(&rand) & 0xff & (!!expediting - 1))) {
@@ -1084,27 +1190,133 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
}
/*
- * RCU torture reader from timer handler. Dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array. The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
+ * Do one extension of an RCU read-side critical section using the
+ * current reader state in readstate (set to zero for initial entry
+ * to extended critical section), set the new state as specified by
+ * newstate (set to zero for final exit from extended critical section),
+ * and random-number-generator state in trsp. If this is neither the
+ * beginning nor the end of the critical section, and if there was actually a
+ * change, do a ->read_delay().
*/
-static void rcu_torture_timer(struct timer_list *unused)
+static void rcutorture_one_extend(int *readstate, int newstate,
+ struct torture_random_state *trsp)
+{
+ int idxnew = -1;
+ int idxold = *readstate;
+ int statesnew = ~*readstate & newstate;
+ int statesold = *readstate & ~newstate;
+
+ WARN_ON_ONCE(idxold < 0);
+ WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+
+ /* First, put new protection in place to avoid critical-section gap. */
+ if (statesnew & RCUTORTURE_RDR_BH)
+ local_bh_disable();
+ if (statesnew & RCUTORTURE_RDR_IRQ)
+ local_irq_disable();
+ if (statesnew & RCUTORTURE_RDR_PREEMPT)
+ preempt_disable();
+ if (statesnew & RCUTORTURE_RDR_RCU)
+ idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
+
+ /* Next, remove old protection, irq first due to bh conflict. */
+ if (statesold & RCUTORTURE_RDR_IRQ)
+ local_irq_enable();
+ if (statesold & RCUTORTURE_RDR_BH)
+ local_bh_enable();
+ if (statesold & RCUTORTURE_RDR_PREEMPT)
+ preempt_enable();
+ if (statesold & RCUTORTURE_RDR_RCU)
+ cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
+
+ /* Delay if neither beginning nor end and there was a change. */
+ if ((statesnew || statesold) && *readstate && newstate)
+ cur_ops->read_delay(trsp);
+
+ /* Update the reader state. */
+ if (idxnew == -1)
+ idxnew = idxold & ~RCUTORTURE_RDR_MASK;
+ WARN_ON_ONCE(idxnew < 0);
+ WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
+ *readstate = idxnew | newstate;
+ WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
+ WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
+}
+
+/* Return the biggest extendables mask given current RCU and boot parameters. */
+static int rcutorture_extend_mask_max(void)
+{
+ int mask;
+
+ WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
+ mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
+ mask = mask | RCUTORTURE_RDR_RCU;
+ return mask;
+}
+
+/* Return a random protection state mask, but with at least one bit set. */
+static int
+rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+{
+ int mask = rcutorture_extend_mask_max();
+ unsigned long randmask1 = torture_random(trsp) >> 8;
+ unsigned long randmask2 = randmask1 >> 1;
+
+ WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
+ /* Half the time lots of bits, half the time only one bit. */
+ if (randmask1 & 0x1)
+ mask = mask & randmask2;
+ else
+ mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+ if ((mask & RCUTORTURE_RDR_IRQ) &&
+ !(mask & RCUTORTURE_RDR_BH) &&
+ (oldmask & RCUTORTURE_RDR_BH))
+ mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
+ if ((mask & RCUTORTURE_RDR_IRQ) &&
+ !(mask & cur_ops->ext_irq_conflict) &&
+ (oldmask & cur_ops->ext_irq_conflict))
+ mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
+ return mask ?: RCUTORTURE_RDR_RCU;
+}
+
+/*
+ * Do a randomly selected number of extensions of an existing RCU read-side
+ * critical section.
+ */
+static void rcutorture_loop_extend(int *readstate,
+ struct torture_random_state *trsp)
+{
+ int i;
+ int mask = rcutorture_extend_mask_max();
+
+ WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
+ if (!((mask - 1) & mask))
+ return; /* Current RCU flavor not extendable. */
+ i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
+ while (i--) {
+ mask = rcutorture_extend_mask(*readstate, trsp);
+ rcutorture_one_extend(readstate, mask, trsp);
+ }
+}
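
Aside: why RCUTORTURE_RDR_MAX_LOOPS must be a power of two minus one. AND-ing a random word with such a mask yields a uniform 0..mask loop count with no modulo bias; a standalone check:

#include <stdio.h>
#include <stdlib.h>

#define MAX_LOOPS 0x7	/* power of two minus one, as above */

int main(void)
{
	int hist[MAX_LOOPS + 1] = { 0 };
	int i;

	srand(42);
	for (i = 0; i < 80000; i++)
		hist[(rand() >> 3) & MAX_LOOPS]++;	/* mask, not mod */
	for (i = 0; i <= MAX_LOOPS; i++)
		printf("%d: %d\n", i, hist[i]);		/* roughly 10000 each */
	return 0;
}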
+
+/*
+ * Do one read-side critical section, returning false if there was
+ * no data to read. Can be invoked both from process context and
+ * from a timer handler.
+ */
+static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
- int idx;
unsigned long started;
unsigned long completed;
- static DEFINE_TORTURE_RANDOM(rand);
- static DEFINE_SPINLOCK(rand_lock);
+ int newstate;
struct rcu_torture *p;
int pipe_count;
+ int readstate = 0;
unsigned long long ts;
- idx = cur_ops->readlock();
- if (cur_ops->started)
- started = cur_ops->started();
- else
- started = cur_ops->completed();
+ newstate = rcutorture_extend_mask(readstate, trsp);
+ rcutorture_one_extend(&readstate, newstate, trsp);
+ started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
rcu_read_lock_bh_held() ||
@@ -1112,39 +1324,50 @@ static void rcu_torture_timer(struct timer_list *unused)
srcu_read_lock_held(srcu_ctlp) ||
torturing_tasks());
if (p == NULL) {
- /* Leave because rcu_torture_writer is not yet underway */
- cur_ops->readunlock(idx);
- return;
+ /* Wait for rcu_torture_writer to get underway */
+ rcutorture_one_extend(&readstate, 0, trsp);
+ return false;
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
- spin_lock(&rand_lock);
- cur_ops->read_delay(&rand);
- n_rcu_torture_timers++;
- spin_unlock(&rand_lock);
+ rcutorture_loop_extend(&readstate, trsp);
preempt_disable();
pipe_count = p->rtort_pipe_count;
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- completed = cur_ops->completed();
+ completed = cur_ops->get_gp_seq();
if (pipe_count > 1) {
- do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
- started, completed);
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+ ts, started, completed);
rcu_ftrace_dump(DUMP_ALL);
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed - started;
- if (cur_ops->started)
- completed++;
+ completed = rcutorture_seq_diff(completed, started);
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
- cur_ops->readunlock(idx);
+ rcutorture_one_extend(&readstate, 0, trsp);
+ WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
+ return true;
+}
+
+static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
+
+/*
+ * RCU torture reader from timer handler. Dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array. The
+ * counter in the element should never be greater than 1, otherwise, the
+ * RCU implementation is broken.
+ */
+static void rcu_torture_timer(struct timer_list *unused)
+{
+ atomic_long_inc(&n_rcu_torture_timers);
+ (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
/* Test call_rcu() invocation from interrupt handler. */
if (cur_ops->call) {
@@ -1164,14 +1387,8 @@ static void rcu_torture_timer(struct timer_list *unused)
static int
rcu_torture_reader(void *arg)
{
- unsigned long started;
- unsigned long completed;
- int idx;
DEFINE_TORTURE_RANDOM(rand);
- struct rcu_torture *p;
- int pipe_count;
struct timer_list t;
- unsigned long long ts;
VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
set_user_nice(current, MAX_NICE);
@@ -1183,49 +1400,8 @@ rcu_torture_reader(void *arg)
if (!timer_pending(&t))
mod_timer(&t, jiffies + 1);
}
- idx = cur_ops->readlock();
- if (cur_ops->started)
- started = cur_ops->started();
- else
- started = cur_ops->completed();
- ts = rcu_trace_clock_local();
- p = rcu_dereference_check(rcu_torture_current,
- rcu_read_lock_bh_held() ||
- rcu_read_lock_sched_held() ||
- srcu_read_lock_held(srcu_ctlp) ||
- torturing_tasks());
- if (p == NULL) {
- /* Wait for rcu_torture_writer to get underway */
- cur_ops->readunlock(idx);
+ if (!rcu_torture_one_read(&rand))
schedule_timeout_interruptible(HZ);
- continue;
- }
- if (p->rtort_mbtest == 0)
- atomic_inc(&n_rcu_torture_mberror);
- cur_ops->read_delay(&rand);
- preempt_disable();
- pipe_count = p->rtort_pipe_count;
- if (pipe_count > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- pipe_count = RCU_TORTURE_PIPE_LEN;
- }
- completed = cur_ops->completed();
- if (pipe_count > 1) {
- do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
- ts, started, completed);
- rcu_ftrace_dump(DUMP_ALL);
- }
- __this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed - started;
- if (cur_ops->started)
- completed++;
- if (completed > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- completed = RCU_TORTURE_PIPE_LEN;
- }
- __this_cpu_inc(rcu_torture_batch[completed]);
- preempt_enable();
- cur_ops->readunlock(idx);
stutter_wait("rcu_torture_reader");
} while (!torture_must_stop());
if (irqreader && cur_ops->irq_capable) {
@@ -1282,7 +1458,7 @@ rcu_torture_stats_print(void)
pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
- n_rcu_torture_timers);
+ atomic_long_read(&n_rcu_torture_timers));
torture_onoff_stats();
pr_cont("barrier: %ld/%ld:%ld ",
n_barrier_successes,
@@ -1324,18 +1500,16 @@ rcu_torture_stats_print(void)
if (rtcv_snap == rcu_torture_current_version &&
rcu_torture_current != NULL) {
int __maybe_unused flags = 0;
- unsigned long __maybe_unused gpnum = 0;
- unsigned long __maybe_unused completed = 0;
+ unsigned long __maybe_unused gp_seq = 0;
rcutorture_get_gp_data(cur_ops->ttype,
- &flags, &gpnum, &completed);
+ &flags, &gp_seq);
srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
- &flags, &gpnum, &completed);
+ &flags, &gp_seq);
wtp = READ_ONCE(writer_task);
- pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n",
+ pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
rcu_torture_writer_state_getname(),
- rcu_torture_writer_state,
- gpnum, completed, flags,
+ rcu_torture_writer_state, gp_seq, flags,
wtp == NULL ? ~0UL : wtp->state,
wtp == NULL ? -1 : (int)task_cpu(wtp));
if (!splatted && wtp) {
@@ -1365,7 +1539,7 @@ rcu_torture_stats(void *arg)
return 0;
}
-static inline void
+static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
pr_alert("%s" TORTURE_FLAG
@@ -1397,6 +1571,7 @@ static int rcutorture_booster_cleanup(unsigned int cpu)
mutex_lock(&boost_mutex);
t = boost_tasks[cpu];
boost_tasks[cpu] = NULL;
+ rcu_torture_enable_rt_throttle();
mutex_unlock(&boost_mutex);
/* This must be outside of the mutex, otherwise deadlock! */
@@ -1413,6 +1588,7 @@ static int rcutorture_booster_init(unsigned int cpu)
/* Don't allow time recalculation while creating a new task. */
mutex_lock(&boost_mutex);
+ rcu_torture_disable_rt_throttle();
VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
cpu_to_node(cpu),
@@ -1446,7 +1622,7 @@ static int rcu_torture_stall(void *args)
VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
}
if (!kthread_should_stop()) {
- stop_at = get_seconds() + stall_cpu;
+ stop_at = ktime_get_seconds() + stall_cpu;
/* RCU CPU stall is expected behavior in following code. */
rcu_read_lock();
if (stall_cpu_irqsoff)
@@ -1455,7 +1631,8 @@ static int rcu_torture_stall(void *args)
preempt_disable();
pr_alert("rcu_torture_stall start on CPU %d.\n",
smp_processor_id());
- while (ULONG_CMP_LT(get_seconds(), stop_at))
+ while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+ stop_at))
continue; /* Induce RCU CPU stall warning. */
if (stall_cpu_irqsoff)
local_irq_enable();
@@ -1546,8 +1723,9 @@ static int rcu_torture_barrier(void *arg)
atomic_read(&barrier_cbs_invoked),
n_barrier_cbs);
WARN_ON_ONCE(1);
+ } else {
+ n_barrier_successes++;
}
- n_barrier_successes++;
schedule_timeout_interruptible(HZ / 10);
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_barrier");
@@ -1610,17 +1788,39 @@ static void rcu_torture_barrier_cleanup(void)
}
}
+static bool rcu_torture_can_boost(void)
+{
+ static int boost_warn_once;
+ int prio;
+
+ if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
+ return false;
+
+ prio = rcu_get_gp_kthreads_prio();
+ if (!prio)
+ return false;
+
+ if (prio < 2) {
+ if (boost_warn_once == 1)
+ return false;
+
+ pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
+ boost_warn_once = 1;
+ return false;
+ }
+
+ return true;
+}
+
static enum cpuhp_state rcutor_hp;
static void
rcu_torture_cleanup(void)
{
int flags = 0;
- unsigned long gpnum = 0;
- unsigned long completed = 0;
+ unsigned long gp_seq = 0;
int i;
- rcutorture_record_test_transition();
if (torture_cleanup_begin()) {
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
@@ -1648,17 +1848,15 @@ rcu_torture_cleanup(void)
fakewriter_tasks = NULL;
}
- rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed);
- srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
- &flags, &gpnum, &completed);
- pr_alert("%s: End-test grace-period state: g%lu c%lu f%#x\n",
- cur_ops->name, gpnum, completed, flags);
+ rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
+ srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+ pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
+ cur_ops->name, gp_seq, flags);
torture_stop_kthread(rcu_torture_stats, stats_task);
torture_stop_kthread(rcu_torture_fqs, fqs_task);
for (i = 0; i < ncbflooders; i++)
torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
- if ((test_boost == 1 && cur_ops->can_boost) ||
- test_boost == 2)
+ if (rcu_torture_can_boost())
cpuhp_remove_state(rcutor_hp);
/*
@@ -1746,7 +1944,7 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] = {
&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
- &sched_ops, &tasks_ops,
+ &busted_srcud_ops, &sched_ops, &tasks_ops,
};
if (!torture_init_begin(torture_type, verbose))
@@ -1763,8 +1961,8 @@ rcu_torture_init(void)
torture_type);
pr_alert("rcu-torture types:");
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
- pr_alert(" %s", torture_ops[i]->name);
- pr_alert("\n");
+ pr_cont(" %s", torture_ops[i]->name);
+ pr_cont("\n");
firsterr = -EINVAL;
goto unwind;
}
@@ -1882,8 +2080,7 @@ rcu_torture_init(void)
test_boost_interval = 1;
if (test_boost_duration < 2)
test_boost_duration = 2;
- if ((test_boost == 1 && cur_ops->can_boost) ||
- test_boost == 2) {
+ if (rcu_torture_can_boost()) {
boost_starttime = jiffies + test_boost_interval * HZ;
@@ -1897,7 +2094,7 @@ rcu_torture_init(void)
firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
if (firsterr)
goto unwind;
- firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
+ firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
if (firsterr)
goto unwind;
firsterr = rcu_torture_stall_init();
@@ -1926,7 +2123,6 @@ rcu_torture_init(void)
goto unwind;
}
}
- rcutorture_record_test_transition();
torture_init_end();
return 0;
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 622792abe41a..04fc2ed71af8 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -110,7 +110,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
if (!newval && READ_ONCE(sp->srcu_gp_waiting))
- swake_up(&sp->srcu_wq);
+ swake_up_one(&sp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -140,7 +140,7 @@ void srcu_drive_gp(struct work_struct *wp)
idx = sp->srcu_idx;
WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
- swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+ swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
/* Invoke the callbacks we removed above. */
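The driver above flips ->srcu_idx, announces that it is waiting, and then sleeps until the old index's nesting count drains to zero, with the final srcu_read_unlock() doing an exclusive (single-waiter) wakeup. A minimal userspace analogue of that flip-and-wait pattern, using pthread primitives in place of the kernel's exclusive swait API (a sketch under simplifying assumptions, not the kernel code itself):

/* Hedged userspace sketch of Tiny SRCU's flip-and-wait pattern. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock_nesting[2];	/* readers per index */
static atomic_int gp_waiting;		/* GP driver is blocked */
static atomic_int srcu_idx;
static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

static int read_lock(void)
{
	int idx = atomic_load(&srcu_idx);

	atomic_fetch_add(&lock_nesting[idx], 1);
	return idx;
}

static void read_unlock(int idx)
{
	/* Last reader on this index wakes the (single) GP driver. */
	if (atomic_fetch_sub(&lock_nesting[idx], 1) == 1 &&
	    atomic_load(&gp_waiting)) {
		pthread_mutex_lock(&mu);
		pthread_cond_signal(&cv);	/* one exclusive waiter */
		pthread_mutex_unlock(&mu);
	}
}

static void drive_gp(void)
{
	int idx = atomic_load(&srcu_idx);

	atomic_store(&srcu_idx, !idx);	/* new readers use !idx */
	atomic_store(&gp_waiting, 1);	/* read_unlock() wakes us */
	pthread_mutex_lock(&mu);
	while (atomic_load(&lock_nesting[idx]))
		pthread_cond_wait(&cv, &mu);	/* old index drains */
	pthread_mutex_unlock(&mu);
	atomic_store(&gp_waiting, 0);
	printf("grace period over index %d complete\n", idx);
}

static void *reader(void *arg)
{
	int idx = read_lock();

	/* read-side critical section */
	read_unlock(idx);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	pthread_join(t, NULL);
	drive_gp();	/* no readers left: returns immediately */
	return 0;
}
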
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index b4123d7a2cec..6c9866a854b1 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) "rcu: " fmt
+
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
@@ -390,7 +392,8 @@ void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
}
if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
WARN_ON(srcu_readers_active(sp))) {
- pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+ pr_info("%s: Active srcu_struct %p state: %d\n",
+ __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
return; /* Caller forgot to stop doing call_srcu()? */
}
free_percpu(sp->sda);
@@ -641,6 +644,9 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
* period s. Losers must either ensure that their desired grace-period
* number is recorded on at least their leaf srcu_node structure, or they
* must take steps to invoke their own callbacks.
+ *
+ * Note that this function also does the work of srcu_funnel_exp_start(),
+ * in some cases by directly invoking it.
*/
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
unsigned long s, bool do_norm)
@@ -823,17 +829,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
* more than one CPU, this means that when "func()" is invoked, each CPU
* is guaranteed to have executed a full memory barrier since the end of
* its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_rcu(). It also means that each CPU executing
+ * preceded the call to call_srcu(). It also means that each CPU executing
* an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_rcu()
+ * "func()" must have executed a memory barrier after the call_srcu()
* but before the beginning of that SRCU read-side critical section.
* Note that these guarantees include CPUs that are offline, idle, or
* executing in user mode, as well as CPUs that are executing in the kernel.
*
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
* resulting SRCU callback function "func()", then both CPU A and CPU
* B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_rcu() and the invocation of "func()".
+ * interval between the call to call_srcu() and the invocation of "func()".
* This guarantee applies even if CPU A and CPU B are the same CPU (but
* again only if the system has more than one CPU).
*
@@ -1246,13 +1252,12 @@ static void process_srcu(struct work_struct *work)
void srcutorture_get_gp_data(enum rcutorture_type test_type,
struct srcu_struct *sp, int *flags,
- unsigned long *gpnum, unsigned long *completed)
+ unsigned long *gp_seq)
{
if (test_type != SRCU_FLAVOR)
return;
*flags = 0;
- *completed = rcu_seq_ctr(sp->srcu_gp_seq);
- *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
+ *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
@@ -1263,16 +1268,17 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
unsigned long s0 = 0, s1 = 0;
idx = sp->srcu_idx & 0x1;
- pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
+ pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
+ tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
for_each_possible_cpu(cpu) {
unsigned long l0, l1;
unsigned long u0, u1;
long c0, c1;
- struct srcu_data *counts;
+ struct srcu_data *sdp;
- counts = per_cpu_ptr(sp->sda, cpu);
- u0 = counts->srcu_unlock_count[!idx];
- u1 = counts->srcu_unlock_count[idx];
+ sdp = per_cpu_ptr(sp->sda, cpu);
+ u0 = sdp->srcu_unlock_count[!idx];
+ u1 = sdp->srcu_unlock_count[idx];
/*
* Make sure that a lock is always counted if the corresponding
@@ -1280,12 +1286,13 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
*/
smp_rmb();
- l0 = counts->srcu_lock_count[!idx];
- l1 = counts->srcu_lock_count[idx];
+ l0 = sdp->srcu_lock_count[!idx];
+ l1 = sdp->srcu_lock_count[idx];
c0 = l0 - u0;
c1 = l1 - u1;
- pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
+ pr_cont(" %d(%ld,%ld %1p)",
+ cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
s0 += c0;
s1 += c1;
}
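The statistics pass above sums the unlock counters before the lock counters, with smp_rmb() in between, so that any reader whose unlock has been counted also has its lock counted and c0/c1 cannot go spuriously negative. A hedged C11 sketch of that summation order, with an acquire fence standing in for smp_rmb() and plain arrays modeling the per-CPU counters:

#include <stdatomic.h>
#include <stdio.h>

#define NCPU 4

static atomic_ulong lock_count[NCPU];
static atomic_ulong unlock_count[NCPU];

static long active_readers(void)
{
	unsigned long locks = 0, unlocks = 0;
	int cpu;

	for (cpu = 0; cpu < NCPU; cpu++)
		unlocks += atomic_load_explicit(&unlock_count[cpu],
						memory_order_relaxed);
	/* If an unlock was counted above, the corresponding lock
	 * must be visible to the loads below. */
	atomic_thread_fence(memory_order_acquire);
	for (cpu = 0; cpu < NCPU; cpu++)
		locks += atomic_load_explicit(&lock_count[cpu],
					      memory_order_relaxed);
	return (long)(locks - unlocks);	/* never spuriously negative */
}

int main(void)
{
	atomic_fetch_add(&lock_count[1], 1);	/* reader enters on CPU 1 */
	printf("active: %ld\n", active_readers());
	atomic_fetch_add(&unlock_count[1], 1);	/* and leaves */
	printf("active: %ld\n", active_readers());
	return 0;
}
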
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index a64eee0db39e..befc9321a89c 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -122,10 +122,8 @@ void rcu_check_callbacks(int user)
{
if (user)
rcu_sched_qs();
- else if (!in_softirq())
+ if (user || !in_softirq())
rcu_bh_qs();
- if (user)
- rcu_note_voluntary_context_switch(current);
}
/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index aa7cade1b9f3..0b760c1369f7 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -27,6 +27,9 @@
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*/
+
+#define pr_fmt(fmt) "rcu: " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -95,13 +98,13 @@ struct rcu_state sname##_state = { \
.rda = &sname##_data, \
.call = cr, \
.gp_state = RCU_GP_IDLE, \
- .gpnum = 0UL - 300UL, \
- .completed = 0UL - 300UL, \
+ .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.name = RCU_STATE_NAME(sname), \
.abbr = sabbr, \
.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
+ .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
}
RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -155,6 +158,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;
+static void
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+ struct rcu_node *rnp, unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -177,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
+/* Retrieve RCU kthreads priority for rcutorture */
+int rcu_get_gp_kthreads_prio(void)
+{
+ return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
/*
* Number of grace periods between delays, normalized by the duration of
* the delay. The longer the delay, the more the grace periods between
@@ -189,18 +202,6 @@ module_param(gp_cleanup_delay, int, 0444);
#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
/*
- * Track the rcutorture test sequence number and the update version
- * number within a given test. The rcutorture_testseq is incremented
- * on every rcutorture module load and unload, so has an odd value
- * when a test is running. The rcutorture_vernum is set to zero
- * when rcutorture starts and is incremented on each rcutorture update.
- * These variables enable correlating rcutorture output with the
- * RCU tracing information.
- */
-unsigned long rcutorture_testseq;
-unsigned long rcutorture_vernum;
-
-/*
* Compute the mask of online CPUs for the specified rcu_node structure.
* This will not be stable unless the rcu_node structure's ->lock is
* held, but the bit corresponding to the current CPU will be stable
@@ -218,7 +219,7 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
*/
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
- return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
+ return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
}
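With ->gpnum and ->completed folded into a single ->gp_seq, the grace-period count lives in the upper bits and a phase indicator in the low RCU_SEQ_CTR_SHIFT bits, so rcu_gp_in_progress() reduces to "low bits nonzero". A minimal sketch of that encoding, mirroring the arithmetic of the kernel's rcu_seq_*() helpers in kernel/rcu/rcu.h but omitting their memory barriers:

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;		/* grace-period count */
}

static unsigned long rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;		/* nonzero: GP in progress */
}

static void rcu_seq_start(unsigned long *sp)
{
	(*sp)++;				/* idle -> in progress */
}

static void rcu_seq_end(unsigned long *sp)
{
	*sp = (*sp | RCU_SEQ_STATE_MASK) + 1;	/* next count, idle again */
}

int main(void)
{
	unsigned long boot = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT;
	unsigned long gp_seq = boot;		/* as in the initializer above */

	printf("in progress? %lu\n", rcu_seq_state(gp_seq));	/* 0 */
	rcu_seq_start(&gp_seq);
	printf("in progress? %lu\n", rcu_seq_state(gp_seq));	/* 1 */
	rcu_seq_end(&gp_seq);
	printf("GPs completed since boot: %lu\n",
	       rcu_seq_ctr(gp_seq) - rcu_seq_ctr(boot));	/* 1 */
	return 0;
}
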
/*
@@ -233,7 +234,7 @@ void rcu_sched_qs(void)
if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
return;
trace_rcu_grace_period(TPS("rcu_sched"),
- __this_cpu_read(rcu_sched_data.gpnum),
+ __this_cpu_read(rcu_sched_data.gp_seq),
TPS("cpuqs"));
__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
@@ -248,7 +249,7 @@ void rcu_bh_qs(void)
RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
trace_rcu_grace_period(TPS("rcu_bh"),
- __this_cpu_read(rcu_bh_data.gpnum),
+ __this_cpu_read(rcu_bh_data.gp_seq),
TPS("cpuqs"));
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
@@ -380,20 +381,6 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
}
/*
- * Do a double-increment of the ->dynticks counter to emulate a
- * momentary idle-CPU quiescent state.
- */
-static void rcu_dynticks_momentary_idle(void)
-{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
- &rdtp->dynticks);
-
- /* It is illegal to call this from idle state. */
- WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
-}
-
-/*
* Set the special (bottom) bit of the specified CPU so that it
* will take special action (such as flushing its TLB) on the
* next exit from an extended quiescent state. Returns true if
@@ -424,12 +411,17 @@ bool rcu_eqs_special_set(int cpu)
*
* We inform the RCU core by emulating a zero-duration dyntick-idle period.
*
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts and must not be idle.
*/
static void rcu_momentary_dyntick_idle(void)
{
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ int special;
+
raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
- rcu_dynticks_momentary_idle();
+ special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ /* It is illegal to call this from idle state. */
+ WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
}
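Here ->dynticks keeps a special-action flag in bit 0 and counts extended-quiescent-state transitions in the remaining bits, in units of RCU_DYNTICK_CTRL_CTR. Adding 2 * RCU_DYNTICK_CTRL_CTR changes the counter, so forcing-quiescent-state snapshots observe a quiescent state, without the CPU ever appearing idle. A hedged userspace sketch of the arithmetic:

#include <stdatomic.h>
#include <assert.h>
#include <stdio.h>

#define RCU_DYNTICK_CTRL_MASK	0x1	/* "do special action" flag */
#define RCU_DYNTICK_CTRL_CTR	(RCU_DYNTICK_CTRL_MASK + 1)

static atomic_int dynticks = RCU_DYNTICK_CTRL_CTR;	/* boot: non-idle */

static int in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

static void momentary_dyntick_idle(void)
{
	/* Emulate atomic_add_return(): fetch-add, then add locally. */
	int special = atomic_fetch_add(&dynticks,
				       2 * RCU_DYNTICK_CTRL_CTR) +
		      2 * RCU_DYNTICK_CTRL_CTR;

	assert(special & RCU_DYNTICK_CTRL_CTR);	/* illegal from idle */
}

int main(void)
{
	int snap = atomic_load(&dynticks);	/* FQS-style snapshot */

	momentary_dyntick_idle();
	printf("counter moved: %d, still non-idle: %d\n",
	       atomic_load(&dynticks) != snap,
	       !in_eqs(atomic_load(&dynticks)));
	return 0;
}
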
/*
@@ -451,7 +443,7 @@ void rcu_note_context_switch(bool preempt)
rcu_momentary_dyntick_idle();
this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
if (!preempt)
- rcu_note_voluntary_context_switch_lite(current);
+ rcu_tasks_qs(current);
out:
trace_rcu_utilization(TPS("End context switch"));
barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -513,8 +505,38 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
-module_param(jiffies_till_first_fqs, ulong, 0644);
-module_param(jiffies_till_next_fqs, ulong, 0644);
+static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+ ulong j;
+ int ret = kstrtoul(val, 0, &j);
+
+ if (!ret)
+ WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+ return ret;
+}
+
+static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+ ulong j;
+ int ret = kstrtoul(val, 0, &j);
+
+ if (!ret)
+ WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+ return ret;
+}
+
+static struct kernel_param_ops first_fqs_jiffies_ops = {
+ .set = param_set_first_fqs_jiffies,
+ .get = param_get_ulong,
+};
+
+static struct kernel_param_ops next_fqs_jiffies_ops = {
+ .set = param_set_next_fqs_jiffies,
+ .get = param_get_ulong,
+};
+
+module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
+module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
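The new kernel_param_ops setters clamp both forcing-quiescent-state intervals at write time: the first-FQS wait is bounded to at most HZ jiffies, the next-FQS wait to [1, HZ], which is what lets the grace-period kthread drop its inline re-clamping later in this patch. A userspace sketch of the clamp, with strtoul() standing in for kstrtoul():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define HZ 1000UL

static int set_first_fqs(const char *val, unsigned long *out)
{
	char *end;
	unsigned long j;

	errno = 0;
	j = strtoul(val, &end, 0);
	if (errno || *end)
		return -EINVAL;
	*out = (j > HZ) ? HZ : j;		/* cap at one second */
	return 0;
}

static int set_next_fqs(const char *val, unsigned long *out)
{
	char *end;
	unsigned long j;

	errno = 0;
	j = strtoul(val, &end, 0);
	if (errno || *end)
		return -EINVAL;
	*out = (j > HZ) ? HZ : (j ? j : 1);	/* cap, and never zero */
	return 0;
}

int main(void)
{
	unsigned long first, next;

	set_first_fqs("5000", &first);	/* clamped to HZ */
	set_next_fqs("0", &next);	/* raised to 1 */
	printf("first=%lu next=%lu\n", first, next);
	return 0;
}
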
/*
@@ -529,58 +551,31 @@ static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);
/*
- * Return the number of RCU batches started thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
*/
-unsigned long rcu_batches_started(void)
+unsigned long rcu_get_gp_seq(void)
{
- return rcu_state_p->gpnum;
+ return READ_ONCE(rcu_state_p->gp_seq);
}
-EXPORT_SYMBOL_GPL(rcu_batches_started);
+EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
/*
- * Return the number of RCU-sched batches started thus far for debug & stats.
+ * Return the number of RCU-sched GPs completed thus far for debug & stats.
*/
-unsigned long rcu_batches_started_sched(void)
+unsigned long rcu_sched_get_gp_seq(void)
{
- return rcu_sched_state.gpnum;
+ return READ_ONCE(rcu_sched_state.gp_seq);
}
-EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
/*
- * Return the number of RCU BH batches started thus far for debug & stats.
+ * Return the number of RCU-bh GPs completed thus far for debug & stats.
*/
-unsigned long rcu_batches_started_bh(void)
+unsigned long rcu_bh_get_gp_seq(void)
{
- return rcu_bh_state.gpnum;
+ return READ_ONCE(rcu_bh_state.gp_seq);
}
-EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
-
-/*
- * Return the number of RCU batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
- return rcu_state_p->completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
- * Return the number of RCU-sched batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_sched(void)
-{
- return rcu_sched_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-
-/*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_bh(void)
-{
- return rcu_bh_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
/*
* Return the number of RCU expedited batches completed thus far for
@@ -636,35 +631,42 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
*/
void show_rcu_gp_kthreads(void)
{
+ int cpu;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
struct rcu_state *rsp;
for_each_rcu_flavor(rsp) {
pr_info("%s: wait state: %d ->state: %#lx\n",
rsp->name, rsp->gp_state, rsp->gp_kthread->state);
+ rcu_for_each_node_breadth_first(rsp, rnp) {
+ if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
+ continue;
+ pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+ rnp->grplo, rnp->grphi, rnp->gp_seq,
+ rnp->gp_seq_needed);
+ if (!rcu_is_leaf_node(rnp))
+ continue;
+ for_each_leaf_node_possible_cpu(rnp, cpu) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ if (rdp->gpwrap ||
+ ULONG_CMP_GE(rsp->gp_seq,
+ rdp->gp_seq_needed))
+ continue;
+ pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+ cpu, rdp->gp_seq_needed);
+ }
+ }
/* sched_show_task(rsp->gp_kthread); */
}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
/*
- * Record the number of times rcutorture tests have been initiated and
- * terminated. This information allows the debugfs tracing stats to be
- * correlated to the rcutorture messages, even when the rcutorture module
- * is being repeatedly loaded and unloaded. In other words, we cannot
- * store this state in rcutorture itself.
- */
-void rcutorture_record_test_transition(void)
-{
- rcutorture_testseq++;
- rcutorture_vernum = 0;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
-
-/*
* Send along grace-period-related data for rcutorture diagnostics.
*/
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
- unsigned long *gpnum, unsigned long *completed)
+ unsigned long *gp_seq)
{
struct rcu_state *rsp = NULL;
@@ -684,23 +686,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
if (rsp == NULL)
return;
*flags = READ_ONCE(rsp->gp_flags);
- *gpnum = READ_ONCE(rsp->gpnum);
- *completed = READ_ONCE(rsp->completed);
+ *gp_seq = rcu_seq_current(&rsp->gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
/*
- * Record the number of writer passes through the current rcutorture test.
- * This is also used to correlate debugfs tracing stats with the rcutorture
- * messages.
- */
-void rcutorture_record_progress(unsigned long vernum)
-{
- rcutorture_vernum++;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_progress);
-
-/*
* Return the root node of the specified rcu_state structure.
*/
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
@@ -1059,41 +1049,41 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
- * Is the current CPU online? Disable preemption to avoid false positives
- * that could otherwise happen due to the current CPU number being sampled,
- * this task being preempted, its old CPU being taken offline, resuming
- * on some other CPU, then determining that its old CPU is now offline.
- * It is OK to use RCU on an offline processor during initial boot, hence
- * the check for rcu_scheduler_fully_active. Note also that it is OK
- * for a CPU coming online to use RCU for one jiffy prior to marking itself
- * online in the cpu_online_mask. Similarly, it is OK for a CPU going
- * offline to continue to use RCU for one jiffy after marking itself
- * offline in the cpu_online_mask. This leniency is necessary given the
- * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the teardown
- * of the CPU.
+ * Is the current CPU online as far as RCU is concerned?
*
- * This is also why RCU internally marks CPUs online during in the
- * preparation phase and offline after the CPU has been taken down.
+ * Disable preemption to avoid false positives that could otherwise
+ * happen due to the current CPU number being sampled, this task being
+ * preempted, its old CPU being taken offline, resuming on some other CPU,
+ * then determining that its old CPU is now offline. Because there are
+ * multiple flavors of RCU, and because this function can be called while
+ * the flavors are being updated as a given CPU comes online or goes
+ * offline, it is necessary to check all flavors. If any flavor believes
+ * that the given CPU is online, it is considered to be online.
*
- * Disable checking if in an NMI handler because we cannot safely report
- * errors from NMI handlers anyway.
+ * Disable checking if in an NMI handler because we cannot safely
+ * report errors from NMI handlers anyway. In addition, it is OK to use
+ * RCU on an offline processor during initial boot, hence the check for
+ * rcu_scheduler_fully_active.
*/
bool rcu_lockdep_current_cpu_online(void)
{
struct rcu_data *rdp;
struct rcu_node *rnp;
- bool ret;
+ struct rcu_state *rsp;
- if (in_nmi())
+ if (in_nmi() || !rcu_scheduler_fully_active)
return true;
preempt_disable();
- rdp = this_cpu_ptr(&rcu_sched_data);
- rnp = rdp->mynode;
- ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
- !rcu_scheduler_fully_active;
+ for_each_rcu_flavor(rsp) {
+ rdp = this_cpu_ptr(rsp->rda);
+ rnp = rdp->mynode;
+ if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
+ preempt_enable();
+ return true;
+ }
+ }
preempt_enable();
- return ret;
+ return false;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
@@ -1115,17 +1105,18 @@ static int rcu_is_cpu_rrupt_from_idle(void)
/*
* We are reporting a quiescent state on behalf of some other CPU, so
* it is our responsibility to check for and handle potential overflow
- * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
* After all, the CPU might be in deep idle state, and thus executing no
* code whatsoever.
*/
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
raw_lockdep_assert_held_rcu_node(rnp);
- if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+ if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
+ rnp->gp_seq))
WRITE_ONCE(rdp->gpwrap, true);
- if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
- rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+ if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
+ rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
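The overflow check above leans on the kernel's wraparound-safe unsigned comparisons: ULONG_CMP_LT(a, b) treats a as "before" b whenever the unsigned difference exceeds half the counter space, so values less than half the space apart compare correctly even across a wrap. A small demonstration, with the macro definitions as in include/linux/rcupdate.h:

#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long old = ULONG_MAX - 1;	/* just before wrap */
	unsigned long new = old + 4;		/* wrapped around to 2 */

	printf("new after old: %d\n", ULONG_CMP_LT(old, new));	/* 1 */
	/* The ovf-style check: is one value a quarter of the counter
	 * space behind another? */
	printf("quarter-space lag detected: %d\n",
	       ULONG_CMP_LT(old + ULONG_MAX / 4, old + ULONG_MAX / 2)); /* 1 */
	return 0;
}
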
/*
@@ -1137,7 +1128,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+ trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
return 1;
}
@@ -1159,7 +1150,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp);
if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
- rdp->rcu_iw_gpnum = rnp->gpnum;
+ rdp->rcu_iw_gp_seq = rnp->gp_seq;
rdp->rcu_iw_pending = false;
}
raw_spin_unlock_rcu_node(rnp);
@@ -1187,7 +1178,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* of the current RCU grace period.
*/
if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+ trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rdp->dynticks_fqs++;
rcu_gpnum_ovf(rnp, rdp);
return 1;
@@ -1203,8 +1194,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
- READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+ rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
+ trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
rcu_gpnum_ovf(rnp, rdp);
return 1;
} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
@@ -1212,12 +1203,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
smp_store_release(ruqp, true);
}
- /* Check for the CPU being offline. */
- if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
- rdp->offline_fqs++;
- rcu_gpnum_ovf(rnp, rdp);
- return 1;
+ /* If waiting too long on an offline CPU, complain. */
+ if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
+ time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+ bool onl;
+ struct rcu_node *rnp1;
+
+ WARN_ON(1); /* Offline CPUs are supposed to report QS! */
+ pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+ __func__, rnp->grplo, rnp->grphi, rnp->level,
+ (long)rnp->gp_seq, (long)rnp->completedqs);
+ for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+ pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
+ __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
+ onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+ pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
+ __func__, rdp->cpu, ".o"[onl],
+ (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+ (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+ return 1; /* Break things loose after complaining. */
}
/*
@@ -1256,11 +1260,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
resched_cpu(rdp->cpu);
if (IS_ENABLED(CONFIG_IRQ_WORK) &&
- !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+ !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
(rnp->ffmask & rdp->grpmask)) {
init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
rdp->rcu_iw_pending = true;
- rdp->rcu_iw_gpnum = rnp->gpnum;
+ rdp->rcu_iw_gp_seq = rnp->gp_seq;
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
}
}
@@ -1274,9 +1278,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
unsigned long j1;
rsp->gp_start = j;
- smp_wmb(); /* Record start time before stall time. */
j1 = rcu_jiffies_till_stall_check();
- WRITE_ONCE(rsp->jiffies_stall, j + j1);
+ /* Record ->gp_start before ->jiffies_stall. */
+ smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
rsp->jiffies_resched = j + j1 / 2;
rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}
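Ordering matters here: ->gp_start must be visible before the new ->jiffies_stall value, so the plain store is published by a release store into ->jiffies_stall. A C11 sketch of the pairing, with acquire/release atomics standing in for smp_store_release() and the stall checker's ordered load (a simplified model, not the stall-check code itself):

#include <stdatomic.h>
#include <stdio.h>

static unsigned long gp_start;
static atomic_ulong jiffies_stall;

static void record_gp_stall_check_time(unsigned long j, unsigned long j1)
{
	gp_start = j;				/* plain store... */
	atomic_store_explicit(&jiffies_stall, j + j1,
			      memory_order_release);	/* ...published here */
}

static void check_stall(unsigned long now)
{
	unsigned long js = atomic_load_explicit(&jiffies_stall,
						memory_order_acquire);

	/* The gp_start read below is ordered after the js load, so a
	 * fresh js is never paired with a stale gp_start. */
	if (now - gp_start >= js - gp_start)
		printf("possible stall after %lu ticks\n", now - gp_start);
}

int main(void)
{
	record_gp_stall_check_time(100, 21);
	check_stall(130);	/* 130 >= 121: report */
	return 0;
}
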
@@ -1302,9 +1306,9 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
j = jiffies;
gpa = READ_ONCE(rsp->gp_activity);
if (j - gpa > 2 * HZ) {
- pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+ pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
rsp->name, j - gpa,
- rsp->gpnum, rsp->completed,
+ (long)rcu_seq_current(&rsp->gp_seq),
rsp->gp_flags,
gp_state_getname(rsp->gp_state), rsp->gp_state,
rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
@@ -1359,16 +1363,15 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
}
}
-static inline void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(void)
{
if (sysctl_panic_on_rcu_stall)
panic("RCU Stall\n");
}
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
{
int cpu;
- long delta;
unsigned long flags;
unsigned long gpa;
unsigned long j;
@@ -1381,25 +1384,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
if (rcu_cpu_stall_suppress)
return;
- /* Only let one CPU complain about others per time interval. */
-
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- delta = jiffies - READ_ONCE(rsp->jiffies_stall);
- if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- return;
- }
- WRITE_ONCE(rsp->jiffies_stall,
- jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
/*
* OK, time to rat on our buddy...
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
- pr_err("INFO: %s detected stalls on CPUs/tasks:",
- rsp->name);
+ pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
print_cpu_stall_info_begin();
rcu_for_each_leaf_node(rsp, rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1418,17 +1408,16 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
cpu)->cblist);
- pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
+ pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start),
- (long)rsp->gpnum, (long)rsp->completed, totqlen);
+ (long)rcu_seq_current(&rsp->gp_seq), totqlen);
if (ndetected) {
rcu_dump_cpu_stacks(rsp);
/* Complain about tasks blocking the grace period. */
rcu_print_detail_task_stall(rsp);
} else {
- if (READ_ONCE(rsp->gpnum) != gpnum ||
- READ_ONCE(rsp->completed) == gpnum) {
+ if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
pr_err("INFO: Stall ended before state dump start\n");
} else {
j = jiffies;
@@ -1441,6 +1430,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
sched_show_task(current);
}
}
+ /* Rewrite if needed in case of slow consoles. */
+ if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+ WRITE_ONCE(rsp->jiffies_stall,
+ jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
rcu_check_gp_kthread_starvation(rsp);
@@ -1476,15 +1469,16 @@ static void print_cpu_stall(struct rcu_state *rsp)
for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
cpu)->cblist);
- pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
+ pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
jiffies - rsp->gp_start,
- (long)rsp->gpnum, (long)rsp->completed, totqlen);
+ (long)rcu_seq_current(&rsp->gp_seq), totqlen);
rcu_check_gp_kthread_starvation(rsp);
rcu_dump_cpu_stacks(rsp);
raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ /* Rewrite if needed in case of slow consoles. */
if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
WRITE_ONCE(rsp->jiffies_stall,
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -1504,10 +1498,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
- unsigned long completed;
- unsigned long gpnum;
+ unsigned long gs1;
+ unsigned long gs2;
unsigned long gps;
unsigned long j;
+ unsigned long jn;
unsigned long js;
struct rcu_node *rnp;
@@ -1520,43 +1515,46 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Lots of memory barriers to reject false positives.
*
- * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
- * then rsp->gp_start, and finally rsp->completed. These values
- * are updated in the opposite order with memory barriers (or
- * equivalent) during grace-period initialization and cleanup.
- * Now, a false positive can occur if we get an new value of
- * rsp->gp_start and a old value of rsp->jiffies_stall. But given
- * the memory barriers, the only way that this can happen is if one
- * grace period ends and another starts between these two fetches.
- * Detect this by comparing rsp->completed with the previous fetch
- * from rsp->gpnum.
+ * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
+ * then rsp->gp_start, and finally another copy of rsp->gp_seq.
+ * These values are updated in the opposite order with memory
+ * barriers (or equivalent) during grace-period initialization
+ * and cleanup. Now, a false positive can occur if we get a new
+ * value of rsp->gp_start and an old value of rsp->jiffies_stall.
+ * But given the memory barriers, the only way that this can happen
+ * is if one grace period ends and another starts between these
+ * two fetches. This is detected by comparing the second fetch
+ * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
*
* Given this check, comparisons of jiffies, rsp->jiffies_stall,
* and rsp->gp_start suffice to forestall false positives.
*/
- gpnum = READ_ONCE(rsp->gpnum);
- smp_rmb(); /* Pick up ->gpnum first... */
+ gs1 = READ_ONCE(rsp->gp_seq);
+ smp_rmb(); /* Pick up ->gp_seq first... */
js = READ_ONCE(rsp->jiffies_stall);
smp_rmb(); /* ...then ->jiffies_stall before the rest... */
gps = READ_ONCE(rsp->gp_start);
- smp_rmb(); /* ...and finally ->gp_start before ->completed. */
- completed = READ_ONCE(rsp->completed);
- if (ULONG_CMP_GE(completed, gpnum) ||
+ smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+ gs2 = READ_ONCE(rsp->gp_seq);
+ if (gs1 != gs2 ||
ULONG_CMP_LT(j, js) ||
ULONG_CMP_GE(gps, js))
return; /* No stall or GP completed since entering function. */
rnp = rdp->mynode;
+ jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
if (rcu_gp_in_progress(rsp) &&
- (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
+ (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+ cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(rsp);
} else if (rcu_gp_in_progress(rsp) &&
- ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
+ ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+ cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
/* They had a few time units to dump stack, so complain. */
- print_other_cpu_stall(rsp, gpnum);
+ print_other_cpu_stall(rsp, gs2);
}
}
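The cmpxchg() calls added above make ->jiffies_stall double as a ticket: of all CPUs that sample the same stale deadline, only the one whose compare-and-swap succeeds prints the warning, replacing the lock-based single-complainer logic removed from print_other_cpu_stall(). A C11 sketch of that gate:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong jiffies_stall;

static void maybe_report_stall(int cpu, unsigned long js, unsigned long jn)
{
	unsigned long expected = js;

	/* Succeeds for exactly one caller per deadline value. */
	if (atomic_compare_exchange_strong(&jiffies_stall, &expected, jn))
		printf("CPU %d: reporting stall, next check at %lu\n",
		       cpu, jn);
}

int main(void)
{
	unsigned long js = 1000, jn = 4000;

	atomic_store(&jiffies_stall, js);
	maybe_report_stall(0, js, jn);	/* wins the race */
	maybe_report_stall(1, js, jn);	/* sees the new value; silent */
	return 0;
}
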
@@ -1577,123 +1575,99 @@ void rcu_cpu_stall_reset(void)
WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}
-/*
- * Determine the value that ->completed will have at the end of the
- * next subsequent grace period. This is used to tag callbacks so that
- * a CPU can invoke callbacks in a timely fashion even if that CPU has
- * been dyntick-idle for an extended period with callbacks under the
- * influence of RCU_FAST_NO_HZ.
- *
- * The caller must hold rnp->lock with interrupts disabled.
- */
-static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
- struct rcu_node *rnp)
-{
- raw_lockdep_assert_held_rcu_node(rnp);
-
- /*
- * If RCU is idle, we just wait for the next grace period.
- * But we can only be sure that RCU is idle if we are looking
- * at the root rcu_node structure -- otherwise, a new grace
- * period might have started, but just not yet gotten around
- * to initializing the current non-root rcu_node structure.
- */
- if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
- return rnp->completed + 1;
-
- /*
- * If the current rcu_node structure believes that RCU is
- * idle, and if the rcu_state structure does not yet reflect
- * the start of a new grace period, then the next grace period
- * will suffice. The memory barrier is needed to accurately
- * sample the rsp->gpnum, and pairs with the second lock
- * acquisition in rcu_gp_init(), which is augmented with
- * smp_mb__after_unlock_lock() for this purpose.
- */
- if (rnp->gpnum == rnp->completed) {
- smp_mb(); /* See above block comment. */
- if (READ_ONCE(rsp->gpnum) == rnp->completed)
- return rnp->completed + 1;
- }
-
- /*
- * Otherwise, wait for a possible partial grace period and
- * then the subsequent full grace period.
- */
- return rnp->completed + 2;
-}
-
/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long c, const char *s)
+ unsigned long gp_seq_req, const char *s)
{
- trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
- rnp->completed, c, rnp->level,
- rnp->grplo, rnp->grphi, s);
+ trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+ rnp->level, rnp->grplo, rnp->grphi, s);
}
/*
+ * rcu_start_this_gp - Request the start of a particular grace period
+ * @rnp_start: The leaf node of the CPU from which to start.
+ * @rdp: The rcu_data corresponding to the CPU from which to start.
+ * @gp_seq_req: The gp_seq of the grace period to start.
+ *
* Start the specified grace period, as needed to handle newly arrived
* callbacks. The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp[] field. Returns true if there
+ * rcu_node structure's ->gp_seq_needed field. Returns true if there
* is reason to awaken the grace-period kthread.
*
* The caller must hold the specified rcu_node structure's ->lock, which
* is why the caller is responsible for waking the grace-period kthread.
+ *
+ * Returns true if the GP thread needs to be awakened, false otherwise.
*/
-static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long c)
+static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
+ unsigned long gp_seq_req)
{
bool ret = false;
struct rcu_state *rsp = rdp->rsp;
- struct rcu_node *rnp_root;
+ struct rcu_node *rnp;
/*
* Use funnel locking to either acquire the root rcu_node
* structure's lock or bail out if the need for this grace period
- * has already been recorded -- or has already started. If there
- * is already a grace period in progress in a non-leaf node, no
- * recording is needed because the end of the grace period will
- * scan the leaf rcu_node structures. Note that rnp->lock must
- * not be released.
+ * has already been recorded -- or if that grace period has in
+ * fact already started. If there is already a grace period in
+ * progress in a non-leaf node, no recording is needed because the
+ * end of the grace period will scan the leaf rcu_node structures.
+ * Note that rnp_start->lock must not be released.
*/
- raw_lockdep_assert_held_rcu_node(rnp);
- trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
- for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
- if (rnp_root != rnp)
- raw_spin_lock_rcu_node(rnp_root);
- WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
- need_future_gp_mask(), c));
- if (need_future_gp_element(rnp_root, c) ||
- ULONG_CMP_GE(rnp_root->gpnum, c) ||
- (rnp != rnp_root &&
- rnp_root->gpnum != rnp_root->completed)) {
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
+ raw_lockdep_assert_held_rcu_node(rnp_start);
+ trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
+ for (rnp = rnp_start; 1; rnp = rnp->parent) {
+ if (rnp != rnp_start)
+ raw_spin_lock_rcu_node(rnp);
+ if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
+ rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
+ (rnp != rnp_start &&
+ rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req,
+ TPS("Prestarted"));
goto unlock_out;
}
- need_future_gp_element(rnp_root, c) = true;
- if (rnp_root != rnp && rnp_root->parent != NULL)
- raw_spin_unlock_rcu_node(rnp_root);
- if (!rnp_root->parent)
+ rnp->gp_seq_needed = gp_seq_req;
+ if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
+ /*
+ * We just marked the leaf or internal node, and a
+ * grace period is in progress, which means that
+ * rcu_gp_cleanup() will see the marking. Bail to
+ * reduce contention.
+ */
+ trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
+ TPS("Startedleaf"));
+ goto unlock_out;
+ }
+ if (rnp != rnp_start && rnp->parent != NULL)
+ raw_spin_unlock_rcu_node(rnp);
+ if (!rnp->parent)
break; /* At root, and perhaps also leaf. */
}
/* If GP already in progress, just leave, otherwise start one. */
- if (rnp_root->gpnum != rnp_root->completed) {
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
+ if (rcu_gp_in_progress(rsp)) {
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
goto unlock_out;
}
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+ rsp->gp_req_activity = jiffies;
if (!rsp->gp_kthread) {
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
goto unlock_out;
}
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
+ trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
ret = true; /* Caller must wake GP kthread. */
unlock_out:
- if (rnp != rnp_root)
- raw_spin_unlock_rcu_node(rnp_root);
+ /* Push furthest requested GP to leaf node and rcu_data structure. */
+ if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
+ rnp_start->gp_seq_needed = rnp->gp_seq_needed;
+ rdp->gp_seq_needed = rnp->gp_seq_needed;
+ }
+ if (rnp != rnp_start)
+ raw_spin_unlock_rcu_node(rnp);
return ret;
}
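The funnel locking above walks leaf to root holding at most one non-leaf lock at a time, recording the requested grace period at each level and bailing as soon as the request is already covered. A simplified pthread sketch of the walk, under stated assumptions: a two-level tree, plain comparisons instead of the wraparound-safe ULONG_CMP_GE(), and none of the in-progress-GP short-circuits:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	pthread_mutex_t lock;
	unsigned long gp_seq_needed;	/* furthest GP requested here */
	struct node *parent;
};

/* Caller holds start->lock, mirroring rcu_start_this_gp(). */
static bool request_gp(struct node *start, unsigned long gp_seq_req)
{
	struct node *np;
	bool start_new = false;

	for (np = start; np; np = np->parent) {
		if (np != start)
			pthread_mutex_lock(&np->lock);
		if (np->gp_seq_needed >= gp_seq_req) {	/* already recorded */
			if (np != start)
				pthread_mutex_unlock(&np->lock);
			return false;
		}
		np->gp_seq_needed = gp_seq_req;
		if (!np->parent) {		/* reached the root */
			start_new = true;
			break;
		}
		if (np != start)		/* drop before moving up */
			pthread_mutex_unlock(&np->lock);
	}
	if (np != start)
		pthread_mutex_unlock(&np->lock);
	return start_new;	/* caller wakes the GP kthread */
}

int main(void)
{
	struct node root = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	struct node leaf = { PTHREAD_MUTEX_INITIALIZER, 0, &root };

	pthread_mutex_lock(&leaf.lock);
	printf("first request starts GP: %d\n", request_gp(&leaf, 4));
	printf("duplicate bails early:   %d\n", request_gp(&leaf, 4));
	pthread_mutex_unlock(&leaf.lock);
	return 0;
}
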
@@ -1703,13 +1677,13 @@ unlock_out:
*/
static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
- unsigned long c = rnp->completed;
bool needmore;
struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
- need_future_gp_element(rnp, c) = false;
- needmore = need_any_future_gp(rnp);
- trace_rcu_this_gp(rnp, rdp, c,
+ needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
+ if (!needmore)
+ rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
+ trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
needmore ? TPS("CleanupMore") : TPS("Cleanup"));
return needmore;
}
@@ -1727,25 +1701,25 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
!READ_ONCE(rsp->gp_flags) ||
!rsp->gp_kthread)
return;
- swake_up(&rsp->gp_wq);
+ swake_up_one(&rsp->gp_wq);
}
/*
- * If there is room, assign a ->completed number to any callbacks on
- * this CPU that have not already been assigned. Also accelerate any
- * callbacks that were previously assigned a ->completed number that has
- * since proven to be too conservative, which can happen if callbacks get
- * assigned a ->completed number while RCU is idle, but with reference to
- * a non-root rcu_node structure. This function is idempotent, so it does
- * not hurt to call it repeatedly. Returns an flag saying that we should
- * awaken the RCU grace-period kthread.
+ * If there is room, assign a ->gp_seq number to any callbacks on this
+ * CPU that have not already been assigned. Also accelerate any callbacks
+ * that were previously assigned a ->gp_seq number that has since proven
+ * to be too conservative, which can happen if callbacks get assigned a
+ * ->gp_seq number while RCU is idle, but with reference to a non-root
+ * rcu_node structure. This function is idempotent, so it does not hurt
+ * to call it repeatedly. Returns a flag saying that we should awaken
+ * the RCU grace-period kthread.
*
* The caller must hold rnp->lock with interrupts disabled.
*/
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp)
{
- unsigned long c;
+ unsigned long gp_seq_req;
bool ret = false;
raw_lockdep_assert_held_rcu_node(rnp);
@@ -1764,22 +1738,50 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
* accelerating callback invocation to an earlier grace-period
* number.
*/
- c = rcu_cbs_completed(rsp, rnp);
- if (rcu_segcblist_accelerate(&rdp->cblist, c))
- ret = rcu_start_this_gp(rnp, rdp, c);
+ gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+ if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
+ ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
/* Trace depending on how much we were able to accelerate. */
if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
else
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
return ret;
}
/*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held. It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+ struct rcu_node *rnp,
+ struct rcu_data *rdp)
+{
+ unsigned long c;
+ bool needwake;
+
+ lockdep_assert_irqs_disabled();
+ c = rcu_seq_snap(&rsp->gp_seq);
+ if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+ /* Old request still live, so mark recent callbacks. */
+ (void)rcu_segcblist_accelerate(&rdp->cblist, c);
+ return;
+ }
+ raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+ needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+ raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+ if (needwake)
+ rcu_gp_kthread_wake(rsp);
+}
+
+/*
* Move any callbacks whose grace period has completed to the
* RCU_DONE_TAIL sublist, then compact the remaining sublists and
- * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
* sublist. This function is idempotent, so it does not hurt to
* invoke it repeatedly. As long as it is not invoked -too- often...
* Returns true if the RCU grace-period kthread needs to be awakened.
@@ -1796,10 +1798,10 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
return false;
/*
- * Find all callbacks whose ->completed numbers indicate that they
+ * Find all callbacks whose ->gp_seq numbers indicate that they
* are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
*/
- rcu_segcblist_advance(&rdp->cblist, rnp->completed);
+ rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
/* Classify any remaining callbacks. */
return rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1819,39 +1821,38 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
raw_lockdep_assert_held_rcu_node(rnp);
- /* Handle the ends of any preceding grace periods first. */
- if (rdp->completed == rnp->completed &&
- !unlikely(READ_ONCE(rdp->gpwrap))) {
-
- /* No grace period end, so just accelerate recent callbacks. */
- ret = rcu_accelerate_cbs(rsp, rnp, rdp);
+ if (rdp->gp_seq == rnp->gp_seq)
+ return false; /* Nothing to do. */
+ /* Handle the ends of any preceding grace periods first. */
+ if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
+ unlikely(READ_ONCE(rdp->gpwrap))) {
+ ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
} else {
-
- /* Advance callbacks. */
- ret = rcu_advance_cbs(rsp, rnp, rdp);
-
- /* Remember that we saw this grace-period completion. */
- rdp->completed = rnp->completed;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
+ ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
}
- if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
+ /* Now handle the beginnings of any new-to-this-CPU grace periods. */
+ if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
+ unlikely(READ_ONCE(rdp->gpwrap))) {
/*
* If the current grace period is waiting for this CPU,
* set up to detect a quiescent state, otherwise don't
* go looking for one.
*/
- rdp->gpnum = rnp->gpnum;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
+ trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
need_gp = !!(rnp->qsmask & rdp->grpmask);
rdp->cpu_no_qs.b.norm = need_gp;
rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
rdp->core_needs_qs = need_gp;
zero_cpu_stall_ticks(rdp);
- WRITE_ONCE(rdp->gpwrap, false);
- rcu_gpnum_ovf(rnp, rdp);
}
+ rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
+ if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+ rdp->gp_seq_needed = rnp->gp_seq_needed;
+ WRITE_ONCE(rdp->gpwrap, false);
+ rcu_gpnum_ovf(rnp, rdp);
return ret;
}
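__note_gp_changes() now distinguishes "a grace period ended" from "a new grace period began" purely from gp_seq arithmetic, given the CPU's last-seen value and the rcu_node's current one. A sketch of the two predicates, mirroring rcu_seq_completed_gp() and rcu_seq_new_gp() from kernel/rcu/rcu.h:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define RCU_SEQ_STATE_MASK	3UL

static bool seq_completed_gp(unsigned long old, unsigned long new)
{
	/* Some GP that was in flight at "old" has since ended. */
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

static bool seq_new_gp(unsigned long old, unsigned long new)
{
	/* A GP unknown at "old" has since started. */
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

int main(void)
{
	unsigned long old = (5UL << 2) | 1;	/* GP in progress, ctr 5 */

	printf("%d %d\n", seq_completed_gp(old, 6UL << 2),	 /* 1 0 */
	       seq_new_gp(old, 6UL << 2));
	printf("%d %d\n", seq_completed_gp(old, (6UL << 2) | 1), /* 1 1 */
	       seq_new_gp(old, (6UL << 2) | 1));
	return 0;
}
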
@@ -1863,8 +1864,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_save(flags);
rnp = rdp->mynode;
- if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
- rdp->completed == READ_ONCE(rnp->completed) &&
+ if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
!unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
!raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
local_irq_restore(flags);
@@ -1879,7 +1879,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
static void rcu_gp_slow(struct rcu_state *rsp, int delay)
{
if (delay > 0 &&
- !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+ !(rcu_seq_ctr(rsp->gp_seq) %
+ (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
schedule_timeout_uninterruptible(delay);
}
@@ -1888,7 +1889,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
*/
static bool rcu_gp_init(struct rcu_state *rsp)
{
+ unsigned long flags;
unsigned long oldmask;
+ unsigned long mask;
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
@@ -1912,9 +1915,9 @@ static bool rcu_gp_init(struct rcu_state *rsp)
/* Advance to a new grace period and initialize state. */
record_gp_stall_check_time(rsp);
- /* Record GP times before starting GP, hence smp_store_release(). */
- smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
- trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+ /* Record GP times before starting GP, hence rcu_seq_start(). */
+ rcu_seq_start(&rsp->gp_seq);
+ trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
raw_spin_unlock_irq_rcu_node(rnp);
/*
@@ -1923,13 +1926,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* for subsequent online CPUs, and that quiescent-state forcing
* will handle subsequent offline CPUs.
*/
+ rsp->gp_state = RCU_GP_ONOFF;
rcu_for_each_leaf_node(rsp, rnp) {
- rcu_gp_slow(rsp, gp_preinit_delay);
+ spin_lock(&rsp->ofl_lock);
raw_spin_lock_irq_rcu_node(rnp);
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
!rnp->wait_blkd_tasks) {
/* Nothing to do on this leaf rcu_node structure. */
raw_spin_unlock_irq_rcu_node(rnp);
+ spin_unlock(&rsp->ofl_lock);
continue;
}
@@ -1939,12 +1944,14 @@ static bool rcu_gp_init(struct rcu_state *rsp)
/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
if (!oldmask != !rnp->qsmaskinit) {
- if (!oldmask) /* First online CPU for this rcu_node. */
- rcu_init_new_rnp(rnp);
- else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
- rnp->wait_blkd_tasks = true;
- else /* Last offline CPU and can propagate. */
+ if (!oldmask) { /* First online CPU for rcu_node. */
+ if (!rnp->wait_blkd_tasks) /* Ever offline? */
+ rcu_init_new_rnp(rnp);
+ } else if (rcu_preempt_has_tasks(rnp)) {
+ rnp->wait_blkd_tasks = true; /* blocked tasks */
+ } else { /* Last offline CPU and can propagate. */
rcu_cleanup_dead_rnp(rnp);
+ }
}
/*
@@ -1953,18 +1960,19 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* still offline, propagate up the rcu_node tree and
* clear ->wait_blkd_tasks. Otherwise, if one of this
* rcu_node structure's CPUs has since come back online,
- * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
- * checks for this, so just call it unconditionally).
+ * simply clear ->wait_blkd_tasks.
*/
if (rnp->wait_blkd_tasks &&
- (!rcu_preempt_has_tasks(rnp) ||
- rnp->qsmaskinit)) {
+ (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
rnp->wait_blkd_tasks = false;
- rcu_cleanup_dead_rnp(rnp);
+ if (!rnp->qsmaskinit)
+ rcu_cleanup_dead_rnp(rnp);
}
raw_spin_unlock_irq_rcu_node(rnp);
+ spin_unlock(&rsp->ofl_lock);
}
+ rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
/*
* Set the quiescent-state-needed bits in all the rcu_node
@@ -1978,22 +1986,27 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* The grace period cannot complete until the initialization
* process finishes, because this kthread handles both.
*/
+ rsp->gp_state = RCU_GP_INIT;
rcu_for_each_node_breadth_first(rsp, rnp) {
rcu_gp_slow(rsp, gp_init_delay);
- raw_spin_lock_irq_rcu_node(rnp);
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
rdp = this_cpu_ptr(rsp->rda);
- rcu_preempt_check_blocked_tasks(rnp);
+ rcu_preempt_check_blocked_tasks(rsp, rnp);
rnp->qsmask = rnp->qsmaskinit;
- WRITE_ONCE(rnp->gpnum, rsp->gpnum);
- if (WARN_ON_ONCE(rnp->completed != rsp->completed))
- WRITE_ONCE(rnp->completed, rsp->completed);
+ WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
if (rnp == rdp->mynode)
(void)__note_gp_changes(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
- trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+ trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
rnp->level, rnp->grplo,
rnp->grphi, rnp->qsmask);
- raw_spin_unlock_irq_rcu_node(rnp);
+ /* Quiescent states for tasks on any now-offline CPUs. */
+ mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+ rnp->rcu_gp_init_mask = mask;
+ if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ else
+ raw_spin_unlock_irq_rcu_node(rnp);
cond_resched_tasks_rcu_qs();
WRITE_ONCE(rsp->gp_activity, jiffies);
}
@@ -2002,7 +2015,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
}
/*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
* time.
*/
static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2053,6 +2066,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
{
unsigned long gp_duration;
bool needgp = false;
+ unsigned long new_gp_seq;
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
struct swait_queue_head *sq;
@@ -2074,19 +2088,22 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
raw_spin_unlock_irq_rcu_node(rnp);
/*
- * Propagate new ->completed value to rcu_node structures so
- * that other CPUs don't have to wait until the start of the next
- * grace period to process their callbacks. This also avoids
- * some nasty RCU grace-period initialization races by forcing
- * the end of the current grace period to be completely recorded in
- * all of the rcu_node structures before the beginning of the next
- * grace period is recorded in any of the rcu_node structures.
+ * Propagate new ->gp_seq value to rcu_node structures so that
+ * other CPUs don't have to wait until the start of the next grace
+ * period to process their callbacks. This also avoids some nasty
+ * RCU grace-period initialization races by forcing the end of
+ * the current grace period to be completely recorded in all of
+ * the rcu_node structures before the beginning of the next grace
+ * period is recorded in any of the rcu_node structures.
*/
+ new_gp_seq = rsp->gp_seq;
+ rcu_seq_end(&new_gp_seq);
rcu_for_each_node_breadth_first(rsp, rnp) {
raw_spin_lock_irq_rcu_node(rnp);
- WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+ if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+ dump_blkd_tasks(rsp, rnp, 10);
WARN_ON_ONCE(rnp->qsmask);
- WRITE_ONCE(rnp->completed, rsp->gpnum);
+ WRITE_ONCE(rnp->gp_seq, new_gp_seq);
rdp = this_cpu_ptr(rsp->rda);
if (rnp == rdp->mynode)
needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -2100,26 +2117,28 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
rcu_gp_slow(rsp, gp_cleanup_delay);
}
rnp = rcu_get_root(rsp);
- raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
+ raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
/* Declare grace period done. */
- WRITE_ONCE(rsp->completed, rsp->gpnum);
- trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
+ rcu_seq_end(&rsp->gp_seq);
+ trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
rsp->gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(rsp->rda);
- if (need_any_future_gp(rnp)) {
- trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+ if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
+ trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
TPS("CleanupMore"));
needgp = true;
}
/* Advance CBs to reduce false positives below. */
if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
+ rsp->gp_req_activity = jiffies;
+ trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
TPS("newreq"));
+ } else {
+ WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
}
- WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
raw_spin_unlock_irq_rcu_node(rnp);
}
@@ -2141,10 +2160,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
/* Handle grace-period start. */
for (;;) {
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("reqwait"));
rsp->gp_state = RCU_GP_WAIT_GPS;
- swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+ swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT);
rsp->gp_state = RCU_GP_DONE_GPS;
/* Locking provides needed memory barrier. */
@@ -2154,17 +2173,13 @@ static int __noreturn rcu_gp_kthread(void *arg)
WRITE_ONCE(rsp->gp_activity, jiffies);
WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("reqwaitsig"));
}
/* Handle quiescent-state forcing. */
first_gp_fqs = true;
j = jiffies_till_first_fqs;
- if (j > HZ) {
- j = HZ;
- jiffies_till_first_fqs = HZ;
- }
ret = 0;
for (;;) {
if (!ret) {
@@ -2173,10 +2188,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
jiffies + 3 * j);
}
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqswait"));
rsp->gp_state = RCU_GP_WAIT_FQS;
- ret = swait_event_idle_timeout(rsp->gp_wq,
+ ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
rcu_gp_fqs_check_wake(rsp, &gf), j);
rsp->gp_state = RCU_GP_DOING_FQS;
/* Locking provides needed memory barriers. */
@@ -2188,31 +2203,24 @@ static int __noreturn rcu_gp_kthread(void *arg)
if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
(gf & RCU_GP_FLAG_FQS)) {
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqsstart"));
rcu_gp_fqs(rsp, first_gp_fqs);
first_gp_fqs = false;
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqsend"));
cond_resched_tasks_rcu_qs();
WRITE_ONCE(rsp->gp_activity, jiffies);
ret = 0; /* Force full wait till next FQS. */
j = jiffies_till_next_fqs;
- if (j > HZ) {
- j = HZ;
- jiffies_till_next_fqs = HZ;
- } else if (j < 1) {
- j = 1;
- jiffies_till_next_fqs = 1;
- }
} else {
/* Deal with stray signal. */
cond_resched_tasks_rcu_qs();
WRITE_ONCE(rsp->gp_activity, jiffies);
WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqswaitsig"));
ret = 1; /* Keep old FQS timing. */
j = jiffies;
@@ -2256,8 +2264,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
* must be represented by the same rcu_node structure (which need not be a
* leaf rcu_node structure, though it often will be). The gps parameter
* is the grace-period snapshot, which means that the quiescent states
- * are valid only if rnp->gpnum is equal to gps. That structure's lock
+ * are valid only if rnp->gp_seq is equal to gps. That structure's lock
* must be held upon entry, and it is released before return.
+ *
+ * As a special case, if mask is zero, the bit-already-cleared check is
+ * disabled. This allows propagating quiescent state due to resumed tasks
+ * during grace-period initialization.
*/
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
@@ -2271,7 +2283,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
/* Walk up the rcu_node hierarchy. */
for (;;) {
- if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
+ if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
/*
* Our bit has already been cleared, or the
@@ -2284,7 +2296,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
rcu_preempt_blocked_readers_cgp(rnp));
rnp->qsmask &= ~mask;
- trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+ trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
mask, rnp->qsmask, rnp->level,
rnp->grplo, rnp->grphi,
!!rnp->gp_tasks);
@@ -2294,6 +2306,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}
+ rnp->completedqs = rnp->gp_seq;
mask = rnp->grpmask;
if (rnp->parent == NULL) {
@@ -2323,8 +2336,9 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
* irqs disabled, and this lock is released upon return, but irqs remain
* disabled.
*/
-static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
- struct rcu_node *rnp, unsigned long flags)
+static void __maybe_unused
+rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+ struct rcu_node *rnp, unsigned long flags)
__releases(rnp->lock)
{
unsigned long gps;
@@ -2332,12 +2346,15 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
struct rcu_node *rnp_p;
raw_lockdep_assert_held_rcu_node(rnp);
- if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
- rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+ if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
+ WARN_ON_ONCE(rsp != rcu_state_p) ||
+ WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
+ rnp->qsmask != 0) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return; /* Still need more quiescent states! */
}
+ rnp->completedqs = rnp->gp_seq;
rnp_p = rnp->parent;
if (rnp_p == NULL) {
/*
@@ -2348,8 +2365,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
return;
}
- /* Report up the rest of the hierarchy, tracking current ->gpnum. */
- gps = rnp->gpnum;
+ /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
+ gps = rnp->gp_seq;
mask = rnp->grpmask;
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
@@ -2370,8 +2387,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
rnp = rdp->mynode;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
- rnp->completed == rnp->gpnum || rdp->gpwrap) {
+ if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
+ rdp->gpwrap) {
/*
* The grace period in which this quiescent state was
@@ -2396,7 +2413,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
*/
needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
/* ^^^ Released rnp->lock */
if (needwake)
rcu_gp_kthread_wake(rsp);
@@ -2441,17 +2458,16 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
- RCU_TRACE(unsigned long mask;)
+ RCU_TRACE(bool blkd;)
RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
return;
- RCU_TRACE(mask = rdp->grpmask;)
- trace_rcu_grace_period(rsp->name,
- rnp->gpnum + 1 - !!(rnp->qsmask & mask),
- TPS("cpuofl"));
+ RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
+ trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+ blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
}
/*
@@ -2463,7 +2479,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
* This function therefore goes up the tree of rcu_node structures,
* clearing the corresponding bits in the ->qsmaskinit fields. Note that
* the leaf rcu_node structure's ->qsmaskinit field has already been
- * updated
+ * updated.
*
* This function does check that the specified rcu_node structure has
* all CPUs offline and no blocked tasks, so it is OK to invoke it
@@ -2476,9 +2492,10 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
long mask;
struct rcu_node *rnp = rnp_leaf;
- raw_lockdep_assert_held_rcu_node(rnp);
+ raw_lockdep_assert_held_rcu_node(rnp_leaf);
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
- rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+ WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
+ WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
return;
for (;;) {
mask = rnp->grpmask;
@@ -2487,7 +2504,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
break;
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rnp->qsmaskinit &= ~mask;
- rnp->qsmask &= ~mask;
+ /* Between grace periods, so better already be zero! */
+ WARN_ON_ONCE(rnp->qsmask);
if (rnp->qsmaskinit) {
raw_spin_unlock_rcu_node(rnp);
/* irqs remain disabled. */
@@ -2630,6 +2648,7 @@ void rcu_check_callbacks(int user)
rcu_sched_qs();
rcu_bh_qs();
+ rcu_note_voluntary_context_switch(current);
} else if (!in_softirq()) {
@@ -2645,8 +2664,7 @@ void rcu_check_callbacks(int user)
rcu_preempt_check_callbacks();
if (rcu_pending())
invoke_rcu_core();
- if (user)
- rcu_note_voluntary_context_switch(current);
+
trace_rcu_utilization(TPS("End scheduler-tick"));
}
@@ -2681,17 +2699,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
/* rcu_initiate_boost() releases rnp->lock */
continue;
}
- if (rnp->parent &&
- (rnp->parent->qsmask & rnp->grpmask)) {
- /*
- * Race between grace-period
- * initialization and task exiting RCU
- * read-side critical section: Report.
- */
- rcu_report_unblock_qs_rnp(rsp, rnp, flags);
- /* rcu_report_unblock_qs_rnp() rlses ->lock */
- continue;
- }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ continue;
}
for_each_leaf_node_possible_cpu(rnp, cpu) {
unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
@@ -2701,8 +2710,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
}
}
if (mask != 0) {
- /* Idle/offline CPUs, report (releases rnp->lock. */
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+ /* Idle/offline CPUs, report (releases rnp->lock). */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
} else {
/* Nothing to do here, so just drop the lock. */
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2747,6 +2756,65 @@ static void force_quiescent_state(struct rcu_state *rsp)
}
/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void
+rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
+ struct rcu_data *rdp)
+{
+ const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
+ unsigned long flags;
+ unsigned long j;
+ struct rcu_node *rnp_root = rcu_get_root(rsp);
+ static atomic_t warned = ATOMIC_INIT(0);
+
+ if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+ return;
+ j = jiffies; /* Expensive access, and in common case don't get here. */
+ if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+ atomic_read(&warned))
+ return;
+
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ j = jiffies;
+ if (rcu_gp_in_progress(rsp) ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+ time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+ atomic_read(&warned)) {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
+ }
+ /* Hold onto the leaf lock to make others see warned==1. */
+
+ if (rnp_root != rnp)
+ raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+ j = jiffies;
+ if (rcu_gp_in_progress(rsp) ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+ time_before(j, rsp->gp_req_activity + gpssdelay) ||
+ time_before(j, rsp->gp_activity + gpssdelay) ||
+ atomic_xchg(&warned, 1)) {
+ raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
+ }
+ pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
+ __func__, (long)READ_ONCE(rsp->gp_seq),
+ (long)READ_ONCE(rnp_root->gp_seq_needed),
+ j - rsp->gp_req_activity, j - rsp->gp_activity,
+ rsp->gp_flags, rsp->gp_state, rsp->name,
+ rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+ WARN_ON(1);
+ if (rnp_root != rnp)
+ raw_spin_unlock_rcu_node(rnp_root);
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
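
rcu_check_gp_start_stall() applies the same tests three times: once
locklessly, once under the leaf rcu_node lock, and once under the root
lock, bailing out at each level if the grace period started or another
CPU already warned. A minimal userspace sketch of this
check/lock/recheck idiom (illustrative only; condition_holds() is a
stand-in for the stall tests):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool condition_holds(void) { return true; }      /* stand-in */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int warned;

static void maybe_warn(void)
{
        /* Cheap lockless filter: the common case takes no lock. */
        if (!condition_holds() || atomic_load(&warned))
                return;

        pthread_mutex_lock(&lock);
        /* Re-check under the lock; the exchange lets one caller win. */
        if (!condition_holds() || atomic_exchange(&warned, 1)) {
                pthread_mutex_unlock(&lock);
                return;
        }
        printf("warning fires exactly once\n");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        maybe_warn();
        maybe_warn();   /* filtered by 'warned' */
        return 0;
}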
+
+/*
* This does the RCU core processing work for the specified rcu_state
* and rcu_data structures. This may be called only from the CPU to
* whom the rdp belongs.
@@ -2755,9 +2823,8 @@ static void
__rcu_process_callbacks(struct rcu_state *rsp)
{
unsigned long flags;
- bool needwake;
struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
- struct rcu_node *rnp;
+ struct rcu_node *rnp = rdp->mynode;
WARN_ON_ONCE(!rdp->beenonline);
@@ -2768,18 +2835,13 @@ __rcu_process_callbacks(struct rcu_state *rsp)
if (!rcu_gp_in_progress(rsp) &&
rcu_segcblist_is_enabled(&rdp->cblist)) {
local_irq_save(flags);
- if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
- local_irq_restore(flags);
- } else {
- rnp = rdp->mynode;
- raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- if (needwake)
- rcu_gp_kthread_wake(rsp);
- }
+ if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
+ rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+ local_irq_restore(flags);
}
+ rcu_check_gp_start_stall(rsp, rnp, rdp);
+
/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_callbacks(rsp, rdp);
@@ -2833,8 +2895,6 @@ static void invoke_rcu_core(void)
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
struct rcu_head *head, unsigned long flags)
{
- bool needwake;
-
/*
* If called from an extended quiescent state, invoke the RCU
* core in order to force a re-evaluation of RCU's idleness.
@@ -2861,13 +2921,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
/* Start a new grace period if one not already started. */
if (!rcu_gp_in_progress(rsp)) {
- struct rcu_node *rnp = rdp->mynode;
-
- raw_spin_lock_rcu_node(rnp);
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
- raw_spin_unlock_rcu_node(rnp);
- if (needwake)
- rcu_gp_kthread_wake(rsp);
+ rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
} else {
/* Give the grace period a kick. */
rdp->blimit = LONG_MAX;
@@ -3037,7 +3091,7 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
* when there was in fact only one the whole time, as this just adds
* some overhead: RCU still operates correctly.
*/
-static inline int rcu_blocking_is_gp(void)
+static int rcu_blocking_is_gp(void)
{
int ret;
@@ -3136,16 +3190,10 @@ unsigned long get_state_synchronize_rcu(void)
{
/*
* Any prior manipulation of RCU-protected data must happen
- * before the load from ->gpnum.
+ * before the load from ->gp_seq.
*/
smp_mb(); /* ^^^ */
-
- /*
- * Make sure this load happens before the purportedly
- * time-consuming work between get_state_synchronize_rcu()
- * and cond_synchronize_rcu().
- */
- return smp_load_acquire(&rcu_state_p->gpnum);
+ return rcu_seq_snap(&rcu_state_p->gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
@@ -3165,15 +3213,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
*/
void cond_synchronize_rcu(unsigned long oldstate)
{
- unsigned long newstate;
-
- /*
- * Ensure that this load happens before any RCU-destructive
- * actions the caller might carry out after we return.
- */
- newstate = smp_load_acquire(&rcu_state_p->completed);
- if (ULONG_CMP_GE(oldstate, newstate))
+ if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
synchronize_rcu();
+ else
+ smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
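
With the move to ->gp_seq, the value returned by
get_state_synchronize_rcu() is an rcu_seq_snap() cookie, and
cond_synchronize_rcu() blocks only when rcu_seq_done() reports that the
cookie's grace period has not yet completed. A hypothetical caller
(sketch only; do_other_work() is a placeholder, not a kernel function)
uses the pair like this:

/* Sketch of the intended calling pattern. */
void example_update(void)
{
        unsigned long s;

        s = get_state_synchronize_rcu();   /* cookie for a future GP */
        do_other_work();                   /* ideally long-running work */
        cond_synchronize_rcu(s);           /* no-op if that GP has ended */
}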
@@ -3188,16 +3231,10 @@ unsigned long get_state_synchronize_sched(void)
{
/*
* Any prior manipulation of RCU-protected data must happen
- * before the load from ->gpnum.
+ * before the load from ->gp_seq.
*/
smp_mb(); /* ^^^ */
-
- /*
- * Make sure this load happens before the purportedly
- * time-consuming work between get_state_synchronize_sched()
- * and cond_synchronize_sched().
- */
- return smp_load_acquire(&rcu_sched_state.gpnum);
+ return rcu_seq_snap(&rcu_sched_state.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
@@ -3217,15 +3254,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
*/
void cond_synchronize_sched(unsigned long oldstate)
{
- unsigned long newstate;
-
- /*
- * Ensure that this load happens before any RCU-destructive
- * actions the caller might carry out after we return.
- */
- newstate = smp_load_acquire(&rcu_sched_state.completed);
- if (ULONG_CMP_GE(oldstate, newstate))
+ if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
synchronize_sched();
+ else
+ smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_sched);
@@ -3261,12 +3293,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
return 1;
- /* Has another RCU grace period completed? */
- if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
- return 1;
-
- /* Has a new RCU grace period started? */
- if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+ /* Have RCU grace period completed or started? */
+ if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
return 1;
@@ -3298,7 +3326,7 @@ static int rcu_pending(void)
* non-NULL, store an indication of whether all callbacks are lazy.
* (If there are no callbacks, all of them are deemed to be lazy.)
*/
-static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool rcu_cpu_has_callbacks(bool *all_lazy)
{
bool al = true;
bool hc = false;
@@ -3484,17 +3512,22 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
long mask;
+ long oldmask;
struct rcu_node *rnp = rnp_leaf;
- raw_lockdep_assert_held_rcu_node(rnp);
+ raw_lockdep_assert_held_rcu_node(rnp_leaf);
+ WARN_ON_ONCE(rnp->wait_blkd_tasks);
for (;;) {
mask = rnp->grpmask;
rnp = rnp->parent;
if (rnp == NULL)
return;
raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
+ oldmask = rnp->qsmaskinit;
rnp->qsmaskinit |= mask;
raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
+ if (oldmask)
+ return;
}
}
@@ -3511,6 +3544,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+ rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+ rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+ rdp->rcu_onl_gp_seq = rsp->gp_seq;
+ rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
rdp->cpu = cpu;
rdp->rsp = rsp;
rcu_boot_init_nocb_percpu_data(rdp);
@@ -3518,9 +3555,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
/*
* Initialize a CPU's per-CPU RCU data. Note that only one online or
- * offline event can be happening at a given time. Note also that we
- * can accept some slop in the rsp->completed access due to the fact
- * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ * offline event can be happening at a given time. Note also that we can
+ * accept some slop in the rsp->gp_seq access due to the fact that this
+ * CPU cannot possibly have any RCU callbacks in flight yet.
*/
static void
rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
@@ -3549,14 +3586,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rdp->beenonline = true; /* We have now been online. */
- rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
- rdp->completed = rnp->completed;
+ rdp->gp_seq = rnp->gp_seq;
+ rdp->gp_seq_needed = rnp->gp_seq;
rdp->cpu_no_qs.b.norm = true;
rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
rdp->core_needs_qs = false;
rdp->rcu_iw_pending = false;
- rdp->rcu_iw_gpnum = rnp->gpnum - 1;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+ rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
@@ -3705,7 +3742,15 @@ void rcu_cpu_starting(unsigned int cpu)
nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
/* Allow lockless access for expedited grace periods. */
smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+ rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+ rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+ if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+ /* Report QS -after- changing ->qsmaskinitnext! */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ } else {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
}
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}
@@ -3713,7 +3758,7 @@ void rcu_cpu_starting(unsigned int cpu)
#ifdef CONFIG_HOTPLUG_CPU
/*
* The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function. We now remove it from the rcu_node tree's ->qsmaskinit
+ * function. We now remove it from the rcu_node tree's ->qsmaskinitnext
* bit masks.
*/
static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
@@ -3725,9 +3770,18 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
mask = rdp->grpmask;
+ spin_lock(&rsp->ofl_lock);
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+ rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+ rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+ if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
+ /* Report quiescent state -before- changing ->qsmaskinitnext! */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ }
rnp->qsmaskinitnext &= ~mask;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ spin_unlock(&rsp->ofl_lock);
}
/*
@@ -3839,12 +3893,16 @@ static int __init rcu_spawn_gp_kthread(void)
struct task_struct *t;
/* Force priority into range. */
- if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+ if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
+ && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
+ kthread_prio = 2;
+ else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
kthread_prio = 1;
else if (kthread_prio < 0)
kthread_prio = 0;
else if (kthread_prio > 99)
kthread_prio = 99;
+
if (kthread_prio != kthread_prio_in)
pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
kthread_prio, kthread_prio_in);
@@ -3928,8 +3986,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
raw_spin_lock_init(&rnp->fqslock);
lockdep_set_class_and_name(&rnp->fqslock,
&rcu_fqs_class[i], fqs[i]);
- rnp->gpnum = rsp->gpnum;
- rnp->completed = rsp->completed;
+ rnp->gp_seq = rsp->gp_seq;
+ rnp->gp_seq_needed = rsp->gp_seq;
+ rnp->completedqs = rsp->gp_seq;
rnp->qsmask = 0;
rnp->qsmaskinit = 0;
rnp->grplo = j * cpustride;
@@ -3997,7 +4056,7 @@ static void __init rcu_init_geometry(void)
if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
nr_cpu_ids == NR_CPUS)
return;
- pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
+ pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
rcu_fanout_leaf, nr_cpu_ids);
/*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 78e051dffc5b..4e74df768c57 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -81,18 +81,16 @@ struct rcu_node {
raw_spinlock_t __private lock; /* Root rcu_node's lock protects */
/* some rcu_state fields as well as */
/* following. */
- unsigned long gpnum; /* Current grace period for this node. */
- /* This will either be equal to or one */
- /* behind the root rcu_node's gpnum. */
- unsigned long completed; /* Last GP completed for this node. */
- /* This will either be equal to or one */
- /* behind the root rcu_node's gpnum. */
+ unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */
+ unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+ unsigned long completedqs; /* All QSes done for this node. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
/* In leaf rcu_node, each bit corresponds to */
/* an rcu_data structure, otherwise, each */
/* bit corresponds to a child rcu_node */
/* structure. */
+ unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask. */
/* Initialized from ->qsmaskinitnext at the */
@@ -158,7 +156,6 @@ struct rcu_node {
struct swait_queue_head nocb_gp_wq[2];
/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- u8 need_future_gp[4]; /* Counts of upcoming GP requests. */
raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
@@ -168,22 +165,6 @@ struct rcu_node {
bool exp_need_flush; /* Need to flush workitem? */
} ____cacheline_internodealigned_in_smp;
-/* Accessors for ->need_future_gp[] array. */
-#define need_future_gp_mask() \
- (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
-#define need_future_gp_element(rnp, c) \
- ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
-#define need_any_future_gp(rnp) \
-({ \
- int __i; \
- bool __nonzero = false; \
- \
- for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++) \
- __nonzero = __nonzero || \
- READ_ONCE((rnp)->need_future_gp[__i]); \
- __nonzero; \
-})
-
/*
* Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
* are indexed relative to this interval rather than the global CPU ID space.
@@ -206,16 +187,14 @@ union rcu_noqs {
/* Per-CPU data for read-copy update. */
struct rcu_data {
/* 1) quiescent-state and grace-period handling : */
- unsigned long completed; /* Track rsp->completed gp number */
- /* in order to detect GP end. */
- unsigned long gpnum; /* Highest gp number that this CPU */
- /* is aware of having started. */
+ unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
+ unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
/* for rcu_all_qs() invocations. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
- bool gpwrap; /* Possible gpnum/completed wrap. */
+ bool gpwrap; /* Possible ->gp_seq wrap. */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
unsigned long ticks_this_gp; /* The number of scheduling-clock */
@@ -239,7 +218,6 @@ struct rcu_data {
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
- unsigned long offline_fqs; /* Kicked due to being offline. */
unsigned long cond_resched_completed;
/* Grace period that needs help */
/* from cond_resched(). */
@@ -278,12 +256,16 @@ struct rcu_data {
/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- /* 7) RCU CPU stall data. */
+ /* 7) Diagnostic data, including RCU CPU stall warnings. */
unsigned int softirq_snap; /* Snapshot of softirq activity. */
/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
struct irq_work rcu_iw; /* Check for non-irq activity. */
bool rcu_iw_pending; /* Is ->rcu_iw pending? */
- unsigned long rcu_iw_gpnum; /* ->gpnum associated with ->rcu_iw. */
+ unsigned long rcu_iw_gp_seq; /* ->gp_seq associated with ->rcu_iw. */
+ unsigned long rcu_ofl_gp_seq; /* ->gp_seq at last offline. */
+ short rcu_ofl_gp_flags; /* ->gp_flags at last offline. */
+ unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */
+ short rcu_onl_gp_flags; /* ->gp_flags at last online. */
int cpu;
struct rcu_state *rsp;
@@ -340,8 +322,7 @@ struct rcu_state {
u8 boost ____cacheline_internodealigned_in_smp;
/* Subject to priority boost. */
- unsigned long gpnum; /* Current gp number. */
- unsigned long completed; /* # of last completed gp. */
+ unsigned long gp_seq; /* Grace-period sequence #. */
struct task_struct *gp_kthread; /* Task for grace periods. */
struct swait_queue_head gp_wq; /* Where GP task waits. */
short gp_flags; /* Commands for GP task. */
@@ -373,6 +354,8 @@ struct rcu_state {
/* but in jiffies. */
unsigned long gp_activity; /* Time of last GP kthread */
/* activity in jiffies. */
+ unsigned long gp_req_activity; /* Time of last GP request */
+ /* in jiffies. */
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
unsigned long jiffies_resched; /* Time at which to resched */
@@ -384,6 +367,10 @@ struct rcu_state {
const char *name; /* Name of structure. */
char abbr; /* Abbreviated name. */
struct list_head flavors; /* List of RCU flavors. */
+
+ spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+ /* Synchronize offline with */
+ /* GP pre-initialization. */
};
/* Values for rcu_state structure's gp_flags field. */
@@ -394,16 +381,20 @@ struct rcu_state {
#define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */
#define RCU_GP_DONE_GPS 2 /* Wait done for grace-period start. */
-#define RCU_GP_WAIT_FQS 3 /* Wait for force-quiescent-state time. */
-#define RCU_GP_DOING_FQS 4 /* Wait done for force-quiescent-state time. */
-#define RCU_GP_CLEANUP 5 /* Grace-period cleanup started. */
-#define RCU_GP_CLEANED 6 /* Grace-period cleanup complete. */
+#define RCU_GP_ONOFF 3 /* Grace-period initialization hotplug. */
+#define RCU_GP_INIT 4 /* Grace-period initialization. */
+#define RCU_GP_WAIT_FQS 5 /* Wait for force-quiescent-state time. */
+#define RCU_GP_DOING_FQS 6 /* Wait done for force-quiescent-state time. */
+#define RCU_GP_CLEANUP 7 /* Grace-period cleanup started. */
+#define RCU_GP_CLEANED 8 /* Grace-period cleanup complete. */
#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
"RCU_GP_IDLE",
"RCU_GP_WAIT_GPS",
"RCU_GP_DONE_GPS",
+ "RCU_GP_ONOFF",
+ "RCU_GP_INIT",
"RCU_GP_WAIT_FQS",
"RCU_GP_DOING_FQS",
"RCU_GP_CLEANUP",
@@ -449,10 +440,13 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
+ struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
+static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
+ int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
@@ -489,7 +483,6 @@ static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d40708e8c5d6..0b2c2ad69629 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (wake) {
smp_mb(); /* EGP done before wake_up(). */
- swake_up(&rsp->expedited_wq);
+ swake_up_one(&rsp->expedited_wq);
}
break;
}
@@ -472,6 +472,7 @@ retry_ipi:
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
smp_call_func_t func)
{
+ int cpu;
struct rcu_node *rnp;
trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -486,13 +487,20 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
rnp->rew.rew_func = func;
rnp->rew.rew_rsp = rsp;
if (!READ_ONCE(rcu_par_gp_wq) ||
- rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
- /* No workqueues yet. */
+ rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
+ rcu_is_last_leaf_node(rsp, rnp)) {
+ /* No workqueues yet or last leaf, do direct call. */
sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
continue;
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
- queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
+ preempt_disable();
+ cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+ /* If all offline, queue the work on an unbound CPU. */
+ if (unlikely(cpu > rnp->grphi))
+ cpu = WORK_CPU_UNBOUND;
+ queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+ preempt_enable();
rnp->exp_need_flush = true;
}
@@ -518,7 +526,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
jiffies_start = jiffies;
for (;;) {
- ret = swait_event_timeout(
+ ret = swait_event_timeout_exclusive(
rsp->expedited_wq,
sync_rcu_preempt_exp_done_unlocked(rnp_root),
jiffies_stall);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 7fd12039e512..a97c20ea9bce 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -74,8 +74,8 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tRCU event tracing is enabled.\n");
if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
(!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
- pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
- RCU_FANOUT);
+ pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
+ RCU_FANOUT);
if (rcu_fanout_exact)
pr_info("\tHierarchical RCU autobalancing is disabled.\n");
if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
@@ -88,11 +88,13 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
RCU_FANOUT_LEAF);
if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
- pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+ pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
+ rcu_fanout_leaf);
if (nr_cpu_ids != NR_CPUS)
pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
- pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
+ pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
+ kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
if (blimit != DEFAULT_RCU_BLIMIT)
pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
@@ -127,6 +129,7 @@ static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
bool wake);
+static void rcu_read_unlock_special(struct task_struct *t);
/*
* Tell them what RCU they are running.
@@ -183,6 +186,9 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
raw_lockdep_assert_held_rcu_node(rnp);
WARN_ON_ONCE(rdp->mynode != rnp);
WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
+ /* RCU better not be waiting on newly onlined CPUs! */
+ WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
+ rdp->grpmask);
/*
* Decide where to queue the newly blocked task. In theory,
@@ -260,8 +266,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
* ->exp_tasks pointers, respectively, to reference the newly
* blocked tasks.
*/
- if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+ if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
rnp->gp_tasks = &t->rcu_node_entry;
+ WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
+ }
if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
rnp->exp_tasks = &t->rcu_node_entry;
WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
@@ -286,20 +294,24 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
}
/*
- * Record a preemptible-RCU quiescent state for the specified CPU. Note
- * that this just means that the task currently running on the CPU is
- * not in a quiescent state. There might be any number of tasks blocked
- * while in an RCU read-side critical section.
+ * Record a preemptible-RCU quiescent state for the specified CPU.
+ * Note that this does not necessarily mean that the task currently running
+ * on the CPU is in a quiescent state: Instead, it means that the current
+ * grace period need not wait on any RCU read-side critical section that
+ * starts later on this CPU. It also means that if the current task is
+ * in an RCU read-side critical section, it has already added itself to
+ * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
+ * current task, there might be any number of other tasks blocked while
+ * in an RCU read-side critical section.
*
- * As with the other rcu_*_qs() functions, callers to this function
- * must disable preemption.
+ * Callers to this function must disable preemption.
*/
static void rcu_preempt_qs(void)
{
RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
trace_rcu_grace_period(TPS("rcu_preempt"),
- __this_cpu_read(rcu_data_p->gpnum),
+ __this_cpu_read(rcu_data_p->gp_seq),
TPS("cpuqs"));
__this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
@@ -348,8 +360,8 @@ static void rcu_preempt_note_context_switch(bool preempt)
trace_rcu_preempt_task(rdp->rsp->name,
t->pid,
(rnp->qsmask & rdp->grpmask)
- ? rnp->gpnum
- : rnp->gpnum + 1);
+ ? rnp->gp_seq
+ : rcu_seq_snap(&rnp->gp_seq));
rcu_preempt_ctxt_queue(rnp, rdp);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special.s) {
@@ -456,7 +468,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-void rcu_read_unlock_special(struct task_struct *t)
+static void rcu_read_unlock_special(struct task_struct *t)
{
bool empty_exp;
bool empty_norm;
@@ -535,13 +547,15 @@ void rcu_read_unlock_special(struct task_struct *t)
WARN_ON_ONCE(rnp != t->rcu_blocked_node);
WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
+ WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
+ (!empty_norm || rnp->qsmask));
empty_exp = sync_rcu_preempt_exp_done(rnp);
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
np = rcu_next_node_entry(t, rnp);
list_del_init(&t->rcu_node_entry);
t->rcu_blocked_node = NULL;
trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
- rnp->gpnum, t->pid);
+ rnp->gp_seq, t->pid);
if (&t->rcu_node_entry == rnp->gp_tasks)
rnp->gp_tasks = np;
if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -562,7 +576,7 @@ void rcu_read_unlock_special(struct task_struct *t)
empty_exp_now = sync_rcu_preempt_exp_done(rnp);
if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
- rnp->gpnum,
+ rnp->gp_seq,
0, rnp->qsmask,
rnp->level,
rnp->grplo,
@@ -686,24 +700,27 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
* Check that the list of blocked tasks for the newly completed grace
* period is in fact empty. It is a serious bug to complete a grace
* period that still has RCU readers blocked! This function must be
- * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
+ * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
* must be held by the caller.
*
* Also, if there are blocked tasks on the list, they automatically
* block the newly created grace period, so set up ->gp_tasks accordingly.
*/
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
{
struct task_struct *t;
RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
- WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
- if (rcu_preempt_has_tasks(rnp)) {
+ if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+ dump_blkd_tasks(rsp, rnp, 10);
+ if (rcu_preempt_has_tasks(rnp) &&
+ (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
rnp->gp_tasks = rnp->blkd_tasks.next;
t = container_of(rnp->gp_tasks, struct task_struct,
rcu_node_entry);
trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
- rnp->gpnum, t->pid);
+ rnp->gp_seq, t->pid);
}
WARN_ON_ONCE(rnp->qsmask);
}
@@ -717,6 +734,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
*/
static void rcu_preempt_check_callbacks(void)
{
+ struct rcu_state *rsp = &rcu_preempt_state;
struct task_struct *t = current;
if (t->rcu_read_lock_nesting == 0) {
@@ -725,7 +743,9 @@ static void rcu_preempt_check_callbacks(void)
}
if (t->rcu_read_lock_nesting > 0 &&
__this_cpu_read(rcu_data_p->core_needs_qs) &&
- __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
+ __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
+ !t->rcu_read_unlock_special.b.need_qs &&
+ time_after(jiffies, rsp->gp_start + HZ))
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -841,6 +861,47 @@ void exit_rcu(void)
__rcu_read_unlock();
}
+/*
+ * Dump the blocked-tasks state, but limit the list dump to the
+ * specified number of elements.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+ int cpu;
+ int i;
+ struct list_head *lhp;
+ bool onl;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp1;
+
+ raw_lockdep_assert_held_rcu_node(rnp);
+ pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+ __func__, rnp->grplo, rnp->grphi, rnp->level,
+ (long)rnp->gp_seq, (long)rnp->completedqs);
+ for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+ pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
+ __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
+ pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
+ __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
+ pr_info("%s: ->blkd_tasks", __func__);
+ i = 0;
+ list_for_each(lhp, &rnp->blkd_tasks) {
+ pr_cont(" %p", lhp);
+ if (++i >= 10)
+ break;
+ }
+ pr_cont("\n");
+ for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+ pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
+ cpu, ".o"[onl],
+ (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+ (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+ }
+}
+
#else /* #ifdef CONFIG_PREEMPT_RCU */
static struct rcu_state *const rcu_state_p = &rcu_sched_state;
@@ -911,7 +972,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
* so there is no need to check for blocked tasks. So check only for
* bogus qsmask values.
*/
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
{
WARN_ON_ONCE(rnp->qsmask);
}
@@ -949,6 +1011,15 @@ void exit_rcu(void)
{
}
+/*
+ * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+ WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
+}
+
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
@@ -1433,7 +1504,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
* completed since we last checked and there are
* callbacks not yet ready to invoke.
*/
- if ((rdp->completed != rnp->completed ||
+ if ((rcu_seq_completed_gp(rdp->gp_seq,
+ rcu_seq_current(&rnp->gp_seq)) ||
unlikely(READ_ONCE(rdp->gpwrap))) &&
rcu_segcblist_pend_cbs(&rdp->cblist))
note_gp_changes(rsp, rdp);
@@ -1720,16 +1792,16 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
*/
touch_nmi_watchdog();
- if (rsp->gpnum == rdp->gpnum) {
+ ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+ if (ticks_value) {
+ ticks_title = "GPs behind";
+ } else {
ticks_title = "ticks this GP";
ticks_value = rdp->ticks_this_gp;
- } else {
- ticks_title = "GPs behind";
- ticks_value = rsp->gpnum - rdp->gpnum;
}
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
- delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
- pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
+ delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
+ pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
cpu,
"O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1817,7 +1889,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
- return &rnp->nocb_gp_wq[rnp->completed & 0x1];
+ return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}
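
The nocb grace-period wait queues come in pairs indexed by grace-period
parity, so waiters on consecutive grace periods never share a queue.
With ->gp_seq, that parity must come from rcu_seq_ctr() rather than the
raw counter, whose low bits now hold phase state. A trivial sketch of
the indexing (illustrative only):

#include <stdio.h>

int main(void)
{
        /* Waiters for GP N and GP N+1 land on different queues. */
        for (unsigned long ctr = 0; ctr < 4; ctr++)
                printf("GP ctr %lu -> nocb_gp_wq[%lu]\n", ctr, ctr & 0x1);
        return 0;
}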
static void rcu_init_one_nocb(struct rcu_node *rnp)
@@ -1854,8 +1926,8 @@ static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
del_timer(&rdp->nocb_timer);
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
- smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
- swake_up(&rdp_leader->nocb_wq);
+ smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+ swake_up_one(&rdp_leader->nocb_wq);
} else {
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}
@@ -2069,12 +2141,17 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
bool needwake;
struct rcu_node *rnp = rdp->mynode;
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- c = rcu_cbs_completed(rdp->rsp, rnp);
- needwake = rcu_start_this_gp(rnp, rdp, c);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- if (needwake)
- rcu_gp_kthread_wake(rdp->rsp);
+ local_irq_save(flags);
+ c = rcu_seq_snap(&rdp->rsp->gp_seq);
+ if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+ local_irq_restore(flags);
+ } else {
+ raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+ needwake = rcu_start_this_gp(rnp, rdp, c);
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ if (needwake)
+ rcu_gp_kthread_wake(rdp->rsp);
+ }
/*
* Wait for the grace period. Do so interruptibly to avoid messing
@@ -2082,9 +2159,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
*/
trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
- swait_event_interruptible(
- rnp->nocb_gp_wq[c & 0x1],
- (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
+ swait_event_interruptible_exclusive(
+ rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
+ (d = rcu_seq_done(&rnp->gp_seq, c)));
if (likely(d))
break;
WARN_ON(signal_pending(current));
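
The fastpath added above leans on ULONG_CMP_GE() so the gp_seq_needed
comparison stays correct even across counter wrap. Its definition
(paraphrased from kernel/rcu/rcu.h; treat the exact form as an
assumption of this sketch) reduces to modular subtraction:

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a >= b" for free-running counters: true when the modular
 * difference a - b lands in the lower half of the counter space. */
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
        unsigned long a = 5, b = ULONG_MAX - 2; /* a wrapped past b */

        printf("%d\n", ULONG_CMP_GE(a, b));     /* prints 1 */
        return 0;
}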
@@ -2111,7 +2188,7 @@ wait_again:
/* Wait for callbacks to appear. */
if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
- swait_event_interruptible(my_rdp->nocb_wq,
+ swait_event_interruptible_exclusive(my_rdp->nocb_wq,
!READ_ONCE(my_rdp->nocb_leader_sleep));
raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
my_rdp->nocb_leader_sleep = true;
@@ -2176,7 +2253,7 @@ wait_again:
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
/* List was empty, so wake up the follower. */
- swake_up(&rdp->nocb_wq);
+ swake_up_one(&rdp->nocb_wq);
}
}
@@ -2193,7 +2270,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
{
for (;;) {
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
- swait_event_interruptible(rdp->nocb_wq,
+ swait_event_interruptible_exclusive(rdp->nocb_wq,
READ_ONCE(rdp->nocb_follower_head));
if (smp_load_acquire(&rdp->nocb_follower_head)) {
/* ^^^ Ensure CB invocation follows _head test. */
@@ -2569,23 +2646,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
- * An adaptive-ticks CPU can potentially execute in kernel mode for an
- * arbitrarily long period of time with the scheduling-clock tick turned
- * off. RCU will be paying attention to this CPU because it is in the
- * kernel, but the CPU cannot be guaranteed to be executing the RCU state
- * machine because the scheduling-clock tick has been disabled. Therefore,
- * if an adaptive-ticks CPU is failing to respond to the current grace
- * period and has not be idle from an RCU perspective, kick it.
- */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
-{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_cpu(cpu))
- smp_send_reschedule(cpu);
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
-}
-
-/*
* Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
* grace-period kthread will do force_quiescent_state() processing?
* The idea is to avoid waking up RCU core processing on such a
@@ -2610,8 +2670,6 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
*/
static void rcu_bind_gp_kthread(void)
{
- int __maybe_unused cpu;
-
if (!tick_nohz_full_enabled())
return;
housekeeping_affine(current, HK_FLAG_RCU);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4c230a60ece4..39cb23d22109 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
#ifdef CONFIG_TASKS_RCU
/*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle. As such, grace periods can take one good
- * long time. There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like. Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs. If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time. There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs. If this is required, per-CPU callback lists
+ * will be needed.
*/
/* Global list of callbacks and associated lock. */
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_tasks() assumes
* that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution. As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution. As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-structure synchronization.
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
@@ -667,6 +668,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
struct rcu_head *list;
struct rcu_head *next;
LIST_HEAD(rcu_tasks_holdouts);
+ int fract;
/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
housekeeping_affine(current, HK_FLAG_RCU);
@@ -748,13 +750,25 @@ static int __noreturn rcu_tasks_kthread(void *arg)
* holdouts. When the list is empty, we are done.
*/
lastreport = jiffies;
- while (!list_empty(&rcu_tasks_holdouts)) {
+
+ /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
+ fract = 10;
+
+ for (;;) {
bool firstreport;
bool needreport;
int rtst;
struct task_struct *t1;
- schedule_timeout_interruptible(HZ);
+ if (list_empty(&rcu_tasks_holdouts))
+ break;
+
+ /* Slowly back off waiting for holdouts */
+ schedule_timeout_interruptible(HZ/fract);
+
+ if (fract > 1)
+ fract--;
+
rtst = READ_ONCE(rcu_task_stall_timeout);
needreport = rtst > 0 &&
time_after(jiffies, lastreport + rtst);
@@ -800,6 +814,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
list = next;
cond_resched();
}
+ /* Paranoid sleep to keep this from entering a tight loop */
schedule_timeout_uninterruptible(HZ/10);
}
}
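
The reworked holdout loop sleeps HZ/fract jiffies and decrements fract
on each pass, stretching the polling interval from HZ/10 out to a full
HZ. A quick userspace sketch of the resulting schedule (HZ fixed at
1000 purely for illustration):

#include <stdio.h>

#define HZ 1000 /* illustrative; the real value is a build-time choice */

int main(void)
{
        int fract = 10;
        int pass;

        for (pass = 1; pass <= 12; pass++) {
                printf("pass %2d: sleep %4d jiffies\n", pass, HZ / fract);
                if (fract > 1)
                        fract--;        /* back off toward 1-second polls */
        }
        return 0;
}

The first pass sleeps 100 jiffies; the tenth and later passes sleep the
full 1000, matching the back-off comment in the loop.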
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index d9a02b318108..7fe183404c38 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -20,7 +20,7 @@ obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle.o fair.o rt.o deadline.o
obj-y += wait.o wait_bit.o swait.o completion.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 10c83e73837a..e3e3b979f9bd 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -53,6 +53,7 @@
*
*/
#include "sched.h"
+#include <linux/sched_clock.h>
/*
* Scheduler clock - returns current time in nanosec units.
@@ -66,12 +67,7 @@ unsigned long long __weak sched_clock(void)
}
EXPORT_SYMBOL_GPL(sched_clock);
-__read_mostly int sched_clock_running;
-
-void sched_clock_init(void)
-{
- sched_clock_running = 1;
-}
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
@@ -195,17 +191,40 @@ void clear_sched_clock_stable(void)
smp_mb(); /* matches sched_clock_init_late() */
- if (sched_clock_running == 2)
+ if (static_key_count(&sched_clock_running.key) == 2)
__clear_sched_clock_stable();
}
+static void __sched_clock_gtod_offset(void)
+{
+ struct sched_clock_data *scd = this_scd();
+
+ __scd_stamp(scd);
+ __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
+}
+
+void __init sched_clock_init(void)
+{
+ /*
+ * Set __gtod_offset such that once we mark sched_clock_running,
+ * sched_clock_tick() continues where sched_clock() left off.
+ *
+ * Even if TSC is buggered, we're still UP at this point so it
+ * can't really be out of sync.
+ */
+ local_irq_disable();
+ __sched_clock_gtod_offset();
+ local_irq_enable();
+
+ static_branch_inc(&sched_clock_running);
+}
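
Converting sched_clock_running from a plain int to a static key means
the "not yet running" tests in sched_clock_cpu() and sched_clock_tick()
compile down to patched branches rather than memory loads. A shape-only
sketch of the pattern (kernel APIs, so it builds only in-tree;
do_expensive_read() is a hypothetical helper):

#include <linux/jump_label.h>
#include <linux/types.h>

static DEFINE_STATIC_KEY_FALSE(feature_enabled);

u64 do_expensive_read(void);                    /* hypothetical */

void feature_init(void)
{
        static_branch_inc(&feature_enabled);    /* patch branches live */
}

u64 hot_path(void)
{
        /* A no-op jump until the key is incremented at init time. */
        if (!static_branch_unlikely(&feature_enabled))
                return 0;
        return do_expensive_read();
}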
/*
* We run this as late_initcall() such that it runs after all built-in drivers,
* notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
*/
static int __init sched_clock_init_late(void)
{
- sched_clock_running = 2;
+ static_branch_inc(&sched_clock_running);
/*
* Ensure that it is impossible to not do a static_key update.
*
@@ -350,8 +369,8 @@ u64 sched_clock_cpu(int cpu)
if (sched_clock_stable())
return sched_clock() + __sched_clock_offset;
- if (unlikely(!sched_clock_running))
- return 0ull;
+ if (!static_branch_unlikely(&sched_clock_running))
+ return sched_clock();
preempt_disable_notrace();
scd = cpu_sdc(cpu);
@@ -373,7 +392,7 @@ void sched_clock_tick(void)
if (sched_clock_stable())
return;
- if (unlikely(!sched_clock_running))
+ if (!static_branch_unlikely(&sched_clock_running))
return;
lockdep_assert_irqs_disabled();
@@ -385,8 +404,6 @@ void sched_clock_tick(void)
void sched_clock_tick_stable(void)
{
- u64 gtod, clock;
-
if (!sched_clock_stable())
return;
@@ -398,9 +415,7 @@ void sched_clock_tick_stable(void)
* TSC to be unstable, any computation will be computing crap.
*/
local_irq_disable();
- gtod = ktime_get_ns();
- clock = sched_clock();
- __gtod_offset = (clock + __sched_clock_offset) - gtod;
+ __sched_clock_gtod_offset();
local_irq_enable();
}
@@ -434,9 +449,17 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+void __init sched_clock_init(void)
+{
+ static_branch_inc(&sched_clock_running);
+ local_irq_disable();
+ generic_sched_clock_init();
+ local_irq_enable();
+}
+
u64 sched_clock_cpu(int cpu)
{
- if (unlikely(!sched_clock_running))
+ if (!static_branch_unlikely(&sched_clock_running))
return 0;
return sched_clock();
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index e426b0cb9ac6..a1ad5b7d5521 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -22,8 +22,8 @@
*
* See also complete_all(), wait_for_completion() and related routines.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*/
void complete(struct completion *x)
{
@@ -44,8 +44,8 @@ EXPORT_SYMBOL(complete);
*
* This will wake up all threads waiting on this particular completion event.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*
* Since complete_all() sets the completion of @x permanently to done
* to allow multiple waiters to finish, a call to reinit_completion()
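
A minimal usage sketch of the API this kerneldoc describes; the names and the
published value are hypothetical:

	static DECLARE_COMPLETION(example_done);
	static int example_result;

	static void example_waiter(void)
	{
		wait_for_completion(&example_done);
		/* example_result is guaranteed to be visible here */
	}

	static void example_producer(void)
	{
		example_result = 42;	 /* publish before waking */
		complete(&example_done); /* full barrier, then wakeup */
	}
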
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe365c9a08e9..c45de46fdf10 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -17,6 +17,8 @@
#include "../workqueue_internal.h"
#include "../smpboot.h"
+#include "pelt.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -45,14 +47,6 @@ const_debug unsigned int sysctl_sched_features =
const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
- * period over which we average the RT time consumption, measured
- * in ms.
- *
- * default: 1s
- */
-const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
-
-/*
* period over which we measure -rt task CPU usage in us.
* default: 1s
*/
@@ -183,9 +177,9 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
rq->clock_task += delta;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef HAVE_SCHED_AVG_IRQ
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
- sched_rt_avg_update(rq, irq_delta + steal);
+ update_irq_load_avg(rq, irq_delta + steal);
#endif
}
@@ -412,8 +406,8 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
* it's already queued (either by us or someone else) and will get the
* wakeup due to that.
*
- * This cmpxchg() implies a full barrier, which pairs with the write
- * barrier implied by the wakeup in wake_up_q().
+ * This cmpxchg() executes a full barrier, which pairs with the full
+ * barrier executed by the wakeup in wake_up_q().
*/
if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
return;
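
A hedged usage sketch of the wake_q pairing documented above (the lock and
the waiter are hypothetical):

	static void example_wake_waiter(spinlock_t *lock, struct task_struct *waiter)
	{
		DEFINE_WAKE_Q(wake_q);

		spin_lock(lock);
		wake_q_add(&wake_q, waiter);	/* cmpxchg(): full barrier */
		spin_unlock(lock);

		wake_up_q(&wake_q);		/* wakeup: pairing full barrier */
	}
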
@@ -441,8 +435,8 @@ void wake_up_q(struct wake_q_head *head)
task->wake_q.next = NULL;
/*
- * wake_up_process() implies a wmb() to pair with the queueing
- * in wake_q_add() so as not to miss wakeups.
+ * wake_up_process() executes a full barrier, which pairs with
+ * the queueing in wake_q_add() so as not to miss wakeups.
*/
wake_up_process(task);
put_task_struct(task);
@@ -649,23 +643,6 @@ bool sched_can_stop_tick(struct rq *rq)
return true;
}
#endif /* CONFIG_NO_HZ_FULL */
-
-void sched_avg_update(struct rq *rq)
-{
- s64 period = sched_avg_period();
-
- while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
- /*
- * Inline assembly required to prevent the compiler
- * optimising this loop into a divmod call.
- * See __iter_div_u64_rem() for another example of this.
- */
- asm("" : "+rm" (rq->age_stamp));
- rq->age_stamp += period;
- rq->rt_avg /= 2;
- }
-}
-
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
@@ -1199,6 +1176,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
__set_task_cpu(p, new_cpu);
}
+#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
if (task_on_rq_queued(p)) {
@@ -1280,16 +1258,17 @@ unlock:
/*
* Cross migrate two tasks
*/
-int migrate_swap(struct task_struct *cur, struct task_struct *p)
+int migrate_swap(struct task_struct *cur, struct task_struct *p,
+ int target_cpu, int curr_cpu)
{
struct migration_swap_arg arg;
int ret = -EINVAL;
arg = (struct migration_swap_arg){
.src_task = cur,
- .src_cpu = task_cpu(cur),
+ .src_cpu = curr_cpu,
.dst_task = p,
- .dst_cpu = task_cpu(p),
+ .dst_cpu = target_cpu,
};
if (arg.src_cpu == arg.dst_cpu)
@@ -1314,6 +1293,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
out:
return ret;
}
+#endif /* CONFIG_NUMA_BALANCING */
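
With the new prototype the caller names both CPUs explicitly instead of
migrate_swap() sampling task_cpu() itself; the old behaviour, expressed
through the new interface, would be (sketch):

	ret = migrate_swap(cur, p, task_cpu(p), task_cpu(cur));

The fair.c hunk further down passes env.best_cpu/env.src_cpu instead,
presumably so the CPUs chosen during NUMA placement are used rather than
re-sampled after they may have changed.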
/*
* wait_task_inactive - wait for a thread to unschedule.
@@ -1879,8 +1859,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
* rq(c1)->lock (if not at the same time, then in that order).
* C) LOCK of the rq(c1)->lock scheduling in task
*
- * Transitivity guarantees that B happens after A and C after B.
- * Note: we only require RCpc transitivity.
+ * Release/acquire chaining guarantees that B happens after A and C after B.
* Note: the CPU doing B need not be c0 or c1
*
* Example:
@@ -1942,16 +1921,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
* UNLOCK rq(0)->lock
*
*
- * However; for wakeups there is a second guarantee we must provide, namely we
- * must observe the state that lead to our wakeup. That is, not only must our
- * task observe its own prior state, it must also observe the stores prior to
- * its wakeup.
- *
- * This means that any means of doing remote wakeups must order the CPU doing
- * the wakeup against the CPU the task is going to end up running on. This,
- * however, is already required for the regular Program-Order guarantee above,
- * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire).
- *
+ * However, for wakeups there is a second guarantee we must provide, namely we
+ * must ensure that CONDITION=1 done by the caller can not be reordered with
+ * accesses to the task state; see try_to_wake_up() and set_current_state().
*/
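
The canonical pairing this refers to, as a sketch (CONDITION stands for the
caller-defined wakeup condition):

	/* waiter */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (CONDITION)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* waker */
	CONDITION = 1;
	wake_up_process(p);	/* orders the store against p->state */
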
/**
@@ -1967,6 +1939,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
* Atomic against schedule() which would dequeue a task, also see
* set_current_state().
*
+ * This function executes a full memory barrier before accessing the task
+ * state; see set_current_state().
+ *
* Return: %true if @p->state changes (an actual wakeup was done),
* %false otherwise.
*/
@@ -1998,21 +1973,20 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* be possible to, falsely, observe p->on_rq == 0 and get stuck
* in smp_cond_load_acquire() below.
*
- * sched_ttwu_pending() try_to_wake_up()
- * [S] p->on_rq = 1; [L] P->state
- * UNLOCK rq->lock -----.
- * \
- * +--- RMB
- * schedule() /
- * LOCK rq->lock -----'
- * UNLOCK rq->lock
+ * sched_ttwu_pending() try_to_wake_up()
+ * STORE p->on_rq = 1 LOAD p->state
+ * UNLOCK rq->lock
+ *
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * UNLOCK rq->lock
*
* [task p]
- * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
+ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
*
- * Pairs with the UNLOCK+LOCK on rq->lock from the
- * last wakeup of our task and the schedule that got our task
- * current.
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
*/
smp_rmb();
if (p->on_rq && ttwu_remote(p, wake_flags))
@@ -2026,15 +2000,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* One must be running (->on_cpu == 1) in order to remove oneself
* from the runqueue.
*
- * [S] ->on_cpu = 1; [L] ->on_rq
- * UNLOCK rq->lock
- * RMB
- * LOCK rq->lock
- * [S] ->on_rq = 0; [L] ->on_cpu
+ * __schedule() (switch to task 'p') try_to_wake_up()
+ * STORE p->on_cpu = 1 LOAD p->on_rq
+ * UNLOCK rq->lock
+ *
+ * __schedule() (put 'p' to sleep)
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * STORE p->on_rq = 0 LOAD p->on_cpu
*
- * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
- * from the consecutive calls to schedule(); the first switching to our
- * task, the second putting it to sleep.
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
*/
smp_rmb();
@@ -2140,8 +2116,7 @@ out:
*
* Return: 1 if the process was woken up, 0 if it was already running.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * This function executes a full memory barrier before accessing the task state.
*/
int wake_up_process(struct task_struct *p)
{
@@ -2317,7 +2292,6 @@ static inline void init_schedstats(void) {}
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
unsigned long flags;
- int cpu = get_cpu();
__sched_fork(clone_flags, p);
/*
@@ -2353,14 +2327,12 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->sched_reset_on_fork = 0;
}
- if (dl_prio(p->prio)) {
- put_cpu();
+ if (dl_prio(p->prio))
return -EAGAIN;
- } else if (rt_prio(p->prio)) {
+ else if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
- } else {
+ else
p->sched_class = &fair_sched_class;
- }
init_entity_runnable_average(&p->se);
@@ -2376,7 +2348,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
* We're setting the CPU for the first time, we don't migrate,
* so use __set_task_cpu().
*/
- __set_task_cpu(p, cpu);
+ __set_task_cpu(p, smp_processor_id());
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -2393,8 +2365,6 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
-
- put_cpu();
return 0;
}
@@ -5714,13 +5684,6 @@ void set_rq_offline(struct rq *rq)
}
}
-static void set_cpu_rq_start_time(unsigned int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rq->age_stamp = sched_clock_cpu(cpu);
-}
-
/*
* used to mark begin/end of suspend/resume:
*/
@@ -5838,7 +5801,6 @@ static void sched_rq_cpu_starting(unsigned int cpu)
int sched_cpu_starting(unsigned int cpu)
{
- set_cpu_rq_start_time(cpu);
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
return 0;
@@ -5954,7 +5916,6 @@ void __init sched_init(void)
int i, j;
unsigned long alloc_size = 0, ptr;
- sched_clock_init();
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6106,7 +6067,6 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
idle_thread_set_boot_cpu();
- set_cpu_rq_start_time(smp_processor_id());
#endif
init_sched_fair_class();
@@ -6785,6 +6745,16 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
+ if (schedstat_enabled() && tg != &root_task_group) {
+ u64 ws = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ ws += schedstat_val(tg->se[i]->statistics.wait_sum);
+
+ seq_printf(sf, "wait_sum %llu\n", ws);
+ }
+
return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
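
With schedstats enabled, a non-root group's cpu.stat would now read roughly
like this (values hypothetical):

	nr_periods 11
	nr_throttled 2
	throttled_time 104000000
	wait_sum 123456789
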
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c907fde01eaa..3fffad3bc8a8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,9 +53,7 @@ struct sugov_cpu {
unsigned int iowait_boost_max;
u64 last_update;
- /* The fields below are only needed when sharing a policy: */
- unsigned long util_cfs;
- unsigned long util_dl;
+ unsigned long bw_dl;
unsigned long max;
/* The field below is for single-CPU policies only: */
@@ -179,33 +177,90 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ * cpu_util_{cfs,rt,dl,irq}()
+ * cpu_bw_dl()
+ *
+ * Where the cfs, rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs, rt and dl utilization numbers are the running times measured with
+ * rq->clock_task, which excludes things like IRQ and steal time; those are
+ * instead accrued in the irq utilization.
+ *
+ * The DL bandwidth number, on the other hand, is not a measured metric but a
+ * value computed from the task model parameters; it gives the minimal utilization
+ * required to meet deadlines.
+ */
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
+ unsigned long util, irq, max;
- sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
- sg_cpu->util_cfs = cpu_util_cfs(rq);
- sg_cpu->util_dl = cpu_util_dl(rq);
-}
-
-static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
-{
- struct rq *rq = cpu_rq(sg_cpu->cpu);
+ sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+ sg_cpu->bw_dl = cpu_bw_dl(rq);
if (rt_rq_is_runnable(&rq->rt))
- return sg_cpu->max;
+ return max;
+
+ /*
+ * Early check to see if IRQ/steal time saturates the CPU, can be
+ * because of inaccuracies in how we track these -- see
+ * update_irq_load_avg().
+ */
+ irq = cpu_util_irq(rq);
+ if (unlikely(irq >= max))
+ return max;
+
+ /*
+ * Because the time spent on RT/DL tasks is visible as 'lost' time to
+ * CFS tasks and we use the same metric to track the effective
+ * utilization (PELT windows are synchronized) we can directly add them
+ * to obtain the CPU's actual utilization.
+ */
+ util = cpu_util_cfs(rq);
+ util += cpu_util_rt(rq);
+
+ /*
+ * We do not make cpu_util_dl() a permanent part of this sum because we
+ * want to use cpu_bw_dl() later on, but we need to check if the
+ * CFS+RT+DL sum is saturated (i.e. no idle time) such that we select
+ * f_max when there is no idle time.
+ *
+ * NOTE: numerical errors or stop class might cause us to not quite hit
+ * saturation when we should -- something for later.
+ */
+ if ((util + cpu_util_dl(rq)) >= max)
+ return max;
+
+ /*
+ * There is still idle time; further improve the number by using the
+ * irq metric. Because IRQ/steal time is hidden from the task clock we
+ * need to scale the task numbers:
+ *
+ * max - irq
+ * U' = irq + --------- * U
+ * max
+ */
+ util = scale_irq_capacity(util, irq, max);
+ util += irq;
/*
- * Utilization required by DEADLINE must always be granted while, for
- * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
- * gracefully reduce the frequency when no tasks show up for longer
+ * Bandwidth required by DEADLINE must always be granted while, for
+ * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+ * to gracefully reduce the frequency when no tasks show up for longer
* periods of time.
*
- * Ideally we would like to set util_dl as min/guaranteed freq and
- * util_cfs + util_dl as requested freq. However, cpufreq is not yet
- * ready for such an interface. So, we only do the latter for now.
+ * Ideally we would like to set bw_dl as min/guaranteed freq and util +
+ * bw_dl as requested freq. However, cpufreq is not yet ready for such
+ * an interface. So, we only do the latter for now.
*/
- return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
+ return min(max, util + sg_cpu->bw_dl);
}
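
A self-contained sketch of the final scaling step, assuming scale_irq_capacity()
implements the formula from the comment above:

	static unsigned long example_effective_util(unsigned long util,
						    unsigned long irq,
						    unsigned long max)
	{
		/* U' = U * (max - irq) / max + irq */
		util *= (max - irq);
		util /= max;
		return util + irq;
	}
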
/**
@@ -360,7 +415,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
- if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
+ if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
sg_policy->need_freq_update = true;
}
@@ -383,9 +438,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
busy = sugov_cpu_is_busy(sg_cpu);
- sugov_get_util(sg_cpu);
+ util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
- util = sugov_aggregate_util(sg_cpu);
sugov_iowait_apply(sg_cpu, time, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
@@ -424,9 +478,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
- sugov_get_util(j_sg_cpu);
+ j_util = sugov_get_util(j_sg_cpu);
j_max = j_sg_cpu->max;
- j_util = sugov_aggregate_util(j_sg_cpu);
sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
if (j_util * max > j_max * util) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b5fbdde6afa9..997ea7b839fa 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -16,6 +16,7 @@
* Fabio Checconi <fchecconi@gmail.com>
*/
#include "sched.h"
+#include "pelt.h"
struct dl_bandwidth def_dl_bandwidth;
@@ -1179,8 +1180,6 @@ static void update_curr_dl(struct rq *rq)
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
-
if (dl_entity_is_special(dl_se))
return;
@@ -1761,6 +1760,9 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
deadline_queue_push_tasks(rq);
+ if (rq->curr->sched_class != &dl_sched_class)
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
return p;
}
@@ -1768,6 +1770,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
update_curr_dl(rq);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
@@ -1784,6 +1787,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
/*
* Even when we have runtime, update_curr_dl() might have resulted in us
* not being the leftmost task anymore. In that case NEED_RESCHED will
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e593b4118578..60caf1fb94e0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -111,20 +111,19 @@ static int sched_feat_set(char *cmp)
cmp += 3;
}
- for (i = 0; i < __SCHED_FEAT_NR; i++) {
- if (strcmp(cmp, sched_feat_names[i]) == 0) {
- if (neg) {
- sysctl_sched_features &= ~(1UL << i);
- sched_feat_disable(i);
- } else {
- sysctl_sched_features |= (1UL << i);
- sched_feat_enable(i);
- }
- break;
- }
+ i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
+ if (i < 0)
+ return i;
+
+ if (neg) {
+ sysctl_sched_features &= ~(1UL << i);
+ sched_feat_disable(i);
+ } else {
+ sysctl_sched_features |= (1UL << i);
+ sched_feat_enable(i);
}
- return i;
+ return 0;
}
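
For reference, the match_string() contract relied on here, as a sketch:

	static const char * const example_names[] = { "alpha", "beta" };

	static int example_lookup(const char *cmp)
	{
		int i = match_string(example_names, ARRAY_SIZE(example_names), cmp);

		return i;	/* >= 0: array index; < 0: -EINVAL */
	}
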
static ssize_t
@@ -133,7 +132,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
{
char buf[64];
char *cmp;
- int i;
+ int ret;
struct inode *inode;
if (cnt > 63)
@@ -148,10 +147,10 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
/* Ensure the static_key remains in a consistent state */
inode = file_inode(filp);
inode_lock(inode);
- i = sched_feat_set(cmp);
+ ret = sched_feat_set(cmp);
inode_unlock(inode);
- if (i == __SCHED_FEAT_NR)
- return -EINVAL;
+ if (ret < 0)
+ return ret;
*ppos += cnt;
@@ -623,8 +622,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
#undef PU
}
-extern __read_mostly int sched_clock_running;
-
static void print_cpu(struct seq_file *m, int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -843,8 +840,8 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
SEQ_printf(m, "numa_faults node=%d ", node);
- SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
- SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
+ SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
+ SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f0a0be4d344..309c93fcc604 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -255,9 +255,6 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return cfs_rq->rq;
}
-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se) (!se->my_q)
-
static inline struct task_struct *task_of(struct sched_entity *se)
{
SCHED_WARN_ON(!entity_is_task(se));
@@ -419,7 +416,6 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return container_of(cfs_rq, struct rq, cfs);
}
-#define entity_is_task(se) 1
#define for_each_sched_entity(se) \
for (; se; se = NULL)
@@ -692,7 +688,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#ifdef CONFIG_SMP
-
+#include "pelt.h"
#include "sched-pelt.h"
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
@@ -735,11 +731,12 @@ static void attach_entity_cfs_rq(struct sched_entity *se);
* To solve this problem, we also cap the util_avg of successive tasks to
* only 1/2 of the left utilization budget:
*
- * util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
+ * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
*
- * where n denotes the nth task.
+ * where n denotes the nth task and cpu_scale the CPU capacity.
*
- * For example, a simplest series from the beginning would be like:
+ * For example, for a CPU with 1024 of capacity, a simplest series from
+ * the beginning would be like:
*
* task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
* cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
@@ -751,7 +748,8 @@ void post_init_entity_util_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
- long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+ long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+ long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
@@ -1314,7 +1312,7 @@ static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
* of each group. Skip other nodes.
*/
if (sched_numa_topology_type == NUMA_BACKPLANE &&
- dist > maxdist)
+ dist >= maxdist)
continue;
/* Add up the faults from nearby nodes. */
@@ -1452,15 +1450,12 @@ static unsigned long capacity_of(int cpu);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
- unsigned long nr_running;
unsigned long load;
/* Total compute capacity of CPUs on a node */
unsigned long compute_capacity;
- /* Approximate capacity in terms of runnable tasks on a node */
- unsigned long task_capacity;
- int has_free_capacity;
+ unsigned int nr_running;
};
/*
@@ -1487,8 +1482,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
* the @ns structure is NULL'ed and task_numa_compare() will
* not find this node attractive.
*
- * We'll either bail at !has_free_capacity, or we'll detect a huge
- * imbalance and bail there.
+ * We'll detect a huge imbalance and bail there.
*/
if (!cpus)
return;
@@ -1497,9 +1491,8 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
capacity = cpus / smt; /* cores */
- ns->task_capacity = min_t(unsigned, capacity,
+ capacity = min_t(unsigned, capacity,
DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
- ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}
struct task_numa_env {
@@ -1548,28 +1541,12 @@ static bool load_too_imbalanced(long src_load, long dst_load,
src_capacity = env->src_stats.compute_capacity;
dst_capacity = env->dst_stats.compute_capacity;
- /* We care about the slope of the imbalance, not the direction. */
- if (dst_load < src_load)
- swap(dst_load, src_load);
+ imb = abs(dst_load * src_capacity - src_load * dst_capacity);
- /* Is the difference below the threshold? */
- imb = dst_load * src_capacity * 100 -
- src_load * dst_capacity * env->imbalance_pct;
- if (imb <= 0)
- return false;
-
- /*
- * The imbalance is above the allowed threshold.
- * Compare it with the old imbalance.
- */
orig_src_load = env->src_stats.load;
orig_dst_load = env->dst_stats.load;
- if (orig_dst_load < orig_src_load)
- swap(orig_dst_load, orig_src_load);
-
- old_imb = orig_dst_load * src_capacity * 100 -
- orig_src_load * dst_capacity * env->imbalance_pct;
+ old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
/* Would this change make things worse? */
return (imb > old_imb);
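
A worked example of the simplified metric, with hypothetical loads and equal
capacities of 1024 (loads 6/4 before the move, 5/5 after):

	old_imb = |6 * 1024 - 4 * 1024| = 2048
	imb     = |5 * 1024 - 5 * 1024| = 0

imb <= old_imb, so the change is accepted: only moves that widen the absolute
load gap between the nodes are rejected.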
@@ -1582,9 +1559,8 @@ static bool load_too_imbalanced(long src_load, long dst_load,
* be exchanged with the source task
*/
static void task_numa_compare(struct task_numa_env *env,
- long taskimp, long groupimp)
+ long taskimp, long groupimp, bool maymove)
{
- struct rq *src_rq = cpu_rq(env->src_cpu);
struct rq *dst_rq = cpu_rq(env->dst_cpu);
struct task_struct *cur;
long src_load, dst_load;
@@ -1605,97 +1581,73 @@ static void task_numa_compare(struct task_numa_env *env,
if (cur == env->p)
goto unlock;
+ if (!cur) {
+ if (maymove || imp > env->best_imp)
+ goto assign;
+ else
+ goto unlock;
+ }
+
/*
* "imp" is the fault differential for the source task between the
* source and destination node. Calculate the total differential for
* the source task and potential destination task. The more negative
- * the value is, the more rmeote accesses that would be expected to
+ * the value is, the more remote accesses that would be expected to
* be incurred if the tasks were swapped.
*/
- if (cur) {
- /* Skip this swap candidate if cannot move to the source CPU: */
- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
- goto unlock;
+ /* Skip this swap candidate if it cannot move to the source CPU */
+ if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+ goto unlock;
+ /*
+ * If dst and source tasks are in the same NUMA group, or not
+ * in any group then look only at task weights.
+ */
+ if (cur->numa_group == env->p->numa_group) {
+ imp = taskimp + task_weight(cur, env->src_nid, dist) -
+ task_weight(cur, env->dst_nid, dist);
/*
- * If dst and source tasks are in the same NUMA group, or not
- * in any group then look only at task weights.
+ * Add some hysteresis to prevent swapping the
+ * tasks within a group over tiny differences.
*/
- if (cur->numa_group == env->p->numa_group) {
- imp = taskimp + task_weight(cur, env->src_nid, dist) -
- task_weight(cur, env->dst_nid, dist);
- /*
- * Add some hysteresis to prevent swapping the
- * tasks within a group over tiny differences.
- */
- if (cur->numa_group)
- imp -= imp/16;
- } else {
- /*
- * Compare the group weights. If a task is all by
- * itself (not part of a group), use the task weight
- * instead.
- */
- if (cur->numa_group)
- imp += group_weight(cur, env->src_nid, dist) -
- group_weight(cur, env->dst_nid, dist);
- else
- imp += task_weight(cur, env->src_nid, dist) -
- task_weight(cur, env->dst_nid, dist);
- }
+ if (cur->numa_group)
+ imp -= imp / 16;
+ } else {
+ /*
+ * Compare the group weights. If a task is all by itself
+ * (not part of a group), use the task weight instead.
+ */
+ if (cur->numa_group && env->p->numa_group)
+ imp += group_weight(cur, env->src_nid, dist) -
+ group_weight(cur, env->dst_nid, dist);
+ else
+ imp += task_weight(cur, env->src_nid, dist) -
+ task_weight(cur, env->dst_nid, dist);
}
- if (imp <= env->best_imp && moveimp <= env->best_imp)
+ if (imp <= env->best_imp)
goto unlock;
- if (!cur) {
- /* Is there capacity at our destination? */
- if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
- !env->dst_stats.has_free_capacity)
- goto unlock;
-
- goto balance;
- }
-
- /* Balance doesn't matter much if we're running a task per CPU: */
- if (imp > env->best_imp && src_rq->nr_running == 1 &&
- dst_rq->nr_running == 1)
+ if (maymove && moveimp > imp && moveimp > env->best_imp) {
+ imp = moveimp - 1;
+ cur = NULL;
goto assign;
+ }
/*
* In the overloaded case, try and keep the load balanced.
*/
-balance:
- load = task_h_load(env->p);
+ load = task_h_load(env->p) - task_h_load(cur);
+ if (!load)
+ goto assign;
+
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
- if (moveimp > imp && moveimp > env->best_imp) {
- /*
- * If the improvement from just moving env->p direction is
- * better than swapping tasks around, check if a move is
- * possible. Store a slightly smaller score than moveimp,
- * so an actually idle CPU will win.
- */
- if (!load_too_imbalanced(src_load, dst_load, env)) {
- imp = moveimp - 1;
- cur = NULL;
- goto assign;
- }
- }
-
- if (imp <= env->best_imp)
- goto unlock;
-
- if (cur) {
- load = task_h_load(cur);
- dst_load -= load;
- src_load += load;
- }
-
if (load_too_imbalanced(src_load, dst_load, env))
goto unlock;
+assign:
/*
* One idle CPU per node is evaluated for a task numa move.
* Call select_idle_sibling to maybe find a better one.
@@ -1711,7 +1663,6 @@ balance:
local_irq_enable();
}
-assign:
task_numa_assign(env, cur, imp);
unlock:
rcu_read_unlock();
@@ -1720,43 +1671,30 @@ unlock:
static void task_numa_find_cpu(struct task_numa_env *env,
long taskimp, long groupimp)
{
+ long src_load, dst_load, load;
+ bool maymove = false;
int cpu;
+ load = task_h_load(env->p);
+ dst_load = env->dst_stats.load + load;
+ src_load = env->src_stats.load - load;
+
+ /*
+ * If the improvement from just moving env->p direction is better
+ * than swapping tasks around, check if a move is possible.
+ */
+ maymove = !load_too_imbalanced(src_load, dst_load, env);
+
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
continue;
env->dst_cpu = cpu;
- task_numa_compare(env, taskimp, groupimp);
+ task_numa_compare(env, taskimp, groupimp, maymove);
}
}
-/* Only move tasks to a NUMA node less busy than the current node. */
-static bool numa_has_capacity(struct task_numa_env *env)
-{
- struct numa_stats *src = &env->src_stats;
- struct numa_stats *dst = &env->dst_stats;
-
- if (src->has_free_capacity && !dst->has_free_capacity)
- return false;
-
- /*
- * Only consider a task move if the source has a higher load
- * than the destination, corrected for CPU capacity on each node.
- *
- * src->load dst->load
- * --------------------- vs ---------------------
- * src->compute_capacity dst->compute_capacity
- */
- if (src->load * dst->compute_capacity * env->imbalance_pct >
-
- dst->load * src->compute_capacity * 100)
- return true;
-
- return false;
-}
-
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
@@ -1797,7 +1735,7 @@ static int task_numa_migrate(struct task_struct *p)
* elsewhere, so there is no point in (re)trying.
*/
if (unlikely(!sd)) {
- p->numa_preferred_nid = task_node(p);
+ sched_setnuma(p, task_node(p));
return -EINVAL;
}
@@ -1811,8 +1749,7 @@ static int task_numa_migrate(struct task_struct *p)
update_numa_stats(&env.dst_stats, env.dst_nid);
/* Try to find a spot on the preferred nid. */
- if (numa_has_capacity(&env))
- task_numa_find_cpu(&env, taskimp, groupimp);
+ task_numa_find_cpu(&env, taskimp, groupimp);
/*
* Look at other nodes in these cases:
@@ -1842,8 +1779,7 @@ static int task_numa_migrate(struct task_struct *p)
env.dist = dist;
env.dst_nid = nid;
update_numa_stats(&env.dst_stats, env.dst_nid);
- if (numa_has_capacity(&env))
- task_numa_find_cpu(&env, taskimp, groupimp);
+ task_numa_find_cpu(&env, taskimp, groupimp);
}
}
@@ -1856,15 +1792,13 @@ static int task_numa_migrate(struct task_struct *p)
* trying for a better one later. Do not set the preferred node here.
*/
if (p->numa_group) {
- struct numa_group *ng = p->numa_group;
-
if (env.best_cpu == -1)
nid = env.src_nid;
else
- nid = env.dst_nid;
+ nid = cpu_to_node(env.best_cpu);
- if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
- sched_setnuma(p, env.dst_nid);
+ if (nid != p->numa_preferred_nid)
+ sched_setnuma(p, nid);
}
/* No better CPU than the current one was found. */
@@ -1884,7 +1818,8 @@ static int task_numa_migrate(struct task_struct *p)
return ret;
}
- ret = migrate_swap(p, env.best_task);
+ ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
put_task_struct(env.best_task);
@@ -2144,8 +2079,8 @@ static int preferred_group_nid(struct task_struct *p, int nid)
static void task_numa_placement(struct task_struct *p)
{
- int seq, nid, max_nid = -1, max_group_nid = -1;
- unsigned long max_faults = 0, max_group_faults = 0;
+ int seq, nid, max_nid = -1;
+ unsigned long max_faults = 0;
unsigned long fault_types[2] = { 0, 0 };
unsigned long total_faults;
u64 runtime, period;
@@ -2224,33 +2159,30 @@ static void task_numa_placement(struct task_struct *p)
}
}
- if (faults > max_faults) {
- max_faults = faults;
+ if (!p->numa_group) {
+ if (faults > max_faults) {
+ max_faults = faults;
+ max_nid = nid;
+ }
+ } else if (group_faults > max_faults) {
+ max_faults = group_faults;
max_nid = nid;
}
-
- if (group_faults > max_group_faults) {
- max_group_faults = group_faults;
- max_group_nid = nid;
- }
}
- update_task_scan_period(p, fault_types[0], fault_types[1]);
-
if (p->numa_group) {
numa_group_count_active_nodes(p->numa_group);
spin_unlock_irq(group_lock);
- max_nid = preferred_group_nid(p, max_group_nid);
+ max_nid = preferred_group_nid(p, max_nid);
}
if (max_faults) {
/* Set the new preferred node */
if (max_nid != p->numa_preferred_nid)
sched_setnuma(p, max_nid);
-
- if (task_node(p) != p->numa_preferred_nid)
- numa_migrate_preferred(p);
}
+
+ update_task_scan_period(p, fault_types[0], fault_types[1]);
}
static inline int get_numa_group(struct numa_group *grp)
@@ -2450,14 +2382,14 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
numa_is_active_node(mem_node, ng))
local = 1;
- task_numa_placement(p);
-
/*
* Periodically retry migrating the task to its preferred node, in case
* it previously failed, or the scheduler moved us.
*/
- if (time_after(jiffies, p->numa_migrate_retry))
+ if (time_after(jiffies, p->numa_migrate_retry)) {
+ task_numa_placement(p);
numa_migrate_preferred(p);
+ }
if (migrated)
p->numa_pages_migrated += pages;
@@ -2749,19 +2681,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
} while (0)
#ifdef CONFIG_SMP
-/*
- * XXX we want to get rid of these helpers and use the full load resolution.
- */
-static inline long se_weight(struct sched_entity *se)
-{
- return scale_load_down(se->load.weight);
-}
-
-static inline long se_runnable(struct sched_entity *se)
-{
- return scale_load_down(se->runnable_weight);
-}
-
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -3062,314 +2981,6 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
}
#ifdef CONFIG_SMP
-/*
- * Approximate:
- * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
- */
-static u64 decay_load(u64 val, u64 n)
-{
- unsigned int local_n;
-
- if (unlikely(n > LOAD_AVG_PERIOD * 63))
- return 0;
-
- /* after bounds checking we can collapse to 32-bit */
- local_n = n;
-
- /*
- * As y^PERIOD = 1/2, we can combine
- * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
- * With a look-up table which covers y^n (n<PERIOD)
- *
- * To achieve constant time decay_load.
- */
- if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
- val >>= local_n / LOAD_AVG_PERIOD;
- local_n %= LOAD_AVG_PERIOD;
- }
-
- val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
- return val;
-}
-
-static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
-{
- u32 c1, c2, c3 = d3; /* y^0 == 1 */
-
- /*
- * c1 = d1 y^p
- */
- c1 = decay_load((u64)d1, periods);
-
- /*
- * p-1
- * c2 = 1024 \Sum y^n
- * n=1
- *
- * inf inf
- * = 1024 ( \Sum y^n - \Sum y^n - y^0 )
- * n=0 n=p
- */
- c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
-
- return c1 + c2 + c3;
-}
-
-/*
- * Accumulate the three separate parts of the sum; d1 the remainder
- * of the last (incomplete) period, d2 the span of full periods and d3
- * the remainder of the (incomplete) current period.
- *
- * d1 d2 d3
- * ^ ^ ^
- * | | |
- * |<->|<----------------->|<--->|
- * ... |---x---|------| ... |------|-----x (now)
- *
- * p-1
- * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
- * n=1
- *
- * = u y^p + (Step 1)
- *
- * p-1
- * d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2)
- * n=1
- */
-static __always_inline u32
-accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
-{
- unsigned long scale_freq, scale_cpu;
- u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
- u64 periods;
-
- scale_freq = arch_scale_freq_capacity(cpu);
- scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
-
- delta += sa->period_contrib;
- periods = delta / 1024; /* A period is 1024us (~1ms) */
-
- /*
- * Step 1: decay old *_sum if we crossed period boundaries.
- */
- if (periods) {
- sa->load_sum = decay_load(sa->load_sum, periods);
- sa->runnable_load_sum =
- decay_load(sa->runnable_load_sum, periods);
- sa->util_sum = decay_load((u64)(sa->util_sum), periods);
-
- /*
- * Step 2
- */
- delta %= 1024;
- contrib = __accumulate_pelt_segments(periods,
- 1024 - sa->period_contrib, delta);
- }
- sa->period_contrib = delta;
-
- contrib = cap_scale(contrib, scale_freq);
- if (load)
- sa->load_sum += load * contrib;
- if (runnable)
- sa->runnable_load_sum += runnable * contrib;
- if (running)
- sa->util_sum += contrib * scale_cpu;
-
- return periods;
-}
-
-/*
- * We can represent the historical contribution to runnable average as the
- * coefficients of a geometric series. To do this we sub-divide our runnable
- * history into segments of approximately 1ms (1024us); label the segment that
- * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
- *
- * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
- * p0 p1 p2
- * (now) (~1ms ago) (~2ms ago)
- *
- * Let u_i denote the fraction of p_i that the entity was runnable.
- *
- * We then designate the fractions u_i as our co-efficients, yielding the
- * following representation of historical load:
- * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
- *
- * We choose y based on the with of a reasonably scheduling period, fixing:
- * y^32 = 0.5
- *
- * This means that the contribution to load ~32ms ago (u_32) will be weighted
- * approximately half as much as the contribution to load within the last ms
- * (u_0).
- *
- * When a period "rolls over" and we have new u_0`, multiplying the previous
- * sum again by y is sufficient to update:
- * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
- * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
- */
-static __always_inline int
-___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
-{
- u64 delta;
-
- delta = now - sa->last_update_time;
- /*
- * This should only happen when time goes backwards, which it
- * unfortunately does during sched clock init when we swap over to TSC.
- */
- if ((s64)delta < 0) {
- sa->last_update_time = now;
- return 0;
- }
-
- /*
- * Use 1024ns as the unit of measurement since it's a reasonable
- * approximation of 1us and fast to compute.
- */
- delta >>= 10;
- if (!delta)
- return 0;
-
- sa->last_update_time += delta << 10;
-
- /*
- * running is a subset of runnable (weight) so running can't be set if
- * runnable is clear. But there are some corner cases where the current
- * se has been already dequeued but cfs_rq->curr still points to it.
- * This means that weight will be 0 but not running for a sched_entity
- * but also for a cfs_rq if the latter becomes idle. As an example,
- * this happens during idle_balance() which calls
- * update_blocked_averages()
- */
- if (!load)
- runnable = running = 0;
-
- /*
- * Now we know we crossed measurement unit boundaries. The *_avg
- * accrues by two steps:
- *
- * Step 1: accumulate *_sum since last_update_time. If we haven't
- * crossed period boundaries, finish.
- */
- if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
- return 0;
-
- return 1;
-}
-
-static __always_inline void
-___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
-{
- u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
-
- /*
- * Step 2: update *_avg.
- */
- sa->load_avg = div_u64(load * sa->load_sum, divider);
- sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
- sa->util_avg = sa->util_sum / divider;
-}
-
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
-static inline void cfs_se_util_change(struct sched_avg *avg)
-{
- unsigned int enqueued;
-
- if (!sched_feat(UTIL_EST))
- return;
-
- /* Avoid store if the flag has been already set */
- enqueued = avg->util_est.enqueued;
- if (!(enqueued & UTIL_AVG_UNCHANGED))
- return;
-
- /* Reset flag to report util_avg has been updated */
- enqueued &= ~UTIL_AVG_UNCHANGED;
- WRITE_ONCE(avg->util_est.enqueued, enqueued);
-}
-
-/*
- * sched_entity:
- *
- * task:
- * se_runnable() == se_weight()
- *
- * group: [ see update_cfs_group() ]
- * se_weight() = tg->weight * grq->load_avg / tg->load_avg
- * se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
- *
- * load_sum := runnable_sum
- * load_avg = se_weight(se) * runnable_avg
- *
- * runnable_load_sum := runnable_sum
- * runnable_load_avg = se_runnable(se) * runnable_avg
- *
- * XXX collapse load_sum and runnable_load_sum
- *
- * cfs_rq:
- *
- * load_sum = \Sum se_weight(se) * se->avg.load_sum
- * load_avg = \Sum se->avg.load_avg
- *
- * runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
- * runnable_load_avg = \Sum se->avg.runable_load_avg
- */
-
-static int
-__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
-{
- if (entity_is_task(se))
- se->runnable_weight = se->load.weight;
-
- if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
- ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
- return 1;
- }
-
- return 0;
-}
-
-static int
-__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- if (entity_is_task(se))
- se->runnable_weight = se->load.weight;
-
- if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
- cfs_rq->curr == se)) {
-
- ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
- cfs_se_util_change(&se->avg);
- return 1;
- }
-
- return 0;
-}
-
-static int
-__update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
-{
- if (___update_load_sum(now, cpu, &cfs_rq->avg,
- scale_load_down(cfs_rq->load.weight),
- scale_load_down(cfs_rq->runnable_weight),
- cfs_rq->curr != NULL)) {
-
- ___update_load_avg(&cfs_rq->avg, 1, 1);
- return 1;
- }
-
- return 0;
-}
-
#ifdef CONFIG_FAIR_GROUP_SCHED
/**
* update_tg_load_avg - update the tg's load avg
@@ -4037,12 +3648,6 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
#else /* CONFIG_SMP */
-static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
-{
- return 0;
-}
-
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
#define DO_ATTACH 0x0
@@ -4726,7 +4331,6 @@ static inline int throttled_lb_pair(struct task_group *tg,
throttled_hierarchy(dest_cfs_rq);
}
-/* updated child weight may affect parent so we have to do this bottom up */
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
struct rq *rq = data;
@@ -5653,8 +5257,6 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
}
-
- sched_avg_update(this_rq);
}
/* Used instead of source_load when we know the type == 0 */
@@ -7294,8 +6896,8 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
struct numa_group *numa_group = rcu_dereference(p->numa_group);
- unsigned long src_faults, dst_faults;
- int src_nid, dst_nid;
+ unsigned long src_weight, dst_weight;
+ int src_nid, dst_nid, dist;
if (!static_branch_likely(&sched_numa_balancing))
return -1;
@@ -7322,18 +6924,19 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
return 0;
/* Leaving a core idle is often worse than degrading locality. */
- if (env->idle != CPU_NOT_IDLE)
+ if (env->idle == CPU_IDLE)
return -1;
+ dist = node_distance(src_nid, dst_nid);
if (numa_group) {
- src_faults = group_faults(p, src_nid);
- dst_faults = group_faults(p, dst_nid);
+ src_weight = group_weight(p, src_nid, dist);
+ dst_weight = group_weight(p, dst_nid, dist);
} else {
- src_faults = task_faults(p, src_nid);
- dst_faults = task_faults(p, dst_nid);
+ src_weight = task_weight(p, src_nid, dist);
+ dst_weight = task_weight(p, dst_nid, dist);
}
- return dst_faults < src_faults;
+ return dst_weight < src_weight;
}
#else
@@ -7620,6 +7223,22 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
return false;
}
+static inline bool others_have_blocked(struct rq *rq)
+{
+ if (READ_ONCE(rq->avg_rt.util_avg))
+ return true;
+
+ if (READ_ONCE(rq->avg_dl.util_avg))
+ return true;
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+ if (READ_ONCE(rq->avg_irq.util_avg))
+ return true;
+#endif
+
+ return false;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7679,6 +7298,12 @@ static void update_blocked_averages(int cpu)
if (cfs_rq_has_blocked(cfs_rq))
done = false;
}
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_irq_load_avg(rq, 0);
+ /* Don't need periodic decay once load/util_avg are null */
+ if (others_have_blocked(rq))
+ done = false;
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
@@ -7744,9 +7369,12 @@ static inline void update_blocked_averages(int cpu)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_irq_load_avg(rq, 0);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
- if (!cfs_rq_has_blocked(cfs_rq))
+ if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
rq->has_blocked_load = 0;
#endif
rq_unlock_irqrestore(rq, &rf);
@@ -7856,39 +7484,32 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
static unsigned long scale_rt_capacity(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- u64 total, used, age_stamp, avg;
- s64 delta;
+ unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+ unsigned long used, free;
+ unsigned long irq;
- /*
- * Since we're reading these variables without serialization make sure
- * we read them once before doing sanity checks on them.
- */
- age_stamp = READ_ONCE(rq->age_stamp);
- avg = READ_ONCE(rq->rt_avg);
- delta = __rq_clock_broken(rq) - age_stamp;
+ irq = cpu_util_irq(rq);
- if (unlikely(delta < 0))
- delta = 0;
+ if (unlikely(irq >= max))
+ return 1;
- total = sched_avg_period() + delta;
+ used = READ_ONCE(rq->avg_rt.util_avg);
+ used += READ_ONCE(rq->avg_dl.util_avg);
- used = div_u64(avg, total);
+ if (unlikely(used >= max))
+ return 1;
- if (likely(used < SCHED_CAPACITY_SCALE))
- return SCHED_CAPACITY_SCALE - used;
+ free = max - used;
- return 1;
+ return scale_irq_capacity(free, irq, max);
}
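
Worked example with hypothetical numbers: max = 1024, avg_rt + avg_dl
utilization = 256, irq = 128:

	free     = 1024 - 256 = 768
	capacity = scale_irq_capacity(768, 128, 1024)
	         = 768 * (1024 - 128) / 1024 = 672
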
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
- unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
+ unsigned long capacity = scale_rt_capacity(cpu);
struct sched_group *sdg = sd->groups;
- cpu_rq(cpu)->cpu_capacity_orig = capacity;
-
- capacity *= scale_rt_capacity(cpu);
- capacity >>= SCHED_CAPACITY_SHIFT;
+ cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
if (!capacity)
capacity = 1;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
new file mode 100644
index 000000000000..35475c0c5419
--- /dev/null
+++ b/kernel/sched/pelt.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Per Entity Load Tracking
+ *
+ * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Interactivity improvements by Mike Galbraith
+ * (C) 2007 Mike Galbraith <efault@gmx.de>
+ *
+ * Various enhancements by Dmitry Adamushko.
+ * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ * Group scheduling enhancements by Srivatsa Vaddagiri
+ * Copyright IBM Corporation, 2007
+ * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
+ *
+ * Scaled math optimizations by Thomas Gleixner
+ * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * Move PELT related code from fair.c into this pelt.c file
+ * Author: Vincent Guittot <vincent.guittot@linaro.org>
+ */
+
+#include <linux/sched.h>
+#include "sched.h"
+#include "sched-pelt.h"
+#include "pelt.h"
+
+/*
+ * Approximate:
+ * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
+ */
+static u64 decay_load(u64 val, u64 n)
+{
+ unsigned int local_n;
+
+ if (unlikely(n > LOAD_AVG_PERIOD * 63))
+ return 0;
+
+ /* after bounds checking we can collapse to 32-bit */
+ local_n = n;
+
+ /*
+ * As y^PERIOD = 1/2, we can combine
+ * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+ * With a look-up table which covers y^n (n<PERIOD)
+ *
+ * To achieve constant time decay_load.
+ */
+ if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
+ val >>= local_n / LOAD_AVG_PERIOD;
+ local_n %= LOAD_AVG_PERIOD;
+ }
+
+ val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
+ return val;
+}
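
A quick numeric check of decay_load() (rounded), using y^32 == 0.5:

	decay_load(1024, 32) ~= 512
	decay_load(1024, 64) ~= 256

i.e. a contribution loses half its weight every LOAD_AVG_PERIOD (~32ms).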
+
+static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
+{
+ u32 c1, c2, c3 = d3; /* y^0 == 1 */
+
+ /*
+ * c1 = d1 y^p
+ */
+ c1 = decay_load((u64)d1, periods);
+
+ /*
+ * p-1
+ * c2 = 1024 \Sum y^n
+ * n=1
+ *
+ * inf inf
+ * = 1024 ( \Sum y^n - \Sum y^n - y^0 )
+ * n=0 n=p
+ */
+ c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
+
+ return c1 + c2 + c3;
+}
+
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+/*
+ * Accumulate the three separate parts of the sum; d1 the remainder
+ * of the last (incomplete) period, d2 the span of full periods and d3
+ * the remainder of the (incomplete) current period.
+ *
+ * d1 d2 d3
+ * ^ ^ ^
+ * | | |
+ * |<->|<----------------->|<--->|
+ * ... |---x---|------| ... |------|-----x (now)
+ *
+ * p-1
+ * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
+ * n=1
+ *
+ * = u y^p + (Step 1)
+ *
+ * p-1
+ * d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2)
+ * n=1
+ */
+static __always_inline u32
+accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
+ unsigned long load, unsigned long runnable, int running)
+{
+ unsigned long scale_freq, scale_cpu;
+ u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
+ u64 periods;
+
+ scale_freq = arch_scale_freq_capacity(cpu);
+ scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+ delta += sa->period_contrib;
+ periods = delta / 1024; /* A period is 1024us (~1ms) */
+
+ /*
+ * Step 1: decay old *_sum if we crossed period boundaries.
+ */
+ if (periods) {
+ sa->load_sum = decay_load(sa->load_sum, periods);
+ sa->runnable_load_sum =
+ decay_load(sa->runnable_load_sum, periods);
+ sa->util_sum = decay_load((u64)(sa->util_sum), periods);
+
+ /*
+ * Step 2
+ */
+ delta %= 1024;
+ contrib = __accumulate_pelt_segments(periods,
+ 1024 - sa->period_contrib, delta);
+ }
+ sa->period_contrib = delta;
+
+ contrib = cap_scale(contrib, scale_freq);
+ if (load)
+ sa->load_sum += load * contrib;
+ if (runnable)
+ sa->runnable_load_sum += runnable * contrib;
+ if (running)
+ sa->util_sum += contrib * scale_cpu;
+
+ return periods;
+}
+
+/*
+ * We can represent the historical contribution to runnable average as the
+ * coefficients of a geometric series. To do this we sub-divide our runnable
+ * history into segments of approximately 1ms (1024us); label the segment that
+ * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
+ *
+ * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
+ * p0 p1 p2
+ * (now) (~1ms ago) (~2ms ago)
+ *
+ * Let u_i denote the fraction of p_i that the entity was runnable.
+ *
+ * We then designate the fractions u_i as our co-efficients, yielding the
+ * following representation of historical load:
+ * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
+ *
+ * We choose y based on the width of a reasonable scheduling period, fixing:
+ * y^32 = 0.5
+ *
+ * This means that the contribution to load ~32ms ago (u_32) will be weighted
+ * approximately half as much as the contribution to load within the last ms
+ * (u_0).
+ *
+ * When a period "rolls over" and we have new u_0`, multiplying the previous
+ * sum again by y is sufficient to update:
+ * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
+ * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
+ */
+static __always_inline int
+___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
+ unsigned long load, unsigned long runnable, int running)
+{
+ u64 delta;
+
+ delta = now - sa->last_update_time;
+ /*
+ * This should only happen when time goes backwards, which it
+ * unfortunately does during sched clock init when we swap over to TSC.
+ */
+ if ((s64)delta < 0) {
+ sa->last_update_time = now;
+ return 0;
+ }
+
+ /*
+ * Use 1024ns as the unit of measurement since it's a reasonable
+ * approximation of 1us and fast to compute.
+ */
+ delta >>= 10;
+ if (!delta)
+ return 0;
+
+ sa->last_update_time += delta << 10;
+
+ /*
+ * running is a subset of runnable (weight) so running can't be set if
+ * runnable is clear. But there are some corner cases where the current
+ * se has already been dequeued but cfs_rq->curr still points to it.
+ * This means weight can be 0 while running is set, both for a
+ * sched_entity and for a cfs_rq if the latter becomes idle. As an
+ * example, this happens during idle_balance(), which calls
+ * update_blocked_averages().
+ */
+ if (!load)
+ runnable = running = 0;
+
+ /*
+ * Now we know we crossed measurement unit boundaries. The *_avg
+ * accrues by two steps:
+ *
+ * Step 1: accumulate *_sum since last_update_time. If we haven't
+ * crossed period boundaries, finish.
+ */
+ if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
+ return 0;
+
+ return 1;
+}
+
+static __always_inline void
+___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
+{
+ u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
+
+ /*
+ * Step 2: update *_avg.
+ */
+ sa->load_avg = div_u64(load * sa->load_sum, divider);
+ sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
+ WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+}
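
Sanity check, assuming LOAD_AVG_MAX == 47742 from the generated sched-pelt.h:
an always-running entity converges to util_sum ~= LOAD_AVG_MAX * 1024 while
the divider is ~LOAD_AVG_MAX, so

	util_avg = (47742 * 1024) / 47742 = 1024

i.e. util_avg saturates at SCHED_CAPACITY_SCALE.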
+
+/*
+ * sched_entity:
+ *
+ * task:
+ * se_runnable() == se_weight()
+ *
+ * group: [ see update_cfs_group() ]
+ * se_weight() = tg->weight * grq->load_avg / tg->load_avg
+ * se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
+ *
+ * load_sum := runnable_sum
+ * load_avg = se_weight(se) * runnable_avg
+ *
+ * runnable_load_sum := runnable_sum
+ * runnable_load_avg = se_runnable(se) * runnable_avg
+ *
+ * XXX collapse load_sum and runnable_load_sum
+ *
+ * cfs_rq:
+ *
+ * load_sum = \Sum se_weight(se) * se->avg.load_sum
+ * load_avg = \Sum se->avg.load_avg
+ *
+ * runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
+ * runnable_load_avg = \Sum se->avg.runnable_load_avg
+ */
+
+int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
+{
+ if (entity_is_task(se))
+ se->runnable_weight = se->load.weight;
+
+ if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
+ ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ return 1;
+ }
+
+ return 0;
+}
+
+int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ if (entity_is_task(se))
+ se->runnable_weight = se->load.weight;
+
+ if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
+ cfs_rq->curr == se)) {
+
+ ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ cfs_se_util_change(&se->avg);
+ return 1;
+ }
+
+ return 0;
+}
+
+int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
+{
+ if (___update_load_sum(now, cpu, &cfs_rq->avg,
+ scale_load_down(cfs_rq->load.weight),
+ scale_load_down(cfs_rq->runnable_weight),
+ cfs_rq->curr != NULL)) {
+
+ ___update_load_avg(&cfs_rq->avg, 1, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * rt_rq:
+ *
+ * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ * util_sum = cpu_scale * load_sum
+ * runnable_load_sum = load_sum
+ *
+ * load_avg and runnable_load_avg are not supported and meaningless.
+ *
+ */
+
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ if (___update_load_sum(now, rq->cpu, &rq->avg_rt,
+ running,
+ running,
+ running)) {
+
+ ___update_load_avg(&rq->avg_rt, 1, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * dl_rq:
+ *
+ * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ * util_sum = cpu_scale * load_sum
+ * runnable_load_sum = load_sum
+ *
+ */
+
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
+ running,
+ running,
+ running)) {
+
+ ___update_load_avg(&rq->avg_dl, 1, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+/*
+ * irq:
+ *
+ * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ * util_sum = cpu_scale * load_sum
+ * runnable_load_sum = load_sum
+ *
+ */
+
+int update_irq_load_avg(struct rq *rq, u64 running)
+{
+ int ret = 0;
+ /*
+ * We know the time that has been used by interrupts since the last
+ * update but we don't know when. Let's be pessimistic and assume the
+ * interrupt happened just before the update. This is not so far from
+ * reality because an interrupt will most probably wake up a task and
+ * trigger an update of the rq clock during which the metric is updated.
+ * We start by decaying with normal context time and then add the
+ * interrupt context time.
+ * We can safely remove running from rq->clock because
+ * rq->clock += delta with delta >= running
+ */
+ ret = ___update_load_sum(rq->clock - running, rq->cpu, &rq->avg_irq,
+ 0,
+ 0,
+ 0);
+ ret += ___update_load_sum(rq->clock, rq->cpu, &rq->avg_irq,
+ 1,
+ 1,
+ 1);
+
+ if (ret)
+ ___update_load_avg(&rq->avg_irq, 1, 1);
+
+ return ret;
+}
+#endif
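
A quick userspace sketch of the two-step update described in the comments above: step 1 decays and accumulates the running sum, step 2 scales the sum into an average. The linear decay below is a coarse stand-in for the kernel's precomputed y^n tables, so the resulting numbers are only approximate.

#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_PERIOD	32	/* half-life, in 1ms periods */
#define LOAD_AVG_MAX	47742	/* ~ maximum possible *_sum */

/* crude stand-in for decay_load(): val * y^n, with y^32 == 0.5 */
static uint64_t decay_sum(uint64_t val, unsigned int n)
{
	for (; n >= LOAD_AVG_PERIOD; n -= LOAD_AVG_PERIOD)
		val >>= 1;
	return val - val * n / (2 * LOAD_AVG_PERIOD); /* linear remainder */
}

int main(void)
{
	uint64_t sum = 0;
	int ms;

	/* Step 1: accumulate *_sum, one contribution per elapsed period */
	for (ms = 0; ms < 100; ms++)
		sum = decay_sum(sum, 1) + 1024;

	/* Step 2: update *_avg from the accumulated sum */
	printf("util_avg ~= %llu / 1024\n",
	       (unsigned long long)(sum * 1024 / LOAD_AVG_MAX));
	return 0;
}

After 100 ms of continuous running the printed average approaches 1024 geometrically, which is the convergence behaviour the *_sum/*_avg split is designed to give.
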
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
new file mode 100644
index 000000000000..d2894db28955
--- /dev/null
+++ b/kernel/sched/pelt.h
@@ -0,0 +1,72 @@
+#ifdef CONFIG_SMP
+
+int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
+int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
+int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+int update_irq_load_avg(struct rq *rq, u64 running);
+#else
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+ return 0;
+}
+#endif
+
+/*
+ * When a task is dequeued, its estimated utilization should not be updated if
+ * its util_avg has not been updated at least once.
+ * This flag is used to synchronize util_avg updates with util_est updates.
+ * We map this information into the LSB of the utilization saved at
+ * dequeue time (i.e. util_est.enqueued).
+ */
+#define UTIL_AVG_UNCHANGED 0x1
+
+static inline void cfs_se_util_change(struct sched_avg *avg)
+{
+ unsigned int enqueued;
+
+ if (!sched_feat(UTIL_EST))
+ return;
+
+ /* Avoid store if the flag has been already set */
+ enqueued = avg->util_est.enqueued;
+ if (!(enqueued & UTIL_AVG_UNCHANGED))
+ return;
+
+ /* Reset flag to report util_avg has been updated */
+ enqueued &= ~UTIL_AVG_UNCHANGED;
+ WRITE_ONCE(avg->util_est.enqueued, enqueued);
+}
+
+#else
+
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+{
+ return 0;
+}
+
+static inline int
+update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ return 0;
+}
+
+static inline int
+update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ return 0;
+}
+
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+ return 0;
+}
+#endif
+
+
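
A small standalone illustration of the UTIL_AVG_UNCHANGED trick from the header above: the flag rides in the LSB of the stored utilization, so setting and clearing it perturbs the value by at most one. The numbers are invented; only the bit manipulation mirrors cfs_se_util_change().

#include <stdio.h>

#define UTIL_AVG_UNCHANGED 0x1

int main(void)
{
	/* hypothetical utilization snapshot taken at enqueue time */
	unsigned int enqueued = 742 | UTIL_AVG_UNCHANGED;

	printf("stored: %u (flag=%u)\n", enqueued, enqueued & UTIL_AVG_UNCHANGED);

	/* util_avg got updated: clear the flag, as cfs_se_util_change() does */
	if (enqueued & UTIL_AVG_UNCHANGED)
		enqueued &= ~UTIL_AVG_UNCHANGED;

	printf("after update: %u (flag=%u)\n",
	       enqueued, enqueued & UTIL_AVG_UNCHANGED);
	return 0;
}
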
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index eaaec8364f96..2e2955a8cf8f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -5,6 +5,8 @@
*/
#include "sched.h"
+#include "pelt.h"
+
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
@@ -973,8 +975,6 @@ static void update_curr_rt(struct rq *rq)
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
-
if (!rt_bandwidth_enabled())
return;
@@ -1578,6 +1578,14 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rt_queue_push_tasks(rq);
+ /*
+ * If the prev task was rt, put_prev_task() has already updated the
+ * utilization. We only care about the case where we start to schedule
+ * an rt task.
+ */
+ if (rq->curr->sched_class != &rt_sched_class)
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+
return p;
}
@@ -1585,6 +1593,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+
/*
* The previous task needs to be made eligible for pushing
* if it is still active
@@ -2314,6 +2324,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
watchdog(rq, p);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c7742dcc136c..4a2e8cae63c4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -594,6 +594,7 @@ struct rt_rq {
unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
+
#endif /* CONFIG_SMP */
int rt_queued;
@@ -673,7 +674,26 @@ struct dl_rq {
u64 bw_ratio;
};
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se) (!se->my_q)
+#else
+#define entity_is_task(se) 1
+#endif
+
#ifdef CONFIG_SMP
+/*
+ * XXX we want to get rid of these helpers and use the full load resolution.
+ */
+static inline long se_weight(struct sched_entity *se)
+{
+ return scale_load_down(se->load.weight);
+}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+ return scale_load_down(se->runnable_weight);
+}
static inline bool sched_asym_prefer(int a, int b)
{
@@ -833,8 +853,12 @@ struct rq {
struct list_head cfs_tasks;
- u64 rt_avg;
- u64 age_stamp;
+ struct sched_avg avg_rt;
+ struct sched_avg avg_dl;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#define HAVE_SCHED_AVG_IRQ
+ struct sched_avg avg_irq;
+#endif
u64 idle_stamp;
u64 avg_idle;
@@ -1075,7 +1099,8 @@ enum numa_faults_stats {
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *, struct task_struct *);
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+ int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
@@ -1690,15 +1715,9 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
-extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
-static inline u64 sched_avg_period(void)
-{
- return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
-}
-
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -1735,8 +1754,6 @@ unsigned long arch_scale_freq_capacity(int cpu)
#endif
#ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1747,12 +1764,6 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
return SCHED_CAPACITY_SCALE;
}
#endif
-
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
- rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
- sched_avg_update(rq);
-}
#else
#ifndef arch_scale_cpu_capacity
static __always_inline
@@ -1761,8 +1772,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
return SCHED_CAPACITY_SCALE;
}
#endif
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
-static inline void sched_avg_update(struct rq *rq) { }
#endif
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -2177,11 +2186,16 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
-static inline unsigned long cpu_util_dl(struct rq *rq)
+static inline unsigned long cpu_bw_dl(struct rq *rq)
{
return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+ return READ_ONCE(rq->avg_dl.util_avg);
+}
+
static inline unsigned long cpu_util_cfs(struct rq *rq)
{
unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
@@ -2193,4 +2207,37 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
return util;
}
+
+static inline unsigned long cpu_util_rt(struct rq *rq)
+{
+ return READ_ONCE(rq->avg_rt.util_avg);
+}
+#endif
+
+#ifdef HAVE_SCHED_AVG_IRQ
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+ return rq->avg_irq.util_avg;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+ util *= (max - irq);
+ util /= max;
+
+ return util;
+
+}
+#else
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+ return 0;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+ return util;
+}
#endif
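
The scale_irq_capacity() helper added above is plain proportional scaling: the utilization is reduced by the fraction of capacity consumed in irq context. A userspace check with assumed numbers (512/1024 utilization, 128/1024 of the CPU eaten by irq):

#include <stdio.h>

static unsigned long scale_irq_capacity(unsigned long util,
					unsigned long irq, unsigned long max)
{
	util *= (max - irq);	/* capacity left after irq pressure */
	util /= max;
	return util;
}

int main(void)
{
	/* 512/1024 CFS util on a CPU that spent 128/1024 in irq context */
	printf("scaled util = %lu\n", scale_irq_capacity(512, 128, 1024));
	return 0;
}

This prints 448, i.e. the CFS utilization shrinks in proportion to the 12.5% of capacity lost to interrupts.
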
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index b6fb2c3b3ff7..66b59ac77c22 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -32,7 +32,7 @@ void swake_up_locked(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_locked);
-void swake_up(struct swait_queue_head *q)
+void swake_up_one(struct swait_queue_head *q)
{
unsigned long flags;
@@ -40,7 +40,7 @@ void swake_up(struct swait_queue_head *q)
swake_up_locked(q);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(swake_up);
+EXPORT_SYMBOL(swake_up_one);
/*
* Does not allow usage from IRQ disabled, since we must be able to
@@ -69,14 +69,14 @@ void swake_up_all(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_all);
-void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
+static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
wait->task = current;
if (list_empty(&wait->task_list))
- list_add(&wait->task_list, &q->task_list);
+ list_add_tail(&wait->task_list, &q->task_list);
}
-void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
+void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
unsigned long flags;
@@ -85,16 +85,28 @@ void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int
set_current_state(state);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(prepare_to_swait);
+EXPORT_SYMBOL(prepare_to_swait_exclusive);
long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
- if (signal_pending_state(state, current))
- return -ERESTARTSYS;
+ unsigned long flags;
+ long ret = 0;
- prepare_to_swait(q, wait, state);
+ raw_spin_lock_irqsave(&q->lock, flags);
+ if (unlikely(signal_pending_state(state, current))) {
+ /*
+ * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
+ * must not see us.
+ */
+ list_del_init(&wait->task_list);
+ ret = -ERESTARTSYS;
+ } else {
+ __prepare_to_swait(q, wait);
+ set_current_state(state);
+ }
+ raw_spin_unlock_irqrestore(&q->lock, flags);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 928be527477e..870f97b313e3 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -134,8 +134,8 @@ static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*/
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, void *key)
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
*
* On UP it can prevent extra preemption.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*/
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, void *key)
@@ -392,35 +392,36 @@ static inline bool is_kthread_should_stop(void)
* if (condition)
* break;
*
- * p->state = mode; condition = true;
- * smp_mb(); // A smp_wmb(); // C
- * if (!wq_entry->flags & WQ_FLAG_WOKEN) wq_entry->flags |= WQ_FLAG_WOKEN;
- * schedule() try_to_wake_up();
- * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~
- * wq_entry->flags &= ~WQ_FLAG_WOKEN; condition = true;
- * smp_mb() // B smp_wmb(); // C
- * wq_entry->flags |= WQ_FLAG_WOKEN;
- * }
- * remove_wait_queue(&wq_head, &wait);
+ * // in wait_woken() // in woken_wake_function()
*
+ * p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN;
+ * smp_mb(); // A try_to_wake_up():
+ * if (!(wq_entry->flags & WQ_FLAG_WOKEN)) <full barrier>
+ * schedule() if (p->state & mode)
+ * p->state = TASK_RUNNING; p->state = TASK_RUNNING;
+ * wq_entry->flags &= ~WQ_FLAG_WOKEN; ~~~~~~~~~~~~~~~~~~
+ * smp_mb(); // B condition = true;
+ * } smp_mb(); // C
+ * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN;
*/
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
- set_current_state(mode); /* A */
/*
- * The above implies an smp_mb(), which matches with the smp_wmb() from
- * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
- * also observe all state before the wakeup.
+ * The below executes an smp_mb(), which matches with the full barrier
+ * executed by the try_to_wake_up() in woken_wake_function() such that
+ * either we see the store to wq_entry->flags in woken_wake_function()
+ * or woken_wake_function() sees our store to current->state.
*/
+ set_current_state(mode); /* A */
if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
timeout = schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
/*
- * The below implies an smp_mb(), it too pairs with the smp_wmb() from
- * woken_wake_function() such that we must either observe the wait
- * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
- * an event.
+ * The below executes an smp_mb(), which matches with the smp_mb() (C)
+ * in woken_wake_function() such that either we see the wait condition
+ * being true or the store to wq_entry->flags in woken_wake_function()
+ * follows ours in the coherence order.
*/
smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
@@ -430,14 +431,8 @@ EXPORT_SYMBOL(wait_woken);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
- /*
- * Although this function is called under waitqueue lock, LOCK
- * doesn't imply write barrier and the users expects write
- * barrier semantics on wakeup functions. The following
- * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
- * and is paired with smp_store_mb() in wait_woken().
- */
- smp_wmb(); /* C */
+ /* Pairs with the smp_store_mb() in wait_woken(). */
+ smp_mb(); /* C */
wq_entry->flags |= WQ_FLAG_WOKEN;
return default_wake_function(wq_entry, mode, sync, key);
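
The reworked wait_woken()/woken_wake_function() comments above describe the classic store-buffering pattern. Below is a hedged pthreads sketch of that shape, with C11 fences standing in for smp_mb() and the prints purely illustrative; it simplifies the real pairing, but shows the guarantee: with a full fence on each side, at least one thread must observe the other's store, so the wakeup cannot be lost.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int state;	/* stands in for p->state */
static atomic_int woken;	/* stands in for WQ_FLAG_WOKEN */

static void *waiter(void *arg)
{
	atomic_store_explicit(&state, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb(); A */
	if (!atomic_load_explicit(&woken, memory_order_relaxed))
		puts("waiter: would schedule(); waker must then see state");
	else
		puts("waiter: saw WQ_FLAG_WOKEN, skips sleeping");
	return NULL;
}

static void *waker(void *arg)
{
	atomic_store_explicit(&woken, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb(); C */
	if (atomic_load_explicit(&state, memory_order_relaxed))
		puts("waker: sees the sleeper, would try_to_wake_up() it");
	else
		puts("waker: waiter not asleep yet; it will see the flag");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waiter, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;	/* build with: cc -pthread sb.c */
}
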
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 5043e7433f4b..c230c2dd48e1 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -238,8 +238,7 @@ int smpboot_unpark_threads(unsigned int cpu)
mutex_lock(&smpboot_threads_lock);
list_for_each_entry(cur, &hotplug_threads, list)
- if (cpumask_test_cpu(cpu, cur->cpumask))
- smpboot_unpark_thread(cur, cpu);
+ smpboot_unpark_thread(cur, cpu);
mutex_unlock(&smpboot_threads_lock);
return 0;
}
@@ -280,34 +279,26 @@ static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
}
/**
- * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
+ * smpboot_register_percpu_thread - Register a per_cpu thread related
* to hotplug
* @plug_thread: Hotplug thread descriptor
- * @cpumask: The cpumask where threads run
*
* Creates and starts the threads on all online cpus.
*/
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *cpumask)
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
unsigned int cpu;
int ret = 0;
- if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_copy(plug_thread->cpumask, cpumask);
-
get_online_cpus();
mutex_lock(&smpboot_threads_lock);
for_each_online_cpu(cpu) {
ret = __smpboot_create_thread(plug_thread, cpu);
if (ret) {
smpboot_destroy_threads(plug_thread);
- free_cpumask_var(plug_thread->cpumask);
goto out;
}
- if (cpumask_test_cpu(cpu, cpumask))
- smpboot_unpark_thread(plug_thread, cpu);
+ smpboot_unpark_thread(plug_thread, cpu);
}
list_add(&plug_thread->list, &hotplug_threads);
out:
@@ -315,7 +306,7 @@ out:
put_online_cpus();
return ret;
}
-EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
+EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
/**
* smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
@@ -331,44 +322,9 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
smpboot_destroy_threads(plug_thread);
mutex_unlock(&smpboot_threads_lock);
put_online_cpus();
- free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
-/**
- * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
- * @plug_thread: Hotplug thread descriptor
- * @new: Revised mask to use
- *
- * The cpumask field in the smp_hotplug_thread must not be updated directly
- * by the client, but only by calling this function.
- * This function can only be called on a registered smp_hotplug_thread.
- */
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *new)
-{
- struct cpumask *old = plug_thread->cpumask;
- static struct cpumask tmp;
- unsigned int cpu;
-
- lockdep_assert_cpus_held();
- mutex_lock(&smpboot_threads_lock);
-
- /* Park threads that were exclusively enabled on the old mask. */
- cpumask_andnot(&tmp, old, new);
- for_each_cpu_and(cpu, &tmp, cpu_online_mask)
- smpboot_park_thread(plug_thread, cpu);
-
- /* Unpark threads that are exclusively enabled on the new mask. */
- cpumask_andnot(&tmp, new, old);
- for_each_cpu_and(cpu, &tmp, cpu_online_mask)
- smpboot_unpark_thread(plug_thread, cpu);
-
- cpumask_copy(old, new);
-
- mutex_unlock(&smpboot_threads_lock);
-}
-
static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
/*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e190d1ef3a23..067cb83f37ea 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
unsigned long flags;
bool enabled;
+ preempt_disable();
raw_spin_lock_irqsave(&stopper->lock, flags);
enabled = stopper->enabled;
if (enabled)
@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
raw_spin_unlock_irqrestore(&stopper->lock, flags);
wake_up_q(&wakeq);
+ preempt_enable();
return enabled;
}
@@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
DEFINE_WAKE_Q(wakeq);
int err;
+
retry:
+ /*
+ * The waking up of stopper threads has to happen in the same
+ * scheduling context as the queueing. Otherwise, there is a
+ * possibility of one of the above stoppers being woken up by another
+ * CPU, preempting us; we would then never wake up the other stopper.
+ */
+ preempt_disable();
raw_spin_lock_irq(&stopper1->lock);
raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
- err = -ENOENT;
- if (!stopper1->enabled || !stopper2->enabled)
+ if (!stopper1->enabled || !stopper2->enabled) {
+ err = -ENOENT;
goto unlock;
+ }
+
/*
* Ensure that if we race with __stop_cpus() the stoppers won't get
* queued up in reverse order leading to system deadlock.
@@ -253,36 +266,30 @@ retry:
* It can be falsely true but it is safe to spin until it is cleared,
* queue_stop_cpus_work() does everything under preempt_disable().
*/
- err = -EDEADLK;
- if (unlikely(stop_cpus_in_progress))
- goto unlock;
+ if (unlikely(stop_cpus_in_progress)) {
+ err = -EDEADLK;
+ goto unlock;
+ }
err = 0;
__cpu_stop_queue_work(stopper1, work1, &wakeq);
__cpu_stop_queue_work(stopper2, work2, &wakeq);
- /*
- * The waking up of stopper threads has to happen
- * in the same scheduling context as the queueing.
- * Otherwise, there is a possibility of one of the
- * above stoppers being woken up by another CPU,
- * and preempting us. This will cause us to n ot
- * wake up the other stopper forever.
- */
- preempt_disable();
+
unlock:
raw_spin_unlock(&stopper2->lock);
raw_spin_unlock_irq(&stopper1->lock);
if (unlikely(err == -EDEADLK)) {
+ preempt_enable();
+
while (stop_cpus_in_progress)
cpu_relax();
+
goto retry;
}
- if (!err) {
- wake_up_q(&wakeq);
- preempt_enable();
- }
+ wake_up_q(&wakeq);
+ preempt_enable();
return err;
}
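
The fix above hinges on one rule: the queueing and the wakeup must happen in a single non-preemptible region. A stubbed-out userspace skeleton of that shape, where every function is a fake stand-in for the corresponding kernel primitive:

#include <stdio.h>

/* fake stand-ins for the kernel primitives */
static void preempt_disable(void)	{ puts("preempt_disable()"); }
static void preempt_enable(void)	{ puts("preempt_enable()"); }
static void lock_stoppers(void)		{ puts("raw_spin_lock(...)"); }
static void unlock_stoppers(void)	{ puts("raw_spin_unlock(...)"); }
static void queue_both_works(void)	{ puts("__cpu_stop_queue_work() x2"); }
static void wake_up_q(void)		{ puts("wake_up_q(&wakeq)"); }

int main(void)
{
	/*
	 * Preemption is disabled across queueing *and* waking: if we could
	 * be preempted in between (e.g. by a stopper woken from another
	 * CPU), the second stopper might never be woken.
	 */
	preempt_disable();
	lock_stoppers();
	queue_both_works();
	unlock_stoppers();
	wake_up_q();
	preempt_enable();
	return 0;
}
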
diff --git a/kernel/sys.c b/kernel/sys.c
index 38509dc1f77b..e27b51d3facd 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2512,11 +2512,11 @@ static int do_sysinfo(struct sysinfo *info)
{
unsigned long mem_total, sav_total;
unsigned int mem_unit, bitcount;
- struct timespec tp;
+ struct timespec64 tp;
memset(info, 0, sizeof(struct sysinfo));
- get_monotonic_boottime(&tp);
+ ktime_get_boottime_ts64(&tp);
info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2d9837c0aff4..f22f76b7a138 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -368,14 +368,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "sched_time_avg_ms",
- .data = &sysctl_sched_time_avg,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &one,
- },
#ifdef CONFIG_SCHEDSTATS
{
.procname = "sched_schedstats",
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index dd53e354f630..7bca480151b0 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -162,90 +162,6 @@ static int test_kprobes(void)
}
-#if 0
-static u32 jph_val;
-
-static u32 j_kprobe_target(u32 value)
-{
- if (preemptible()) {
- handler_errors++;
- pr_err("jprobe-handler is preemptible\n");
- }
- if (value != rand1) {
- handler_errors++;
- pr_err("incorrect value in jprobe handler\n");
- }
-
- jph_val = rand1;
- jprobe_return();
- return 0;
-}
-
-static struct jprobe jp = {
- .entry = j_kprobe_target,
- .kp.symbol_name = "kprobe_target"
-};
-
-static int test_jprobe(void)
-{
- int ret;
-
- ret = register_jprobe(&jp);
- if (ret < 0) {
- pr_err("register_jprobe returned %d\n", ret);
- return ret;
- }
-
- ret = target(rand1);
- unregister_jprobe(&jp);
- if (jph_val == 0) {
- pr_err("jprobe handler not called\n");
- handler_errors++;
- }
-
- return 0;
-}
-
-static struct jprobe jp2 = {
- .entry = j_kprobe_target,
- .kp.symbol_name = "kprobe_target2"
-};
-
-static int test_jprobes(void)
-{
- int ret;
- struct jprobe *jps[2] = {&jp, &jp2};
-
- /* addr and flags should be cleard for reusing kprobe. */
- jp.kp.addr = NULL;
- jp.kp.flags = 0;
- ret = register_jprobes(jps, 2);
- if (ret < 0) {
- pr_err("register_jprobes returned %d\n", ret);
- return ret;
- }
-
- jph_val = 0;
- ret = target(rand1);
- if (jph_val == 0) {
- pr_err("jprobe handler not called\n");
- handler_errors++;
- }
-
- jph_val = 0;
- ret = target2(rand1);
- if (jph_val == 0) {
- pr_err("jprobe handler2 not called\n");
- handler_errors++;
- }
- unregister_jprobes(jps, 2);
-
- return 0;
-}
-#else
-#define test_jprobe() (0)
-#define test_jprobes() (0)
-#endif
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
@@ -383,16 +299,6 @@ int init_test_probes(void)
if (ret < 0)
errors++;
- num_tests++;
- ret = test_jprobe();
- if (ret < 0)
- errors++;
-
- num_tests++;
- ret = test_jprobes();
- if (ret < 0)
- errors++;
-
#ifdef CONFIG_KRETPROBES
num_tests++;
ret = test_kretprobe();
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 639321bf2e39..fa5de5e8de61 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -581,11 +581,11 @@ static void alarm_timer_rearm(struct k_itimer *timr)
* @timr: Pointer to the posixtimer data struct
* @now: Current time to forward the timer against
*/
-static int alarm_timer_forward(struct k_itimer *timr, ktime_t now)
+static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now)
{
struct alarm *alarm = &timr->it.alarm.alarmtimer;
- return (int) alarm_forward(alarm, timr->it_interval, now);
+ return alarm_forward(alarm, timr->it_interval, now);
}
/**
@@ -808,7 +808,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now = alarm_bases[type].gettime();
- exp = ktime_add(now, exp);
+
+ exp = ktime_add_safe(now, exp);
}
ret = alarmtimer_do_nsleep(&alarm, exp, type);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 16c027e9cc73..8c0e4092f661 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -463,6 +463,12 @@ void clockevents_register_device(struct clock_event_device *dev)
dev->cpumask = cpumask_of(smp_processor_id());
}
+ if (dev->cpumask == cpu_all_mask) {
+ WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
+ dev->name);
+ dev->cpumask = cpu_possible_mask;
+ }
+
raw_spin_lock_irqsave(&clockevents_lock, flags);
list_add(&dev->list, &clockevent_devices);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f89a78e2792b..f74fb00d8064 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -94,6 +94,8 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
/*[Clocksource internal variables]---------
* curr_clocksource:
* currently selected clocksource.
+ * suspend_clocksource:
+ * used to calculate the suspend time.
* clocksource_list:
* linked list with the registered clocksources
* clocksource_mutex:
@@ -102,10 +104,12 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
* Name of the user-specified clocksource.
*/
static struct clocksource *curr_clocksource;
+static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
+static u64 suspend_start;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
@@ -447,6 +451,140 @@ static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
+static bool clocksource_is_suspend(struct clocksource *cs)
+{
+ return cs == suspend_clocksource;
+}
+
+static void __clocksource_suspend_select(struct clocksource *cs)
+{
+ /*
+ * Skip the clocksource which will be stopped in suspend state.
+ */
+ if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
+ return;
+
+ /*
+ * A nonstop clocksource can be selected as the suspend clocksource to
+ * calculate the suspend time, so it should not supply suspend/resume
+ * interfaces that would stop it when the system suspends.
+ */
+ if (cs->suspend || cs->resume) {
+ pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
+ cs->name);
+ }
+
+ /* Pick the best rating. */
+ if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
+ suspend_clocksource = cs;
+}
+
+/**
+ * clocksource_suspend_select - Select the best clocksource for suspend timing
+ * @fallback: if select a fallback clocksource
+ */
+static void clocksource_suspend_select(bool fallback)
+{
+ struct clocksource *cs, *old_suspend;
+
+ old_suspend = suspend_clocksource;
+ if (fallback)
+ suspend_clocksource = NULL;
+
+ list_for_each_entry(cs, &clocksource_list, list) {
+ /* Skip current if we were requested for a fallback. */
+ if (fallback && cs == old_suspend)
+ continue;
+
+ __clocksource_suspend_select(cs);
+ }
+}
+
+/**
+ * clocksource_start_suspend_timing - Start measuring the suspend timing
+ * @cs: current clocksource from timekeeping
+ * @start_cycles: current cycles from timekeeping
+ *
+ * This function will save the start cycle values of suspend timer to calculate
+ * the suspend time when resuming system.
+ *
+ * This function is called late in the suspend process from timekeeping_suspend(),
+ * which means processes are frozen and non-boot cpus and interrupts are disabled.
+ * It is therefore possible to start the suspend timer without taking the
+ * clocksource mutex.
+ */
+void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+{
+ if (!suspend_clocksource)
+ return;
+
+ /*
+ * If current clocksource is the suspend timer, we should use the
+ * tkr_mono.cycle_last value as suspend_start to avoid same reading
+ * from suspend timer.
+ */
+ if (clocksource_is_suspend(cs)) {
+ suspend_start = start_cycles;
+ return;
+ }
+
+ if (suspend_clocksource->enable &&
+ suspend_clocksource->enable(suspend_clocksource)) {
+ pr_warn_once("Failed to enable the non-suspendable clocksource.\n");
+ return;
+ }
+
+ suspend_start = suspend_clocksource->read(suspend_clocksource);
+}
+
+/**
+ * clocksource_stop_suspend_timing - Stop measuring the suspend timing
+ * @cs: current clocksource from timekeeping
+ * @cycle_now: current cycles from timekeeping
+ *
+ * This function will calculate the suspend time from suspend timer.
+ *
+ * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
+ *
+ * This function is called early in the resume process from timekeeping_resume(),
+ * which means there is only one cpu, no processes are running and interrupts
+ * are disabled. It is therefore possible to stop the suspend timer without
+ * taking the clocksource mutex.
+ */
+u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+{
+ u64 now, delta, nsec = 0;
+
+ if (!suspend_clocksource)
+ return 0;
+
+ /*
+ * If current clocksource is the suspend timer, we should use the
+ * tkr_mono.cycle_last value from timekeeping as current cycle to
+ * avoid same reading from suspend timer.
+ */
+ if (clocksource_is_suspend(cs))
+ now = cycle_now;
+ else
+ now = suspend_clocksource->read(suspend_clocksource);
+
+ if (now > suspend_start) {
+ delta = clocksource_delta(now, suspend_start,
+ suspend_clocksource->mask);
+ nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+ suspend_clocksource->shift);
+ }
+
+ /*
+ * Disable the suspend timer to save power if current clocksource is
+ * not the suspend timer.
+ */
+ if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
+ suspend_clocksource->disable(suspend_clocksource);
+
+ return nsec;
+}
+
/**
* clocksource_suspend - suspend the clocksource(s)
*/
@@ -792,6 +930,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
clocksource_select();
clocksource_select_watchdog(false);
+ __clocksource_suspend_select(cs);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -820,6 +959,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
clocksource_select();
clocksource_select_watchdog(false);
+ clocksource_suspend_select(false);
mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
@@ -845,6 +985,15 @@ static int clocksource_unbind(struct clocksource *cs)
return -EBUSY;
}
+ if (clocksource_is_suspend(cs)) {
+ /*
+ * Select and try to install a replacement suspend clocksource.
+ * If no replacement suspend clocksource, we will just let the
+ * clocksource go and have no suspend clocksource.
+ */
+ clocksource_suspend_select(true);
+ }
+
clocksource_watchdog_lock(&flags);
clocksource_dequeue_watchdog(cs);
list_del_init(&cs->list);
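
clocksource_stop_suspend_timing() converts the cycle delta with the usual (delta * mult) >> shift arithmetic. A quick standalone check, assuming a made-up 10 MHz nonstop clocksource where one cycle is 100 ns; the mask/wraparound handling of clocksource_delta() is omitted for brevity:

#include <stdio.h>
#include <stdint.h>

/* same shape as mul_u64_u32_shr(delta, mult, shift) */
static uint64_t cyc_to_ns(uint64_t delta, uint32_t mult, uint32_t shift)
{
	return (delta * (uint64_t)mult) >> shift;
}

int main(void)
{
	uint32_t shift = 20, mult = 100u << 20;		/* 100 ns per cycle */
	uint64_t start = 123456;			/* suspend_start */
	uint64_t now = start + 10 * 1000 * 1000;	/* 10M cycles later */

	printf("slept for %llu ns\n",
	       (unsigned long long)cyc_to_ns(now - start, mult, shift));
	return 0;
}

With these assumed values the program reports 1000000000 ns, i.e. a one second suspend.
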
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 3e93c54bd3a1..e1a549c9e399 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -718,8 +718,8 @@ static void hrtimer_switch_to_hres(void)
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (tick_init_highres()) {
- printk(KERN_WARNING "Could not switch to high resolution "
- "mode on CPU %d\n", base->cpu);
+ pr_warn("Could not switch to high resolution mode on CPU %u\n",
+ base->cpu);
return;
}
base->hres_active = 1;
@@ -1573,8 +1573,7 @@ retry:
else
expires_next = ktime_add(now, delta);
tick_program_event(expires_next, 1);
- printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
- ktime_to_ns(delta));
+ pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}
/* called with interrupts disabled */
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index a09ded765f6c..c5e0cba3b39c 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -502,7 +502,7 @@ static void sched_sync_hw_clock(struct timespec64 now,
{
struct timespec64 next;
- getnstimeofday64(&next);
+ ktime_get_real_ts64(&next);
if (!fail)
next.tv_sec = 659;
else {
@@ -537,7 +537,7 @@ static void sync_rtc_clock(void)
if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
return;
- getnstimeofday64(&now);
+ ktime_get_real_ts64(&now);
adjust = now;
if (persistent_clock_is_local)
@@ -591,7 +591,7 @@ static bool sync_cmos_clock(void)
* Architectures are strongly encouraged to use rtclib and not
* implement this legacy API.
*/
- getnstimeofday64(&now);
+ ktime_get_real_ts64(&now);
if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
@@ -642,7 +642,7 @@ void ntp_notify_cmos_timer(void)
/*
* Propagate a new txc->status value into the NTP state:
*/
-static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
+static inline void process_adj_status(const struct timex *txc)
{
if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
time_state = TIME_OK;
@@ -665,12 +665,10 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
}
-static inline void process_adjtimex_modes(struct timex *txc,
- struct timespec64 *ts,
- s32 *time_tai)
+static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai)
{
if (txc->modes & ADJ_STATUS)
- process_adj_status(txc, ts);
+ process_adj_status(txc);
if (txc->modes & ADJ_NANO)
time_status |= STA_NANO;
@@ -718,7 +716,7 @@ static inline void process_adjtimex_modes(struct timex *txc,
* adjtimex mainly allows reading (and writing, if superuser) of
* kernel time-keeping variables. used by xntpd.
*/
-int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
+int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
{
int result;
@@ -735,7 +733,7 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
/* If there are input parameters, then process them: */
if (txc->modes)
- process_adjtimex_modes(txc, ts, time_tai);
+ process_adjtimex_modes(txc, time_tai);
txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
NTP_SCALE_SHIFT);
@@ -1022,12 +1020,11 @@ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_t
static int __init ntp_tick_adj_setup(char *str)
{
- int rc = kstrtol(str, 0, (long *)&ntp_tick_adj);
-
+ int rc = kstrtos64(str, 0, &ntp_tick_adj);
if (rc)
return rc;
- ntp_tick_adj <<= NTP_SCALE_SHIFT;
+ ntp_tick_adj <<= NTP_SCALE_SHIFT;
return 1;
}
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 909bd1f1bfb1..c24b0e13f011 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -8,6 +8,6 @@ extern void ntp_clear(void);
extern u64 ntp_tick_length(void);
extern ktime_t ntp_get_next_leap(void);
extern int second_overflow(time64_t secs);
-extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
-extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
+extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai);
+extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 9cdf54b04ca8..294d7b65af33 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -85,7 +85,7 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now)
continue;
timer->it.cpu.expires += incr;
- timer->it_overrun += 1 << i;
+ timer->it_overrun += 1LL << i;
delta -= incr;
}
}
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 26aa9569e24a..2c6847d5d69b 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -81,7 +81,7 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
ktime_get_ts64(tp);
break;
case CLOCK_BOOTTIME:
- get_monotonic_boottime64(tp);
+ ktime_get_boottime_ts64(tp);
break;
default:
return -EINVAL;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index e08ce3f27447..f23cc46ecf3e 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -86,15 +86,6 @@ static const struct k_clock clock_realtime, clock_monotonic;
#endif
/*
- * parisc wants ENOTSUP instead of EOPNOTSUPP
- */
-#ifndef ENOTSUP
-# define ENANOSLEEP_NOTSUP EOPNOTSUPP
-#else
-# define ENANOSLEEP_NOTSUP ENOTSUP
-#endif
-
-/*
* The timer ID is turned into a timer address by idr_find().
* Verifying a valid ID consists of:
*
@@ -228,21 +219,21 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
*/
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
- getrawmonotonic64(tp);
+ ktime_get_raw_ts64(tp);
return 0;
}
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
- *tp = current_kernel_time64();
+ ktime_get_coarse_real_ts64(tp);
return 0;
}
static int posix_get_monotonic_coarse(clockid_t which_clock,
struct timespec64 *tp)
{
- *tp = get_monotonic_coarse64();
+ ktime_get_coarse_ts64(tp);
return 0;
}
@@ -254,13 +245,13 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
- get_monotonic_boottime64(tp);
+ ktime_get_boottime_ts64(tp);
return 0;
}
static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
- timekeeping_clocktai64(tp);
+ ktime_get_clocktai_ts64(tp);
return 0;
}
@@ -283,6 +274,17 @@ static __init int init_posix_timers(void)
}
__initcall(init_posix_timers);
+/*
+ * The siginfo si_overrun field and the return value of timer_getoverrun(2)
+ * are of type int. Clamp the overrun value to INT_MAX
+ */
+static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
+{
+ s64 sum = timr->it_overrun_last + (s64)baseval;
+
+ return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
+}
+
static void common_hrtimer_rearm(struct k_itimer *timr)
{
struct hrtimer *timer = &timr->it.real.timer;
@@ -290,9 +292,8 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
if (!timr->it_interval)
return;
- timr->it_overrun += (unsigned int) hrtimer_forward(timer,
- timer->base->get_time(),
- timr->it_interval);
+ timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
+ timr->it_interval);
hrtimer_restart(timer);
}
@@ -321,10 +322,10 @@ void posixtimer_rearm(struct siginfo *info)
timr->it_active = 1;
timr->it_overrun_last = timr->it_overrun;
- timr->it_overrun = -1;
+ timr->it_overrun = -1LL;
++timr->it_requeue_pending;
- info->si_overrun += timr->it_overrun_last;
+ info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
}
unlock_timer(timr, flags);
@@ -418,9 +419,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
now = ktime_add(now, kj);
}
#endif
- timr->it_overrun += (unsigned int)
- hrtimer_forward(timer, now,
- timr->it_interval);
+ timr->it_overrun += hrtimer_forward(timer, now,
+ timr->it_interval);
ret = HRTIMER_RESTART;
++timr->it_requeue_pending;
timr->it_active = 1;
@@ -524,7 +524,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
new_timer->it_id = (timer_t) new_timer_id;
new_timer->it_clock = which_clock;
new_timer->kclock = kc;
- new_timer->it_overrun = -1;
+ new_timer->it_overrun = -1LL;
if (event) {
rcu_read_lock();
@@ -645,11 +645,11 @@ static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
return __hrtimer_expires_remaining_adjusted(timer, now);
}
-static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
+static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
struct hrtimer *timer = &timr->it.real.timer;
- return (int)hrtimer_forward(timer, now, timr->it_interval);
+ return hrtimer_forward(timer, now, timr->it_interval);
}
/*
@@ -743,7 +743,7 @@ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
- struct itimerspec __user *, setting)
+ struct __kernel_itimerspec __user *, setting)
{
struct itimerspec64 cur_setting;
@@ -755,7 +755,8 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
return ret;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
+
COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
struct compat_itimerspec __user *, setting)
{
@@ -768,6 +769,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
}
return ret;
}
+
#endif
/*
@@ -789,7 +791,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
if (!timr)
return -EINVAL;
- overrun = timr->it_overrun_last;
+ overrun = timer_overrun_to_int(timr, 0);
unlock_timer(timr, flags);
return overrun;
@@ -906,8 +908,8 @@ retry:
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
- const struct itimerspec __user *, new_setting,
- struct itimerspec __user *, old_setting)
+ const struct __kernel_itimerspec __user *, new_setting,
+ struct __kernel_itimerspec __user *, old_setting)
{
struct itimerspec64 new_spec, old_spec;
struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
@@ -927,7 +929,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
return error;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
struct compat_itimerspec __user *, new,
struct compat_itimerspec __user *, old)
@@ -1220,7 +1222,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
if (!kc)
return -EINVAL;
if (!kc->nsleep)
- return -ENANOSLEEP_NOTSUP;
+ return -EOPNOTSUPP;
if (get_timespec64(&t, rqtp))
return -EFAULT;
@@ -1247,7 +1249,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
if (!kc)
return -EINVAL;
if (!kc->nsleep)
- return -ENANOSLEEP_NOTSUP;
+ return -EOPNOTSUPP;
if (compat_get_timespec64(&t, rqtp))
return -EFAULT;
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index 151e28f5bf30..ddb21145211a 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -19,7 +19,7 @@ struct k_clock {
void (*timer_get)(struct k_itimer *timr,
struct itimerspec64 *cur_setting);
void (*timer_rearm)(struct k_itimer *timr);
- int (*timer_forward)(struct k_itimer *timr, ktime_t now);
+ s64 (*timer_forward)(struct k_itimer *timr, ktime_t now);
ktime_t (*timer_remaining)(struct k_itimer *timr, ktime_t now);
int (*timer_try_to_cancel)(struct k_itimer *timr);
void (*timer_arm)(struct k_itimer *timr, ktime_t expires,
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 2d8f05aad442..cbc72c2c1fca 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -237,7 +237,7 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
pr_debug("Registered %pF as sched_clock source\n", read);
}
-void __init sched_clock_postinit(void)
+void __init generic_sched_clock_init(void)
{
/*
* If no sched_clock() function has been provided at that point,
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 58045eb976c3..a59641fb88b6 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -90,7 +90,7 @@ static struct clock_event_device ce_broadcast_hrtimer = {
.max_delta_ticks = ULONG_MAX,
.mult = 1,
.shift = 0,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
};
static enum hrtimer_restart bc_handler(struct hrtimer *t)
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 2b41e8e2d31d..ccdb351277ee 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(sys_tz);
*/
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
- time_t i = get_seconds();
+ time_t i = (time_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -107,11 +107,9 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
/* compat_time_t is a 32 bit "long" and needs to get converted. */
COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
- struct timeval tv;
compat_time_t i;
- do_gettimeofday(&tv);
- i = tv.tv_sec;
+ i = (compat_time_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -931,7 +929,7 @@ int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
EXPORT_SYMBOL_GPL(compat_put_timespec64);
int get_itimerspec64(struct itimerspec64 *it,
- const struct itimerspec __user *uit)
+ const struct __kernel_itimerspec __user *uit)
{
int ret;
@@ -946,7 +944,7 @@ int get_itimerspec64(struct itimerspec64 *it,
EXPORT_SYMBOL_GPL(get_itimerspec64);
int put_itimerspec64(const struct itimerspec64 *it,
- struct itimerspec __user *uit)
+ struct __kernel_itimerspec __user *uit)
{
int ret;
@@ -959,3 +957,24 @@ int put_itimerspec64(const struct itimerspec64 *it,
return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);
+
+int get_compat_itimerspec64(struct itimerspec64 *its,
+ const struct compat_itimerspec __user *uits)
+{
+ if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
+ __compat_get_timespec64(&its->it_value, &uits->it_value))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
+
+int put_compat_itimerspec64(const struct itimerspec64 *its,
+ struct compat_itimerspec __user *uits)
+{
+ if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
+ __compat_put_timespec64(&its->it_value, &uits->it_value))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4786df904c22..f3b22f456fac 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -17,6 +17,7 @@
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
+#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
@@ -34,6 +35,14 @@
#define TK_MIRROR (1 << 1)
#define TK_CLOCK_WAS_SET (1 << 2)
+enum timekeeping_adv_mode {
+ /* Update timekeeper when a tick has passed */
+ TK_ADV_TICK,
+
+ /* Update timekeeper on a direct frequency change */
+ TK_ADV_FREQ
+};
+
/*
* The most important data for readout fits into a single 64 byte
* cache line.
@@ -97,7 +106,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
}
}
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
{
struct timespec64 ts;
@@ -154,7 +163,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
* a read of the fast-timekeeper tkrs (which is protected by its own locking
* and update logic).
*/
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
+static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
struct clocksource *clock = READ_ONCE(tkr->clock);
@@ -203,7 +212,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
}
}
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
struct timekeeper *tk = &tk_core.timekeeper;
u64 now, last, mask, max, delta;
@@ -247,7 +256,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
u64 cycle_now, delta;
@@ -344,7 +353,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
{
u64 nsec;
@@ -355,7 +364,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
return nsec + arch_gettimeoffset();
}
-static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
{
u64 delta;
@@ -363,7 +372,7 @@ static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
return timekeeping_delta_to_ns(tkr, delta);
}
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
{
u64 delta;
@@ -386,7 +395,8 @@ static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
* slightly wrong timestamp (a few nanoseconds). See
* @ktime_get_mono_fast_ns.
*/
-static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
+static void update_fast_timekeeper(const struct tk_read_base *tkr,
+ struct tk_fast *tkf)
{
struct tk_read_base *base = tkf->base;
@@ -541,10 +551,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
* number of cycles every time until timekeeping is resumed at which time the
* proper readout base for the fast timekeeper will be restored automatically.
*/
-static void halt_fast_timekeeper(struct timekeeper *tk)
+static void halt_fast_timekeeper(const struct timekeeper *tk)
{
static struct tk_read_base tkr_dummy;
- struct tk_read_base *tkr = &tk->tkr_mono;
+ const struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
cycles_at_suspend = tk_clock_read(tkr);
@@ -1269,7 +1279,7 @@ EXPORT_SYMBOL(do_settimeofday64);
*
* Adds or subtracts an offset value from the current time.
*/
-static int timekeeping_inject_offset(struct timespec64 *ts)
+static int timekeeping_inject_offset(const struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
@@ -1496,22 +1506,39 @@ void __weak read_persistent_clock64(struct timespec64 *ts64)
}
/**
- * read_boot_clock64 - Return time of the system start.
+ * read_persistent_wall_and_boot_offset - Read persistent clock and the
+ * offset from boot.
*
* Weak dummy function for arches that do not yet support it.
- * Function to read the exact time the system has been started.
- * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
- *
- * XXX - Do be sure to remove it once all arches implement it.
+ * wall_time - current time as returned by persistent clock
+ * boot_offset - offset that is defined as wall_time - boot_time
+ * The default function calculates the offset based on the current value of
+ * local_clock(). This way, architectures that support sched_clock() but don't
+ * have a dedicated boot time clock will provide the best estimate of the
+ * boot time.
*/
-void __weak read_boot_clock64(struct timespec64 *ts)
+void __weak __init
+read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+ struct timespec64 *boot_offset)
{
- ts->tv_sec = 0;
- ts->tv_nsec = 0;
+ read_persistent_clock64(wall_time);
+ *boot_offset = ns_to_timespec64(local_clock());
}
-/* Flag for if timekeeping_resume() has injected sleeptime */
-static bool sleeptime_injected;
+/*
+ * Flag reflecting whether timekeeping_resume() has injected sleeptime.
+ *
+ * The flag starts off false and is only set when a suspend reaches
+ * timekeeping_suspend(). timekeeping_resume() sets it back to false when
+ * the timekeeper clocksource did not stop across suspend and was used to
+ * update the sleep time. If the timekeeper clocksource stopped, the flag
+ * stays true and is used by the RTC resume code to decide whether sleep
+ * time must be injected; if so, the flag is cleared at that point.
+ *
+ * If a suspend fails before reaching timekeeping_resume() then the flag
+ * stays false and prevents erroneous sleeptime injection.
+ */
+static bool suspend_timing_needed;
/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;
@@ -1521,28 +1548,29 @@ static bool persistent_clock_exists;
*/
void __init timekeeping_init(void)
{
+ struct timespec64 wall_time, boot_offset, wall_to_mono;
struct timekeeper *tk = &tk_core.timekeeper;
struct clocksource *clock;
unsigned long flags;
- struct timespec64 now, boot, tmp;
-
- read_persistent_clock64(&now);
- if (!timespec64_valid_strict(&now)) {
- pr_warn("WARNING: Persistent clock returned invalid value!\n"
- " Check your CMOS/BIOS settings.\n");
- now.tv_sec = 0;
- now.tv_nsec = 0;
- } else if (now.tv_sec || now.tv_nsec)
- persistent_clock_exists = true;
- read_boot_clock64(&boot);
- if (!timespec64_valid_strict(&boot)) {
- pr_warn("WARNING: Boot clock returned invalid value!\n"
- " Check your CMOS/BIOS settings.\n");
- boot.tv_sec = 0;
- boot.tv_nsec = 0;
+ read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
+ if (timespec64_valid_strict(&wall_time) &&
+ timespec64_to_ns(&wall_time) > 0) {
+ persistent_clock_exists = true;
+ } else if (timespec64_to_ns(&wall_time) != 0) {
+ pr_warn("Persistent clock returned invalid value");
+ wall_time = (struct timespec64){0};
}
+ if (timespec64_compare(&wall_time, &boot_offset) < 0)
+ boot_offset = (struct timespec64){0};
+
+ /*
+ * We want to set wall_to_mono so that the following holds:
+ * wall time + wall_to_mono = boot time
+ */
+ wall_to_mono = timespec64_sub(boot_offset, wall_time);
+
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
ntp_init();
@@ -1552,13 +1580,10 @@ void __init timekeeping_init(void)
clock->enable(clock);
tk_setup_internals(tk, clock);
- tk_set_xtime(tk, &now);
+ tk_set_xtime(tk, &wall_time);
tk->raw_sec = 0;
- if (boot.tv_sec == 0 && boot.tv_nsec == 0)
- boot = tk_xtime(tk);
- set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
- tk_set_wall_to_mono(tk, tmp);
+ tk_set_wall_to_mono(tk, wall_to_mono);
timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
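
The hunk above derives wall_to_mono so that wall time plus the offset yields the boot-based clock. A userspace sanity check of the timespec64_sub() arithmetic, with invented values (persistent clock at 1000 s, 3 s of local_clock() elapsed before timekeeping_init()):

#include <stdio.h>
#include <time.h>

/* plain equivalent of timespec64_sub() */
static struct timespec ts_sub(struct timespec a, struct timespec b)
{
	struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };

	if (r.tv_nsec < 0) {
		r.tv_sec--;
		r.tv_nsec += 1000000000L;
	}
	return r;
}

int main(void)
{
	struct timespec wall_time = { 1000, 0 };	/* persistent clock */
	struct timespec boot_offset = { 3, 0 };		/* from local_clock() */
	struct timespec wtm = ts_sub(boot_offset, wall_time);

	printf("wall_to_mono = %ld s\n", (long)wtm.tv_sec);
	printf("monotonic now = %ld s\n",
	       (long)(wall_time.tv_sec + wtm.tv_sec));
	return 0;
}

This prints -997 and 3: adding wall_to_mono to the wall time recovers the 3 seconds that elapsed since boot, which is exactly the invariant the comment states.
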
@@ -1577,7 +1602,7 @@ static struct timespec64 timekeeping_suspend_time;
* adds the sleep offset to the timekeeping variables.
*/
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
- struct timespec64 *delta)
+ const struct timespec64 *delta)
{
if (!timespec64_valid_strict(delta)) {
printk_deferred(KERN_WARNING
@@ -1610,7 +1635,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
*/
bool timekeeping_rtc_skipresume(void)
{
- return sleeptime_injected;
+ return !suspend_timing_needed;
}
/**
@@ -1638,7 +1663,7 @@ bool timekeeping_rtc_skipsuspend(void)
* This function should only be called by rtc_resume(), and allows
* a suspend offset to be injected into the timekeeping values.
*/
-void timekeeping_inject_sleeptime64(struct timespec64 *delta)
+void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
@@ -1646,6 +1671,8 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
+ suspend_timing_needed = false;
+
timekeeping_forward_now(tk);
__timekeeping_inject_sleeptime(tk, delta);
@@ -1669,9 +1696,9 @@ void timekeeping_resume(void)
struct clocksource *clock = tk->tkr_mono.clock;
unsigned long flags;
struct timespec64 ts_new, ts_delta;
- u64 cycle_now;
+ u64 cycle_now, nsec;
+ bool inject_sleeptime = false;
- sleeptime_injected = false;
read_persistent_clock64(&ts_new);
clockevents_resume();
@@ -1693,22 +1720,19 @@ void timekeeping_resume(void)
* usable source. The rtc part is handled separately in rtc core code.
*/
cycle_now = tk_clock_read(&tk->tkr_mono);
- if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
- cycle_now > tk->tkr_mono.cycle_last) {
- u64 nsec, cyc_delta;
-
- cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
- tk->tkr_mono.mask);
- nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
+ nsec = clocksource_stop_suspend_timing(clock, cycle_now);
+ if (nsec > 0) {
ts_delta = ns_to_timespec64(nsec);
- sleeptime_injected = true;
+ inject_sleeptime = true;
} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
- sleeptime_injected = true;
+ inject_sleeptime = true;
}
- if (sleeptime_injected)
+ if (inject_sleeptime) {
+ suspend_timing_needed = false;
__timekeeping_inject_sleeptime(tk, &ts_delta);
+ }
/* Re-base the last cycle value */
tk->tkr_mono.cycle_last = cycle_now;
@@ -1732,6 +1756,8 @@ int timekeeping_suspend(void)
unsigned long flags;
struct timespec64 delta, delta_delta;
static struct timespec64 old_delta;
+ struct clocksource *curr_clock;
+ u64 cycle_now;
read_persistent_clock64(&timekeeping_suspend_time);
@@ -1743,11 +1769,22 @@ int timekeeping_suspend(void)
if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
persistent_clock_exists = true;
+ suspend_timing_needed = true;
+
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
timekeeping_suspended = 1;
+ /*
+ * Since we've called forward_now, cycle_last stores the value
+ * just read from the current clocksource. Save this to potentially
+ * use in suspend timing.
+ */
+ curr_clock = tk->tkr_mono.clock;
+ cycle_now = tk->tkr_mono.cycle_last;
+ clocksource_start_suspend_timing(curr_clock, cycle_now);
+
if (persistent_clock_exists) {
/*
* To avoid drift caused by repeated suspend/resumes,
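/*
 * How the suspend and resume hunks above pair up, condensed into one
 * sketch. The clocksource_{start,stop}_suspend_timing() helpers come from
 * the same series; locking, error handling and surrounding context are
 * elided, so this is an outline rather than buildable code.
 */
/* suspend path: forward_now() has just refreshed cycle_last */
clocksource_start_suspend_timing(tk->tkr_mono.clock,
				 tk->tkr_mono.cycle_last);

/* resume path: a nonstop clocksource wins, the persistent clock is the
 * fallback, and suspend_timing_needed is cleared once either one is used.
 */
nsec = clocksource_stop_suspend_timing(clock, tk_clock_read(&tk->tkr_mono));
if (nsec > 0)
	ts_delta = ns_to_timespec64(nsec);
else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0)
	ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);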
@@ -2021,11 +2058,11 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
return offset;
}
-/**
- * update_wall_time - Uses the current clocksource to increment the wall time
- *
+/*
+ * timekeeping_advance - Updates the timekeeper to the current time and
+ * current NTP tick length
*/
-void update_wall_time(void)
+static void timekeeping_advance(enum timekeeping_adv_mode mode)
{
struct timekeeper *real_tk = &tk_core.timekeeper;
struct timekeeper *tk = &shadow_timekeeper;
@@ -2042,14 +2079,17 @@ void update_wall_time(void)
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
+
+ if (mode != TK_ADV_TICK)
+ goto out;
#else
offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
-#endif
/* Check if there's really nothing to do */
- if (offset < real_tk->cycle_interval)
+ if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
goto out;
+#endif
/* Do some additional sanity checking */
timekeeping_check_update(tk, offset);
@@ -2106,6 +2146,15 @@ out:
}
/**
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ */
+void update_wall_time(void)
+{
+ timekeeping_advance(TK_ADV_TICK);
+}
+
+/**
* getboottime64 - Return the real time of system boot.
* @ts: pointer to the timespec64 to be set
*
@@ -2220,7 +2269,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
/**
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
-static int timekeeping_validate_timex(struct timex *txc)
+static int timekeeping_validate_timex(const struct timex *txc)
{
if (txc->modes & ADJ_ADJTIME) {
/* singleshot must not be used with any other mode bits */
@@ -2310,7 +2359,7 @@ int do_adjtimex(struct timex *txc)
return ret;
}
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
@@ -2327,6 +2376,10 @@ int do_adjtimex(struct timex *txc)
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ /* Update the multiplier immediately if frequency was set directly */
+ if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
+ timekeeping_advance(TK_ADV_FREQ);
+
if (tai != orig_tai)
clock_was_set();
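/*
 * A userspace call that takes the new TK_ADV_FREQ path: writing the
 * frequency directly now updates the timekeeper multiplier immediately
 * rather than at the next tick. The value is for illustration only.
 */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = {
		.modes = ADJ_FREQUENCY,
		.freq  = 100 << 16,	/* 100 ppm, 16-bit fixed point */
	};

	/* needs CAP_SYS_TIME; returns the clock state, or -1 on error */
	if (adjtimex(&tx) == -1)
		perror("adjtimex");
	return 0;
}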
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 0754cadfa9e6..238e4be60229 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -70,7 +70,7 @@ static int __init tk_debug_sleep_time_init(void)
}
late_initcall(tk_debug_sleep_time_init);
-void tk_debug_account_sleep_time(struct timespec64 *t)
+void tk_debug_account_sleep_time(const struct timespec64 *t)
{
/* Cap bin index so we don't overflow the array */
int bin = min(fls(t->tv_sec), NUM_BINS-1);
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index cf5c0828ee31..bcbb52db2256 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -8,7 +8,7 @@
#include <linux/time.h>
#ifdef CONFIG_DEBUG_FS
-extern void tk_debug_account_sleep_time(struct timespec64 *t);
+extern void tk_debug_account_sleep_time(const struct timespec64 *t);
#else
#define tk_debug_account_sleep_time(x)
#endif
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index cc2d23e6ff61..fa49cd753dea 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -581,7 +581,7 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
* wheel:
*/
base->next_expiry = timer->expires;
- wake_up_nohz_cpu(base->cpu);
+ wake_up_nohz_cpu(base->cpu);
}
static void
@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
raw_spin_lock_irq(&base->lock);
+ /*
+ * timer_base::must_forward_clk must be cleared before running
+ * timers so that any timer functions that call mod_timer() will
+ * not try to forward the base. Idle tracking / clock forwarding
+ * logic is only used with BASE_STD timers.
+ *
+ * The must_forward_clk flag is cleared unconditionally also for
+ * the deferrable base. The deferrable base is not affected by idle
+ * tracking and never forwarded, so clearing the flag is a NOOP.
+ *
+ * The fact that the deferrable base is never forwarded can cause
+ * large variations in granularity for deferrable timers, but they
+ * can be deferred for long periods due to idle anyway.
+ */
+ base->must_forward_clk = false;
+
while (time_after_eq(jiffies, base->clk)) {
levels = collect_expired_timers(base, heads);
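/*
 * The flag cleared above is consumed by timer.c's base-forwarding helper;
 * a simplified sketch of the consumer side (body elided):
 */
static inline void forward_timer_base(struct timer_base *base)
{
	if (likely(!base->must_forward_clk))
		return;		/* cleared by __run_timers(): don't forward */
	/* ... otherwise advance base->clk towards jiffies ... */
}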
@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
- /*
- * must_forward_clk must be cleared before running timers so that any
- * timer functions that call mod_timer will not try to forward the
- * base. idle trcking / clock forwarding logic is only used with
- * BASE_STD timers.
- *
- * The deferrable base does not do idle tracking at all, so we do
- * not forward it. This can result in very large variations in
- * granularity for deferrable timers, but they can be deferred for
- * long periods due to idle.
- */
- base->must_forward_clk = false;
-
__run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
diff --git a/kernel/torture.c b/kernel/torture.c
index 3de1efbecd6a..1ac24a826589 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -20,6 +20,9 @@
* Author: Paul E. McKenney <paulmck@us.ibm.com>
* Based on kernel/rcu/torture.c.
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -53,7 +56,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
static char *torture_type;
-static bool verbose;
+static int verbose;
/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
@@ -98,7 +101,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
return false;
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offlining %d\n",
torture_type, cpu);
@@ -111,7 +114,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
"torture_onoff task: offline %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offlined %d\n",
torture_type, cpu);
@@ -147,7 +150,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
return false;
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: onlining %d\n",
torture_type, cpu);
@@ -160,7 +163,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
"torture_onoff task: online %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: onlined %d\n",
torture_type, cpu);
@@ -647,7 +650,7 @@ static void torture_stutter_cleanup(void)
* The runnable parameter points to a flag that controls whether or not
* the test is currently runnable. If there is no such flag, pass in NULL.
*/
-bool torture_init_begin(char *ttype, bool v)
+bool torture_init_begin(char *ttype, int v)
{
mutex_lock(&fullstop_mutex);
if (torture_type != NULL) {
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6b71860f3998..e9d99463e5df 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1228,16 +1228,11 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
/*
* We need to check and see if we modified the pc of the
- * pt_regs, and if so clear the kprobe and return 1 so that we
- * don't do the single stepping.
- * The ftrace kprobe handler leaves it up to us to re-enable
- * preemption here before returning if we've modified the ip.
+ * pt_regs, and if so return 1 so that we don't do the
+ * single stepping.
*/
- if (orig_ip != instruction_pointer(regs)) {
- reset_current_kprobe();
- preempt_enable_no_resched();
+ if (orig_ip != instruction_pointer(regs))
return 1;
- }
if (!ret)
return 0;
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 576d18045811..5470dce212c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -18,18 +18,14 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
-#include <linux/smpboot.h>
-#include <linux/sched/rt.h>
-#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
-#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
+#include <linux/stop_machine.h>
#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
-#include <linux/kthread.h>
static DEFINE_MUTEX(watchdog_mutex);
@@ -169,11 +165,10 @@ static void lockup_detector_update_enable(void)
unsigned int __read_mostly softlockup_panic =
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
-static bool softlockup_threads_initialized __read_mostly;
+static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
-static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
@@ -335,6 +330,27 @@ static void watchdog_interrupt_count(void)
__this_cpu_inc(hrtimer_interrupts);
}
+static DEFINE_PER_CPU(struct completion, softlockup_completion);
+static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
+
+/*
+ * The watchdog worker function - touches the timestamp.
+ *
+ * It only runs once per sample period (4 seconds by
+ * default) to reset the softlockup timestamp. If this gets delayed
+ * for more than 2*watchdog_thresh seconds then the debug-printout
+ * triggers in watchdog_timer_fn().
+ */
+static int softlockup_fn(void *data)
+{
+ __this_cpu_write(soft_lockup_hrtimer_cnt,
+ __this_cpu_read(hrtimer_interrupts));
+ __touch_watchdog();
+ complete(this_cpu_ptr(&softlockup_completion));
+
+ return 0;
+}
+
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
@@ -350,7 +366,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
watchdog_interrupt_count();
/* kick the softlockup detector */
- wake_up_process(__this_cpu_read(softlockup_watchdog));
+ if (completion_done(this_cpu_ptr(&softlockup_completion))) {
+ reinit_completion(this_cpu_ptr(&softlockup_completion));
+ stop_one_cpu_nowait(smp_processor_id(),
+ softlockup_fn, NULL,
+ this_cpu_ptr(&softlockup_stop_work));
+ }
/* .. and repeat */
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
@@ -448,16 +469,15 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
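/*
 * Condensed from the hunks above: the per-CPU completion doubles as an
 * "is the previous kick still in flight?" flag. watchdog_enable()
 * completes it once so the first timer tick may queue work, and
 * softlockup_fn() re-completes it when the touch is done. A sketch only;
 * the surrounding functions are as shown in this patch.
 */
init_completion(done);
complete(done);				/* arm the very first kick */

/* in the hrtimer handler: */
if (completion_done(done)) {		/* previous run finished? */
	reinit_completion(done);	/* mark one in flight */
	stop_one_cpu_nowait(cpu, softlockup_fn, NULL, work);
}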
-static void watchdog_set_prio(unsigned int policy, unsigned int prio)
-{
- struct sched_param param = { .sched_priority = prio };
-
- sched_setscheduler(current, policy, &param);
-}
-
static void watchdog_enable(unsigned int cpu)
{
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
+ struct completion *done = this_cpu_ptr(&softlockup_completion);
+
+ WARN_ON_ONCE(cpu != smp_processor_id());
+
+ init_completion(done);
+ complete(done);
/*
* Start the timer first to prevent the NMI watchdog triggering
@@ -473,15 +493,14 @@ static void watchdog_enable(unsigned int cpu)
/* Enable the perf event */
if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
watchdog_nmi_enable(cpu);
-
- watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
static void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
- watchdog_set_prio(SCHED_NORMAL, 0);
+ WARN_ON_ONCE(cpu != smp_processor_id());
+
/*
* Disable the perf event first. That prevents that a large delay
* between disabling the timer and disabling the perf event causes
@@ -489,79 +508,66 @@ static void watchdog_disable(unsigned int cpu)
*/
watchdog_nmi_disable(cpu);
hrtimer_cancel(hrtimer);
+ wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
-static void watchdog_cleanup(unsigned int cpu, bool online)
+static int softlockup_stop_fn(void *data)
{
- watchdog_disable(cpu);
+ watchdog_disable(smp_processor_id());
+ return 0;
}
-static int watchdog_should_run(unsigned int cpu)
+static void softlockup_stop_all(void)
{
- return __this_cpu_read(hrtimer_interrupts) !=
- __this_cpu_read(soft_lockup_hrtimer_cnt);
+ int cpu;
+
+ if (!softlockup_initialized)
+ return;
+
+ for_each_cpu(cpu, &watchdog_allowed_mask)
+ smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
+
+ cpumask_clear(&watchdog_allowed_mask);
}
-/*
- * The watchdog thread function - touches the timestamp.
- *
- * It only runs once every sample_period seconds (4 seconds by
- * default) to reset the softlockup timestamp. If this gets delayed
- * for more than 2*watchdog_thresh seconds then the debug-printout
- * triggers in watchdog_timer_fn().
- */
-static void watchdog(unsigned int cpu)
+static int softlockup_start_fn(void *data)
{
- __this_cpu_write(soft_lockup_hrtimer_cnt,
- __this_cpu_read(hrtimer_interrupts));
- __touch_watchdog();
+ watchdog_enable(smp_processor_id());
+ return 0;
}
-static struct smp_hotplug_thread watchdog_threads = {
- .store = &softlockup_watchdog,
- .thread_should_run = watchdog_should_run,
- .thread_fn = watchdog,
- .thread_comm = "watchdog/%u",
- .setup = watchdog_enable,
- .cleanup = watchdog_cleanup,
- .park = watchdog_disable,
- .unpark = watchdog_enable,
-};
-
-static void softlockup_update_smpboot_threads(void)
+static void softlockup_start_all(void)
{
- lockdep_assert_held(&watchdog_mutex);
-
- if (!softlockup_threads_initialized)
- return;
+ int cpu;
- smpboot_update_cpumask_percpu_thread(&watchdog_threads,
- &watchdog_allowed_mask);
+ cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
+ for_each_cpu(cpu, &watchdog_allowed_mask)
+ smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
-/* Temporarily park all watchdog threads */
-static void softlockup_park_all_threads(void)
+int lockup_detector_online_cpu(unsigned int cpu)
{
- cpumask_clear(&watchdog_allowed_mask);
- softlockup_update_smpboot_threads();
+ watchdog_enable(cpu);
+ return 0;
}
-/* Unpark enabled threads */
-static void softlockup_unpark_threads(void)
+int lockup_detector_offline_cpu(unsigned int cpu)
{
- cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
- softlockup_update_smpboot_threads();
+ watchdog_disable(cpu);
+ return 0;
}
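/*
 * These two callbacks replace the smpboot park/unpark hooks. A sketch of
 * how they would plug into the CPU-hotplug state machine; the actual
 * registration lives in the cpuhp core, and the state name here is an
 * assumption:
 */
ret = cpuhp_setup_state(CPUHP_AP_WATCHDOG, "lockup_detector:online",
			lockup_detector_online_cpu,
			lockup_detector_offline_cpu);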
static void lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
- softlockup_park_all_threads();
+
+ softlockup_stop_all();
set_sample_period();
lockup_detector_update_enable();
if (watchdog_enabled && watchdog_thresh)
- softlockup_unpark_threads();
+ softlockup_start_all();
+
watchdog_nmi_start();
cpus_read_unlock();
/*
@@ -580,8 +586,6 @@ static void lockup_detector_reconfigure(void)
*/
static __init void lockup_detector_setup(void)
{
- int ret;
-
/*
* If sysctl is off and watchdog got disabled on the command line,
* nothing to do here.
@@ -592,24 +596,13 @@ static __init void lockup_detector_setup(void)
!(watchdog_enabled && watchdog_thresh))
return;
- ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
- &watchdog_allowed_mask);
- if (ret) {
- pr_err("Failed to initialize soft lockup detector threads\n");
- return;
- }
-
mutex_lock(&watchdog_mutex);
- softlockup_threads_initialized = true;
lockup_detector_reconfigure();
+ softlockup_initialized = true;
mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
-static inline int watchdog_park_threads(void) { return 0; }
-static inline void watchdog_unpark_threads(void) { }
-static inline int watchdog_enable_all_cpus(void) { return 0; }
-static inline void watchdog_disable_all_cpus(void) { }
static void lockup_detector_reconfigure(void)
{
cpus_read_lock();
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index e449a23e9d59..1f7020d65d0a 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -175,8 +175,8 @@ static int hardlockup_detector_event_create(void)
evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
watchdog_overflow_callback, NULL);
if (IS_ERR(evt)) {
- pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
- PTR_ERR(evt));
+ pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
+ PTR_ERR(evt));
return PTR_ERR(evt);
}
this_cpu_write(watchdog_ev, evt);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8838d1158d19..0b066b3c9284 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1718,7 +1718,7 @@ config KPROBES_SANITY_TEST
default n
help
This option provides for testing basic kprobes functionality on
- boot. A sample kprobe, jprobe and kretprobe are inserted and
+ boot. A sample kprobe and a sample kretprobe are inserted and
verified for functionality.
Say N if you are unsure.
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 19d42ea75ec2..98fa559ebd80 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,6 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config ARCH_WANTS_UBSAN_NO_NULL
- def_bool n
-
config UBSAN
bool "Undefined behaviour sanity checker"
help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_NULL
- bool "Enable checking of null pointers"
- depends on UBSAN
- default y if !ARCH_WANTS_UBSAN_NO_NULL
- help
- This option enables detection of memory accesses via a
- null pointer.
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m && UBSAN
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 53c2d5edc826..1d91e31eceec 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
}
EXPORT_SYMBOL(atomic64_xchg);
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- int ret = 0;
+ long long val;
raw_spin_lock_irqsave(lock, flags);
- if (v->counter != u) {
+ val = v->counter;
+ if (val != u)
v->counter += a;
- ret = 1;
- }
raw_spin_unlock_irqrestore(lock, flags);
- return ret;
+
+ return val;
}
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
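/*
 * The fetch_add_unless() form returns the old value instead of a success
 * flag, so the boolean variant is one comparison away. A userspace model
 * of just the semantics (no atomicity, illustration only):
 */
#include <stdbool.h>
#include <stdio.h>

static long long fetch_add_unless(long long *v, long long a, long long u)
{
	long long old = *v;

	if (old != u)
		*v += a;
	return old;
}

static bool add_unless(long long *v, long long a, long long u)
{
	return fetch_add_unless(v, a, u) != u;
}

int main(void)
{
	long long v = 0;

	printf("%d\n", add_unless(&v, 1, 0));		/* 0: v held u */
	v = 5;
	printf("%d %lld\n", add_unless(&v, 1, 0), v);	/* 1 6 */
	return 0;
}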
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 994be4805cec..70935ed91125 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- pr_warn("object is on stack, but not annotated\n");
+ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+ task_stack_page(current));
else
- pr_warn("object is not on stack, but annotated\n");
+ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+ task_stack_page(current));
+
WARN_ON(1);
}
@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
- if (obj_cache)
- kmem_cache_destroy(obj_cache);
+ kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
} else
debug_objects_selftest();
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bbaa3200..517f5853ffed 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
+ pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
+ pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 140fa8bb5c23..914ebe98fc21 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -55,22 +55,24 @@ static inline void XOR(int x, int y, int z)
asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}
-static inline void LOAD_DATA(int x, int n, u8 *ptr)
+static inline void LOAD_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VLM %2,%3,0,%r1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : : "m" (*__ptr), "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
-static inline void STORE_DATA(int x, int n, u8 *ptr)
+static inline void STORE_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : "=m" (*__ptr) : "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
static inline void COPY_VEC(int x, int y)
@@ -93,19 +95,19 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
q = dptr[z0 + 2]; /* RS syndrome */
for (d = 0; d < bytes; d += $#*NSIZE) {
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= 0; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
- STORE_DATA(0,$#,&p[d]);
- STORE_DATA(8,$#,&q[d]);
+ STORE_DATA(0,&p[d]);
+ STORE_DATA(8,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
@@ -127,14 +129,14 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
for (d = 0; d < bytes; d += $#*NSIZE) {
/* P/Q data pages */
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= start; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
@@ -145,12 +147,12 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
}
- LOAD_DATA(16,$#,&p[d]);
+ LOAD_DATA(16,&p[d]);
XOR(16+$$,16+$$,0+$$);
- STORE_DATA(16,$#,&p[d]);
- LOAD_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&p[d]);
+ LOAD_DATA(16,&q[d]);
XOR(16+$$,16+$$,8+$$);
- STORE_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
diff --git a/lib/refcount.c b/lib/refcount.c
index d3b81cefce91..ebcf8cd49e05 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -35,13 +35,13 @@
*
*/
+#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
#include <linux/bug.h>
-#ifdef CONFIG_REFCOUNT_FULL
-
/**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -58,7 +58,7 @@
*
* Return: false if the passed refcount is 0, true otherwise
*/
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero_checked);
/**
- * refcount_add - add a value to a refcount
+ * refcount_add_checked - add a value to a refcount
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*/
-void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add_checked(unsigned int i, refcount_t *r)
{
- WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_add);
+EXPORT_SYMBOL(refcount_add_checked);
/**
- * refcount_inc_not_zero - increment a refcount unless it is 0
+ * refcount_inc_not_zero_checked - increment a refcount unless it is 0
* @r: the refcount to increment
*
* Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
*
* Return: true if the increment was successful, false otherwise
*/
-bool refcount_inc_not_zero(refcount_t *r)
+bool refcount_inc_not_zero_checked(refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero_checked);
/**
- * refcount_inc - increment a refcount
+ * refcount_inc_checked - increment a refcount
* @r: the refcount to increment
*
* Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
-void refcount_inc(refcount_t *r)
+void refcount_inc_checked(refcount_t *r)
{
- WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc_checked);
/**
- * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return !new;
}
-EXPORT_SYMBOL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test_checked);
/**
- * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_dec_and_test(refcount_t *r)
+bool refcount_dec_and_test_checked(refcount_t *r)
{
- return refcount_sub_and_test(1, r);
+ return refcount_sub_and_test_checked(1, r);
}
-EXPORT_SYMBOL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test_checked);
/**
- * refcount_dec - decrement a refcount
+ * refcount_dec_checked - decrement a refcount
* @r: the refcount
*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-void refcount_dec(refcount_t *r)
+void refcount_dec_checked(refcount_t *r)
{
- WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+ WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL(refcount_dec);
-#endif /* CONFIG_REFCOUNT_FULL */
+EXPORT_SYMBOL(refcount_dec_checked);
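/*
 * With the #ifdef CONFIG_REFCOUNT_FULL guard removed here, the _checked
 * variants are always built. One plausible shape for the header side that
 * keeps the old names working (a sketch, not the exact
 * include/linux/refcount.h text):
 */
#ifdef CONFIG_REFCOUNT_FULL
static inline void refcount_inc(refcount_t *r)
{
	refcount_inc_checked(r);
}
#else
/* ... fast, unchecked atomic_inc()-based version ... */
#endif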
/**
* refcount_dec_if_one - decrement a refcount if it is 1
diff --git a/mm/init-mm.c b/mm/init-mm.c
index f0179c9c04c2..a787a319211e 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -15,6 +15,16 @@
#define INIT_MM_CONTEXT(name)
#endif
+/*
+ * For dynamically allocated mm_structs, there is a dynamically sized cpumask
+ * at the end of the structure, the size of which depends on the maximum CPU
+ * number the system can see. That way we allocate only as much memory for
+ * mm_cpumask() as needed for the hundreds or thousands of processes that
+ * a system typically runs.
+ *
+ * Since there is only one init_mm in the entire system, keep it simple
+ * and size this cpu_bitmap to NR_CPUS.
+ */
struct mm_struct init_mm = {
.mm_rb = RB_ROOT,
.pgd = swapper_pg_dir,
@@ -25,5 +35,6 @@ struct mm_struct init_mm = {
.arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
.user_ns = &init_user_ns,
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
INIT_MM_CONTEXT(init_mm)
};
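/*
 * A userspace model of the initializer above, assuming GCC's extension
 * that allows static initialization of a flexible array member. Dynamic
 * mm_structs instead size the tail from the runtime CPU count.
 */
#include <limits.h>
#include <stdio.h>

#define NR_CPUS 64
#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

struct mm_like {
	int users;
	unsigned long cpu_bitmap[];	/* sized at allocation time */
};

/* static instance: force the tail out to the worst case, like init_mm */
static struct mm_like init_mm_like = {
	.users	    = 1,
	.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0 },
};

int main(void)
{
	printf("users=%d, %zu longs reserved\n", init_mm_like.users,
	       BITS_TO_LONGS(NR_CPUS) + 1);
	return 0;
}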
diff --git a/mm/memfd.c b/mm/memfd.c
index 27069518e3c5..2bb5e257080e 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -326,7 +326,7 @@ SYSCALL_DEFINE2(memfd_create,
goto err_fd;
}
file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
- file->f_flags |= O_RDWR | O_LARGEFILE;
+ file->f_flags |= O_LARGEFILE;
if (flags & MFD_ALLOW_SEALING) {
file_seals = memfd_file_seals_ptr(file);
diff --git a/mm/memory.c b/mm/memory.c
index dab1511294ad..3d0a74ab70f2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -326,16 +326,20 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-/*
- * See the comment near struct mmu_table_batch.
- */
-
static void tlb_remove_table_smp_sync(void *arg)
{
- /* Simply deliver the interrupt */
+ struct mm_struct __maybe_unused *mm = arg;
+ /*
+ * On most architectures this does nothing. Simply delivering the
+ * interrupt is enough to prevent races with software page table
+ * walking like that done in get_user_pages_fast.
+ *
+ * See the comment near struct mmu_table_batch.
+ */
+ tlb_flush_remove_tables_local(mm);
}
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(void *table, struct mmu_gather *tlb)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
@@ -344,7 +348,7 @@ static void tlb_remove_table_one(void *table)
* It is however sufficient for software page-table walkers that rely on
* IRQ disabling. See the comment near struct mmu_table_batch.
*/
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ smp_call_function(tlb_remove_table_smp_sync, tlb->mm, 1);
__tlb_remove_table(table);
}
@@ -365,6 +369,8 @@ void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
+ tlb_flush_remove_tables(tlb->mm);
+
if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
@@ -387,7 +393,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
- tlb_remove_table_one(table);
+ tlb_remove_table_one(table, tlb);
return;
}
(*batch)->nr = 0;
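/*
 * Why a synchronous IPI broadcast is a sufficient fallback here: lockless
 * walkers such as get_user_pages_fast() fend off table freeing purely by
 * running with IRQs disabled, so once smp_call_function(..., wait=1)
 * returns, every CPU has left any such critical section. Walker side,
 * heavily simplified:
 */
local_irq_save(flags);
/*
 * ... walk the page tables; the IPI cannot be taken while we sit here,
 * so tables visible to this walk cannot yet have been freed ...
 */
local_irq_restore(flags);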
@@ -4395,6 +4401,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+ if (!maddr)
+ return -ENOMEM;
+
if (write)
memcpy_toio(maddr + offset, buf, len);
else
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a790ef4be74e..3222193c46c6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6939,9 +6939,21 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
start = (void *)PAGE_ALIGN((unsigned long)start);
end = (void *)((unsigned long)end & PAGE_MASK);
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+ struct page *page = virt_to_page(pos);
+ void *direct_map_addr;
+
+ /*
+ * 'direct_map_addr' might be different from 'pos'
+ * because virt_to_page() on some architectures
+ * works with aliases. Getting the direct map
+ * address ensures that we get a _writeable_
+ * alias for the memset().
+ */
+ direct_map_addr = page_address(page);
if ((unsigned int)poison <= 0xFF)
- memset(pos, poison, PAGE_SIZE);
- free_reserved_page(virt_to_page(pos));
+ memset(direct_map_addr, poison, PAGE_SIZE);
+
+ free_reserved_page(page);
}
if (pages && s)
diff --git a/mm/shmem.c b/mm/shmem.c
index 41b9bbf24e16..96bcc51fb9ec 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3897,18 +3897,11 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
/* common code */
-static const struct dentry_operations anon_ops = {
- .d_dname = simple_dname
-};
-
static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
unsigned long flags, unsigned int i_flags)
{
- struct file *res;
struct inode *inode;
- struct path path;
- struct super_block *sb;
- struct qstr this;
+ struct file *res;
if (IS_ERR(mnt))
return ERR_CAST(mnt);
@@ -3919,41 +3912,21 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l
if (shmem_acct_size(flags, size))
return ERR_PTR(-ENOMEM);
- res = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0; /* will go */
- sb = mnt->mnt_sb;
- path.mnt = mntget(mnt);
- path.dentry = d_alloc_pseudo(sb, &this);
- if (!path.dentry)
- goto put_memory;
- d_set_d_op(path.dentry, &anon_ops);
-
- res = ERR_PTR(-ENOSPC);
- inode = shmem_get_inode(sb, NULL, S_IFREG | 0777, 0, flags);
- if (!inode)
- goto put_memory;
-
+ inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
+ flags);
+ if (unlikely(!inode)) {
+ shmem_unacct_size(flags, size);
+ return ERR_PTR(-ENOSPC);
+ }
inode->i_flags |= i_flags;
- d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode); /* It is unlinked */
res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
+ if (!IS_ERR(res))
+ res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
+ &shmem_file_operations);
if (IS_ERR(res))
- goto put_path;
-
- res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
- &shmem_file_operations);
- if (IS_ERR(res))
- goto put_path;
-
- return res;
-
-put_memory:
- shmem_unacct_size(flags, size);
-put_path:
- path_put(&path);
+ iput(inode);
return res;
}
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index af8c4b38b746..d84227d75717 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -244,7 +244,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
* the packet count limit, so...
*/
if (atm_may_send(pvcc->atmvcc, size) &&
- atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+ atomic_inc_not_zero(&pvcc->inflight))
return 1;
/*
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 2b75df469220..842a9c7c73a3 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
u32 cwnd = hc->tx_cwnd, restart_cwnd,
iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+ s32 delta = now - hc->tx_lsndtime;
hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
/* don't reduce cwnd below the initial window (IW) */
restart_cwnd = min(cwnd, iwnd);
- cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
- hc->tx_cwnd = max(cwnd, restart_cwnd);
+ while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+ cwnd >>= 1;
+ hc->tx_cwnd = max(cwnd, restart_cwnd);
hc->tx_cwnd_stamp = now;
hc->tx_cwnd_used = 0;
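/*
 * What the loop fixes: "cwnd >>= n" is undefined behaviour once n reaches
 * the width of cwnd, and a long idle period divided by the RTO easily
 * produces such an n. A runnable userspace check of the bounded form:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cwnd = 40, restart_cwnd = 10, rto = 1;
	int32_t delta = 1000;	/* idle for far longer than 32 RTOs */

	/* old form: cwnd >>= delta / rto would shift by 1000 -- undefined */
	while ((delta -= rto) >= 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	printf("%u\n", cwnd > restart_cwnd ? cwnd : restart_cwnd);	/* 10 */
	return 0;
}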
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 732369c80644..9864bcd3d317 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;
/* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev)
+ if (!dev->phydev && !dp->pl)
return -ENODEV;
if (!ds->ops->set_mac_eee)
@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;
/* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev)
+ if (!dev->phydev && !dp->pl)
return -ENODEV;
if (!ds->ops->get_mac_eee)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 00e138a44cbb..1cc9650af9fb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1133,12 +1133,8 @@ route_lookup:
max_headroom += 8;
mtu -= 8;
}
- if (skb->protocol == htons(ETH_P_IPV6)) {
- if (mtu < IPV6_MIN_MTU)
- mtu = IPV6_MIN_MTU;
- } else if (mtu < 576) {
- mtu = 576;
- }
+ mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+ IPV6_MIN_MTU : IPV4_MIN_MTU);
skb_dst_update_pmtu(skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ec18b3ce8b6d..7208c16302f6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -978,10 +978,6 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
rt->rt6i_flags &= ~RTF_EXPIRES;
rcu_assign_pointer(rt->from, from);
dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
- if (from->fib6_metrics != &dst_default_metrics) {
- rt->dst._metrics |= DST_METRICS_REFCOUNTED;
- refcount_inc(&from->fib6_metrics->refcnt);
- }
}
/* Caller must already hold reference to @ort */
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 89041260784c..260b3dc1b4a2 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
rcu_read_lock_bh();
sap = __llc_sap_find(sap_value);
- if (sap)
- llc_sap_hold(sap);
+ if (!sap || !llc_sap_hold_safe(sap))
+ sap = NULL;
rcu_read_unlock_bh();
return sap;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9b27d0cd766d..e6445d8f3f57 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4226,6 +4226,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}
if (req->tp_block_nr) {
+ unsigned int min_frame_size;
+
/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
@@ -4248,12 +4250,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
goto out;
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
+ min_frame_size = po->tp_hdrlen + po->tp_reserve;
if (po->tp_version >= TPACKET_V3 &&
- req->tp_block_size <=
- BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+ req->tp_block_size <
+ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
goto out;
- if (unlikely(req->tp_frame_size < po->tp_hdrlen +
- po->tp_reserve))
+ if (unlikely(req->tp_frame_size < min_frame_size))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5fb7d3254d9e..707630ab4713 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -104,9 +104,9 @@ struct rxrpc_net {
#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
u8 peer_keepalive_cursor;
- ktime_t peer_keepalive_base;
- struct hlist_head peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
- struct hlist_head peer_keepalive_new;
+ time64_t peer_keepalive_base;
+ struct list_head peer_keepalive[32];
+ struct list_head peer_keepalive_new;
struct timer_list peer_keepalive_timer;
struct work_struct peer_keepalive_work;
};
@@ -295,7 +295,7 @@ struct rxrpc_peer {
struct hlist_head error_targets; /* targets for net error distribution */
struct work_struct error_distributor;
struct rb_root service_conns; /* Service connections */
- struct hlist_node keepalive_link; /* Link in net->peer_keepalive[] */
+ struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
time64_t last_tx_at; /* Last time packet sent here */
seqlock_t service_conn_lock;
spinlock_t lock; /* access lock */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index f6734d8cb01a..9486293fef5c 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -415,7 +415,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
bool rxrpc_queue_call(struct rxrpc_call *call)
{
const void *here = __builtin_return_address(0);
- int n = __atomic_add_unless(&call->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&call->usage, 1, 0);
if (n == 0)
return false;
if (rxrpc_queue_work(&call->processor))
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 8229a52c2acd..3fde001fcc39 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
}
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
return -EAGAIN;
}
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
_leave(" = 0");
return 0;
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 4c77a78a252a..77440a356b14 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -266,7 +266,7 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
- int n = __atomic_add_unless(&conn->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
if (n == 0)
return false;
if (rxrpc_queue_work(&conn->processor))
@@ -309,7 +309,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
const void *here = __builtin_return_address(0);
if (conn) {
- int n = __atomic_add_unless(&conn->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
if (n > 0)
trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
else
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b493e6b62740..777c3ed4cfc0 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -305,7 +305,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
const void *here = __builtin_return_address(0);
if (local) {
- int n = __atomic_add_unless(&local->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&local->usage, 1, 0);
if (n > 0)
trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
else
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 5d6a773db973..417d80867c4f 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
hash_init(rxnet->peer_hash);
spin_lock_init(&rxnet->peer_hash_lock);
for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
- INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
- INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+ INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
+ INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
timer_setup(&rxnet->peer_keepalive_timer,
rxrpc_peer_keepalive_timeout, 0);
INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
- rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
+ rxnet->peer_keepalive_base = ktime_get_seconds();
ret = -ENOMEM;
rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index f03de1c59ba3..4774c8f5634d 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
now = ktime_get_real();
if (ping)
call->ping_time = now;
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
ret = kernel_sendmsg(conn->params.local->socket,
&msg, iov, 1, sizeof(pkt));
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* message and update the peer record
*/
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
up_read(&conn->params.local->defrag_sem);
if (ret < 0)
@@ -457,7 +457,7 @@ send_fragmentable:
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
opt = IP_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ send_fragmentable:
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
opt = IPV6_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
rxrpc_tx_fail_version_keepalive);
- peer->last_tx_at = ktime_get_real();
+ peer->last_tx_at = ktime_get_seconds();
_leave("");
}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 0ed8b651cec2..4f9da2f51c69 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
}
/*
- * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ * Perform keep-alive pings.
*/
-void rxrpc_peer_keepalive_worker(struct work_struct *work)
+static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ struct list_head *collector,
+ time64_t base,
+ u8 cursor)
{
- struct rxrpc_net *rxnet =
- container_of(work, struct rxrpc_net, peer_keepalive_work);
struct rxrpc_peer *peer;
- unsigned long delay;
- ktime_t base, now = ktime_get_real();
- s64 diff;
- u8 cursor, slot;
+ const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+ time64_t keepalive_at;
+ int slot;
- base = rxnet->peer_keepalive_base;
- cursor = rxnet->peer_keepalive_cursor;
+ spin_lock_bh(&rxnet->peer_hash_lock);
- _enter("%u,%lld", cursor, ktime_sub(now, base));
+ while (!list_empty(collector)) {
+ peer = list_entry(collector->next,
+ struct rxrpc_peer, keepalive_link);
-next_bucket:
- diff = ktime_to_ns(ktime_sub(now, base));
- if (diff < 0)
- goto resched;
+ list_del_init(&peer->keepalive_link);
+ if (!rxrpc_get_peer_maybe(peer))
+ continue;
- _debug("at %u", cursor);
- spin_lock_bh(&rxnet->peer_hash_lock);
-next_peer:
- if (!rxnet->live) {
spin_unlock_bh(&rxnet->peer_hash_lock);
- goto out;
- }
- /* Everything in the bucket at the cursor is processed this second; the
- * bucket at cursor + 1 goes now + 1s and so on...
- */
- if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
- if (hlist_empty(&rxnet->peer_keepalive_new)) {
- spin_unlock_bh(&rxnet->peer_hash_lock);
- goto emptied_bucket;
+ keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+ slot = keepalive_at - base;
+ _debug("%02x peer %u t=%d {%pISp}",
+ cursor, peer->debug_id, slot, &peer->srx.transport);
+
+ if (keepalive_at <= base ||
+ keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+ rxrpc_send_keepalive(peer);
+ slot = RXRPC_KEEPALIVE_TIME;
}
- hlist_move_list(&rxnet->peer_keepalive_new,
- &rxnet->peer_keepalive[cursor]);
+ /* A transmission to this peer occurred since last we examined
+ * it so put it into the appropriate future bucket.
+ */
+ slot += cursor;
+ slot &= mask;
+ spin_lock_bh(&rxnet->peer_hash_lock);
+ list_add_tail(&peer->keepalive_link,
+ &rxnet->peer_keepalive[slot & mask]);
+ rxrpc_put_peer(peer);
}
- peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
- struct rxrpc_peer, keepalive_link);
- hlist_del_init(&peer->keepalive_link);
- if (!rxrpc_get_peer_maybe(peer))
- goto next_peer;
-
spin_unlock_bh(&rxnet->peer_hash_lock);
+}
- _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+ struct rxrpc_net *rxnet =
+ container_of(work, struct rxrpc_net, peer_keepalive_work);
+ const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+ time64_t base, now, delay;
+ u8 cursor, stop;
+ LIST_HEAD(collector);
-recalc:
- diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
- if (diff < -30 || diff > 30)
- goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
- diff += RXRPC_KEEPALIVE_TIME - 1;
- if (diff < 0)
- goto send;
+ now = ktime_get_seconds();
+ base = rxnet->peer_keepalive_base;
+ cursor = rxnet->peer_keepalive_cursor;
+ _enter("%lld,%u", base - now, cursor);
- slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
- if (slot == 0)
- goto send;
+ if (!rxnet->live)
+ return;
- /* A transmission to this peer occurred since last we examined it so
- * put it into the appropriate future bucket.
+ /* Move all the peers currently lodged in expired buckets, plus all
+ * new peers, onto a temporary list.
+ *
+ * Everything in the bucket at the cursor is processed this
+ * second; the bucket at cursor + 1 goes at now + 1s and so
+ * on...
*/
- slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
spin_lock_bh(&rxnet->peer_hash_lock);
- hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
- rxrpc_put_peer(peer);
- goto next_peer;
-
-send:
- rxrpc_send_keepalive(peer);
- now = ktime_get_real();
- goto recalc;
+ list_splice_init(&rxnet->peer_keepalive_new, &collector);
+
+ stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+ while (base <= now && (s8)(cursor - stop) < 0) {
+ list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
+ &collector);
+ base++;
+ cursor++;
+ }
-emptied_bucket:
- cursor++;
- if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
- cursor = 0;
- base = ktime_add_ns(base, NSEC_PER_SEC);
- goto next_bucket;
+ base = now;
+ spin_unlock_bh(&rxnet->peer_hash_lock);
-resched:
rxnet->peer_keepalive_base = base;
rxnet->peer_keepalive_cursor = cursor;
- delay = nsecs_to_jiffies(-diff) + 1;
- timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
-out:
+ rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
+ ASSERT(list_empty(&collector));
+
+ /* Schedule the timer for the next occupied timeslot. */
+ cursor = rxnet->peer_keepalive_cursor;
+ stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
+ for (; (s8)(cursor - stop) < 0; cursor++) {
+ if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
+ break;
+ base++;
+ }
+
+ now = ktime_get_seconds();
+ delay = base - now;
+ if (delay < 1)
+ delay = 1;
+ delay *= HZ;
+ if (rxnet->live)
+ timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
_leave("");
}
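/*
 * The wheel above has 32 buckets indexed by seconds-until-keepalive and
 * wrapped with a power-of-two mask. A userspace model of the slot
 * calculation from rxrpc_peer_keepalive_dispatch(), with made-up times:
 */
#include <stdio.h>

#define NBUCKETS	32	/* must stay a power of two for the mask */
#define KEEPALIVE_TIME	20

int main(void)
{
	unsigned char cursor = 30;		/* bucket for "now" */
	long long base = 1000, last_tx = 995;
	long long keepalive_at = last_tx + KEEPALIVE_TIME;	/* 1015 */
	int slot = keepalive_at - base;				/* 15s out */

	if (slot <= 0 || slot > KEEPALIVE_TIME)	/* overdue: ping, rearm */
		slot = KEEPALIVE_TIME;
	printf("bucket %d\n", (slot + cursor) & (NBUCKETS - 1));  /* 13 */
	return 0;
}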
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1b7e8107b3ae..1dc7648e3eff 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
if (!peer) {
peer = prealloc;
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
- hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+ list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
}
spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
if (!peer) {
hash_add_rcu(rxnet->peer_hash,
&candidate->hash_link, hash_key);
- hlist_add_head(&candidate->keepalive_link,
- &rxnet->peer_keepalive_new);
+ list_add_tail(&candidate->keepalive_link,
+ &rxnet->peer_keepalive_new);
}
spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -406,7 +406,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
const void *here = __builtin_return_address(0);
if (peer) {
- int n = __atomic_add_unless(&peer->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
if (n > 0)
trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
else
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
- hlist_del_init(&peer->keepalive_link);
+ list_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);
kfree_rcu(peer, rcu);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 278ac0807a60..47cb019c521a 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
return -EAGAIN;
}
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
_leave(" = 0");
return 0;
}
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
return -EAGAIN;
}
- conn->params.peer->last_tx_at = ktime_get_real();
+ conn->params.peer->last_tx_at = ktime_get_seconds();
_leave(" = 0");
return 0;
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 05e4ffe5aabd..e7de5f282722 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1122,6 +1122,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
sock_hold(lsk); /* sock_put in smc_listen_work */
INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
smc_copy_sock_settings_to_smc(new_smc);
+ new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
+ new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
sock_hold(&new_smc->sk); /* sock_put in passive closing */
if (!schedule_work(&new_smc->smc_listen_work))
sock_put(&new_smc->sk);
@@ -1397,8 +1399,7 @@ static int smc_shutdown(struct socket *sock, int how)
lock_sock(sk);
rc = -ENOTCONN;
- if ((sk->sk_state != SMC_LISTEN) &&
- (sk->sk_state != SMC_ACTIVE) &&
+ if ((sk->sk_state != SMC_ACTIVE) &&
(sk->sk_state != SMC_PEERCLOSEWAIT1) &&
(sk->sk_state != SMC_PEERCLOSEWAIT2) &&
(sk->sk_state != SMC_APPCLOSEWAIT1) &&
@@ -1521,12 +1522,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
smc = smc_sk(sock->sk);
conn = &smc->conn;
+ lock_sock(&smc->sk);
if (smc->use_fallback) {
- if (!smc->clcsock)
+ if (!smc->clcsock) {
+ release_sock(&smc->sk);
return -EBADF;
- return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+ }
+ answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+ release_sock(&smc->sk);
+ return answ;
}
- lock_sock(&smc->sk);
switch (cmd) {
case SIOCINQ: /* same as FIONREAD */
if (smc->sk.sk_state == SMC_LISTEN) {
diff --git a/net/socket.c b/net/socket.c
index 8c24d5dc4bc8..792f0313ea91 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -388,39 +388,20 @@ static struct file_system_type sock_fs_type = {
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
- struct qstr name = { .name = "" };
- struct path path;
struct file *file;
- if (dname) {
- name.name = dname;
- name.len = strlen(name.name);
- } else if (sock->sk) {
- name.name = sock->sk->sk_prot_creator->name;
- name.len = strlen(name.name);
- }
- path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
- if (unlikely(!path.dentry)) {
- sock_release(sock);
- return ERR_PTR(-ENOMEM);
- }
- path.mnt = mntget(sock_mnt);
-
- d_instantiate(path.dentry, SOCK_INODE(sock));
+ if (!dname)
+ dname = sock->sk ? sock->sk->sk_prot_creator->name : "";
- file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
- &socket_file_ops);
+ file = alloc_file_pseudo(SOCK_INODE(sock), sock_mnt, dname,
+ O_RDWR | (flags & O_NONBLOCK),
+ &socket_file_ops);
if (IS_ERR(file)) {
- /* drop dentry, keep inode for a bit */
- ihold(d_inode(path.dentry));
- path_put(&path);
- /* ... and now kill it properly */
sock_release(sock);
return file;
}
sock->file = file;
- file->f_flags = O_RDWR | (flags & O_NONBLOCK);
file->private_data = sock;
return file;
}
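/*
 * Both this conversion and the shmem one earlier funnel into the same new
 * VFS helper; a sketch of the contract the two callers rely on (signature
 * as used in this series):
 */
file = alloc_file_pseudo(SOCK_INODE(sock), sock_mnt, dname,
			 O_RDWR | (flags & O_NONBLOCK), &socket_file_ops);
/*
 * On failure the inode is not consumed: shmem answers with iput(inode),
 * this caller with sock_release(sock), which drops the inode along with
 * the socket.
 */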
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a7f6964c3a4b..62199cf5a56c 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -123,15 +123,13 @@ void tipc_net_finalize(struct net *net, u32 addr)
{
struct tipc_net *tn = tipc_net(net);
- spin_lock_bh(&tn->node_list_lock);
- if (!tipc_own_addr(net)) {
+ if (!cmpxchg(&tn->node_addr, 0, addr)) {
tipc_set_node_addr(net, addr);
tipc_named_reinit(net);
tipc_sk_reinit(net);
tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
TIPC_CLUSTER_SCOPE, 0, addr);
}
- spin_unlock_bh(&tn->node_list_lock);
}
void tipc_net_stop(struct net *net)
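The cmpxchg() above replaces a lock with an atomic test-and-set: cmpxchg() returns the old value, so exactly one caller observes 0, installs the address, and runs the one-time initialization. The idiom in isolation (illustrative names):

	static unsigned long initialized;

	void maybe_init(void)
	{
		if (!cmpxchg(&initialized, 0, 1))
			do_one_time_setup();	/* hypothetical; runs exactly once */
	}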
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index c1076c19b858..ab27a2872935 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
return transport->shutdown(vsock_sk(sk), mode);
}
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
{
struct sock *sk;
struct sock *listener;
struct vsock_sock *vsk;
bool cleanup;
- vsk = container_of(work, struct vsock_sock, dwork.work);
+ vsk = container_of(work, struct vsock_sock, pending_work.work);
sk = sk_vsock(vsk);
listener = vsk->listener;
cleanup = true;
@@ -498,7 +498,6 @@ out:
sock_put(sk);
sock_put(listener);
}
-EXPORT_SYMBOL_GPL(vsock_pending_work);
/**** SOCKET OPERATIONS ****/
@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
return retval;
}
+static void vsock_connect_timeout(struct work_struct *work);
+
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
vsk->sent_request = false;
vsk->ignore_connecting_rst = false;
vsk->peer_shutdown = 0;
+ INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+ INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
psk = parent ? vsock_sk(parent) : NULL;
if (parent) {
@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
struct vsock_sock *vsk;
int cancel = 0;
- vsk = container_of(work, struct vsock_sock, dwork.work);
+ vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk);
lock_sock(sk);
@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
* timeout fires.
*/
sock_hold(sk);
- INIT_DELAYED_WORK(&vsk->dwork,
- vsock_connect_timeout);
- schedule_delayed_work(&vsk->dwork, timeout);
+ schedule_delayed_work(&vsk->connect_work, timeout);
/* Skip ahead to preserve error code set above. */
goto out_wait;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index a7a73ffe675b..cb332adb84cd 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
vpending->listener = sk;
sock_hold(sk);
sock_hold(pending);
- INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
- schedule_delayed_work(&vpending->dwork, HZ);
+ schedule_delayed_work(&vpending->pending_work, HZ);
out:
return err;
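The vsock hunks above all follow one idiom: each delayed work item is initialized exactly once when the object is created, and later code only schedules or cancels it, never re-initializes a possibly queued item. A sketch under that assumption:

	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);	/* once, at create */

	schedule_delayed_work(&vsk->connect_work, timeout);	/* later, any number of times */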
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 303e9e7161f3..4938dcbaecbf 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
/* Special map type that can XDP_REDIRECT frames to another CPU */
struct bpf_map_def SEC("maps") cpu_map = {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index f6efaefd485b..4b4d78fffe30 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
#include <arpa/inet.h>
#include <linux/if_link.h>
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
* procedure.
*/
create_cpu_entry(1, 1024, 0, false);
- create_cpu_entry(1, 128, 0, false);
+ create_cpu_entry(1, 8, 0, false);
create_cpu_entry(1, 16000, 0, false);
}
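The two MAX_CPUS definitions still have to be kept in sync by hand, as the warnings say. One conventional alternative (not what the sample does) is a small shared header included by both the _kern.c and _user.c sides, for example:

	/* hypothetical xdp_redirect_cpu_common.h, included by both sides */
	#ifndef XDP_REDIRECT_CPU_COMMON_H
	#define XDP_REDIRECT_CPU_COMMON_H
	#define MAX_CPUS 64
	#endif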
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index b593b36ccff8..38b2b4818e8e 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
endif
-ifdef CONFIG_UBSAN_NULL
- CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
-endif
-
# -fsanitize=* options make GCC less smart than usual and
# increase the number of 'maybe-uninitialized' false positives
CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
diff --git a/security/Kconfig b/security/Kconfig
index c4302067a3ad..afa91c6f06bb 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
default y
- depends on X86_64 && !UML
+ depends on X86 && !UML
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 74f17376202b..8b8b70620bbe 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -395,7 +395,7 @@ static int apparmor_inode_getattr(const struct path *path)
return common_perm_cond(OP_GETATTR, path, AA_MAY_GETATTR);
}
-static int apparmor_file_open(struct file *file, const struct cred *cred)
+static int apparmor_file_open(struct file *file)
{
struct aa_file_ctx *fctx = file_ctx(file);
struct aa_label *label;
@@ -414,7 +414,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
return 0;
}
- label = aa_get_newest_cred_label(cred);
+ label = aa_get_newest_cred_label(file->f_cred);
if (!unconfined(label)) {
struct inode *inode = file_inode(file);
struct path_cond cond = { inode->i_uid, inode->i_mode };
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 354bb5716ce3..e4c1a236976c 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -238,7 +238,7 @@ int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int opened);
+ int xattr_len);
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
@@ -254,7 +254,7 @@ static inline int ima_appraise_measurement(enum ima_hooks func,
struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int opened)
+ int xattr_len)
{
return INTEGRITY_UNKNOWN;
}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 8bd7a0733e51..deec1804a00a 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -212,7 +212,7 @@ int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int opened)
+ int xattr_len)
{
static const char op[] = "appraise_data";
const char *cause = "unknown";
@@ -231,7 +231,7 @@ int ima_appraise_measurement(enum ima_hooks func,
cause = iint->flags & IMA_DIGSIG_REQUIRED ?
"IMA-signature-required" : "missing-hash";
status = INTEGRITY_NOLABEL;
- if (opened & FILE_CREATED)
+ if (file->f_mode & FMODE_CREATED)
iint->flags |= IMA_NEW_FILE;
if ((iint->flags & IMA_NEW_FILE) &&
(!(iint->flags & IMA_DIGSIG_REQUIRED) ||
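With the int opened parameter gone, "was this file created by the open?" is read from state the VFS records on the file itself: FMODE_CREATED is set in file->f_mode when the open actually created the file, so any layer holding the struct file can test it. A minimal sketch:

	if (file->f_mode & FMODE_CREATED)
		treat_as_new_file();	/* hypothetical helper */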
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index dca44cf7838e..b286f37712d5 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -168,7 +168,7 @@ void ima_file_free(struct file *file)
static int process_measurement(struct file *file, const struct cred *cred,
u32 secid, char *buf, loff_t size, int mask,
- enum ima_hooks func, int opened)
+ enum ima_hooks func)
{
struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint = NULL;
@@ -294,7 +294,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
inode_lock(inode);
rc = ima_appraise_measurement(func, iint, file, pathname,
- xattr_value, xattr_len, opened);
+ xattr_value, xattr_len);
inode_unlock(inode);
}
if (action & IMA_AUDIT)
@@ -338,7 +338,7 @@ int ima_file_mmap(struct file *file, unsigned long prot)
if (file && (prot & PROT_EXEC)) {
security_task_getsecid(current, &secid);
return process_measurement(file, current_cred(), secid, NULL,
- 0, MAY_EXEC, MMAP_CHECK, 0);
+ 0, MAY_EXEC, MMAP_CHECK);
}
return 0;
@@ -364,13 +364,13 @@ int ima_bprm_check(struct linux_binprm *bprm)
security_task_getsecid(current, &secid);
ret = process_measurement(bprm->file, current_cred(), secid, NULL, 0,
- MAY_EXEC, BPRM_CHECK, 0);
+ MAY_EXEC, BPRM_CHECK);
if (ret)
return ret;
security_cred_getsecid(bprm->cred, &secid);
return process_measurement(bprm->file, bprm->cred, secid, NULL, 0,
- MAY_EXEC, CREDS_CHECK, 0);
+ MAY_EXEC, CREDS_CHECK);
}
/**
@@ -383,14 +383,14 @@ int ima_bprm_check(struct linux_binprm *bprm)
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_file_check(struct file *file, int mask, int opened)
+int ima_file_check(struct file *file, int mask)
{
u32 secid;
security_task_getsecid(current, &secid);
return process_measurement(file, current_cred(), secid, NULL, 0,
mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
- MAY_APPEND), FILE_CHECK, opened);
+ MAY_APPEND), FILE_CHECK);
}
EXPORT_SYMBOL_GPL(ima_file_check);
@@ -493,7 +493,7 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
func = read_idmap[read_id] ?: FILE_CHECK;
security_task_getsecid(current, &secid);
return process_measurement(file, current_cred(), secid, buf, size,
- MAY_READ, func, 0);
+ MAY_READ, func);
}
static int __init init_ima(void)
diff --git a/security/security.c b/security/security.c
index 68f46d849abe..5dce67070cdf 100644
--- a/security/security.c
+++ b/security/security.c
@@ -970,11 +970,11 @@ int security_file_receive(struct file *file)
return call_int_hook(file_receive, 0, file);
}
-int security_file_open(struct file *file, const struct cred *cred)
+int security_file_open(struct file *file)
{
int ret;
- ret = call_int_hook(file_open, 0, file, cred);
+ ret = call_int_hook(file_open, 0, file);
if (ret)
return ret;
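With the const struct cred * argument dropped, file_open hooks obtain the opener's credentials from file->f_cred, which the VFS captured when the file was opened. A hypothetical hook following the new signature:

	static int example_file_open(struct file *file)
	{
		const struct cred *cred = file->f_cred;	/* opener's creds */

		return example_check_open(cred, file);	/* hypothetical check */
	}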
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2b5ee5fbd652..18006be15713 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3862,7 +3862,7 @@ static int selinux_file_receive(struct file *file)
return file_has_perm(cred, file, file_to_av(file));
}
-static int selinux_file_open(struct file *file, const struct cred *cred)
+static int selinux_file_open(struct file *file)
{
struct file_security_struct *fsec;
struct inode_security_struct *isec;
@@ -3886,7 +3886,7 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
* new inode label or new policy.
* This check is not redundant - do not remove.
*/
- return file_path_has_perm(cred, file, open_file_to_av(file));
+ return file_path_has_perm(file->f_cred, file, open_file_to_av(file));
}
/* task security operations */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 19de675d4504..9ab8097dab7c 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1927,9 +1927,9 @@ static int smack_file_receive(struct file *file)
*
* Returns 0
*/
-static int smack_file_open(struct file *file, const struct cred *cred)
+static int smack_file_open(struct file *file)
{
- struct task_smack *tsp = cred->security;
+ struct task_smack *tsp = file->f_cred->security;
struct inode *inode = file_inode(file);
struct smk_audit_info ad;
int rc;
@@ -1937,7 +1937,7 @@ static int smack_file_open(struct file *file, const struct cred *cred)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
- rc = smk_bu_credfile(cred, file, MAY_READ, rc);
+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
return rc;
}
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 213b8c593668..9f932e2d6852 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -320,7 +320,7 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_file_open(struct file *f, const struct cred *cred)
+static int tomoyo_file_open(struct file *f)
{
int flags = f->f_flags;
/* Don't check read permission here if called from do_execve(). */
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..5072cbd15c82
--- /dev/null
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define __ARCH_WANT_RENAMEAT
+
+#include <asm-generic/unistd.h>
diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
index fc0df353ff0d..87245c584784 100644
--- a/tools/arch/parisc/include/uapi/asm/errno.h
+++ b/tools/arch/parisc/include/uapi/asm/errno.h
@@ -113,7 +113,6 @@
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
-#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
new file mode 100644
index 000000000000..42990676a55e
--- /dev/null
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -0,0 +1,783 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#include <asm/bitsperlong.h>
+
+/*
+ * This file contains the system call numbers, based on the
+ * layout of the x86-64 architecture, which embeds the
+ * pointer to the syscall in the table.
+ *
+ * As a basic principle, no duplication of functionality
+ * should be added, e.g. we don't use lseek when llseek
+ * is present. New architectures should use this file
+ * and implement the less feature-rich calls in user space.
+ */
+
+#ifndef __SYSCALL
+#define __SYSCALL(x, y)
+#endif
+
+#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
+#else
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
+#endif
+
+#ifdef __SYSCALL_COMPAT
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp)
+#else
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64)
+#endif
+
+#define __NR_io_setup 0
+__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
+#define __NR_io_destroy 1
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_submit 2
+__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
+#define __NR_io_cancel 3
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_io_getevents 4
+__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+
+/* fs/xattr.c */
+#define __NR_setxattr 5
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 6
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 7
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 8
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 9
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 10
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 11
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 12
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 13
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 14
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 15
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 16
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+
+/* fs/dcache.c */
+#define __NR_getcwd 17
+__SYSCALL(__NR_getcwd, sys_getcwd)
+
+/* fs/cookies.c */
+#define __NR_lookup_dcookie 18
+__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
+
+/* fs/eventfd.c */
+#define __NR_eventfd2 19
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+
+/* fs/eventpoll.c */
+#define __NR_epoll_create1 20
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_epoll_ctl 21
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_pwait 22
+__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
+
+/* fs/fcntl.c */
+#define __NR_dup 23
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_dup3 24
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR3264_fcntl 25
+__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
+
+/* fs/inotify_user.c */
+#define __NR_inotify_init1 26
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_inotify_add_watch 27
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 28
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+
+/* fs/ioctl.c */
+#define __NR_ioctl 29
+__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
+
+/* fs/ioprio.c */
+#define __NR_ioprio_set 30
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 31
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+
+/* fs/locks.c */
+#define __NR_flock 32
+__SYSCALL(__NR_flock, sys_flock)
+
+/* fs/namei.c */
+#define __NR_mknodat 33
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_mkdirat 34
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_unlinkat 35
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_symlinkat 36
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_linkat 37
+__SYSCALL(__NR_linkat, sys_linkat)
+#ifdef __ARCH_WANT_RENAMEAT
+/* renameat is superseded with flags by renameat2 */
+#define __NR_renameat 38
+__SYSCALL(__NR_renameat, sys_renameat)
+#endif /* __ARCH_WANT_RENAMEAT */
+
+/* fs/namespace.c */
+#define __NR_umount2 39
+__SYSCALL(__NR_umount2, sys_umount)
+#define __NR_mount 40
+__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
+#define __NR_pivot_root 41
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+
+/* fs/nfsctl.c */
+#define __NR_nfsservctl 42
+__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
+
+/* fs/open.c */
+#define __NR3264_statfs 43
+__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
+ compat_sys_statfs64)
+#define __NR3264_fstatfs 44
+__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \
+ compat_sys_fstatfs64)
+#define __NR3264_truncate 45
+__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
+ compat_sys_truncate64)
+#define __NR3264_ftruncate 46
+__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
+ compat_sys_ftruncate64)
+
+#define __NR_fallocate 47
+__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
+#define __NR_faccessat 48
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_chdir 49
+__SYSCALL(__NR_chdir, sys_chdir)
+#define __NR_fchdir 50
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_chroot 51
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_fchmod 52
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchmodat 53
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_fchownat 54
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_fchown 55
+__SYSCALL(__NR_fchown, sys_fchown)
+#define __NR_openat 56
+__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+#define __NR_close 57
+__SYSCALL(__NR_close, sys_close)
+#define __NR_vhangup 58
+__SYSCALL(__NR_vhangup, sys_vhangup)
+
+/* fs/pipe.c */
+#define __NR_pipe2 59
+__SYSCALL(__NR_pipe2, sys_pipe2)
+
+/* fs/quota.c */
+#define __NR_quotactl 60
+__SYSCALL(__NR_quotactl, sys_quotactl)
+
+/* fs/readdir.c */
+#define __NR_getdents64 61
+__SYSCALL(__NR_getdents64, sys_getdents64)
+
+/* fs/read_write.c */
+#define __NR3264_lseek 62
+__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
+#define __NR_read 63
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 64
+__SYSCALL(__NR_write, sys_write)
+#define __NR_readv 65
+__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
+#define __NR_writev 66
+__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
+#define __NR_pread64 67
+__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
+#define __NR_pwrite64 68
+__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
+#define __NR_preadv 69
+__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
+#define __NR_pwritev 70
+__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
+
+/* fs/sendfile.c */
+#define __NR3264_sendfile 71
+__SYSCALL(__NR3264_sendfile, sys_sendfile64)
+
+/* fs/select.c */
+#define __NR_pselect6 72
+__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+#define __NR_ppoll 73
+__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+
+/* fs/signalfd.c */
+#define __NR_signalfd4 74
+__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
+
+/* fs/splice.c */
+#define __NR_vmsplice 75
+__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
+#define __NR_splice 76
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_tee 77
+__SYSCALL(__NR_tee, sys_tee)
+
+/* fs/stat.c */
+#define __NR_readlinkat 78
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR3264_fstatat 79
+__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
+#define __NR3264_fstat 80
+__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+
+/* fs/sync.c */
+#define __NR_sync 81
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_fsync 82
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_fdatasync 83
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
+#define __NR_sync_file_range2 84
+__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
+ compat_sys_sync_file_range2)
+#else
+#define __NR_sync_file_range 84
+__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
+ compat_sys_sync_file_range)
+#endif
+
+/* fs/timerfd.c */
+#define __NR_timerfd_create 85
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_timerfd_settime 86
+__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
+ compat_sys_timerfd_settime)
+#define __NR_timerfd_gettime 87
+__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
+ compat_sys_timerfd_gettime)
+
+/* fs/utimes.c */
+#define __NR_utimensat 88
+__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+
+/* kernel/acct.c */
+#define __NR_acct 89
+__SYSCALL(__NR_acct, sys_acct)
+
+/* kernel/capability.c */
+#define __NR_capget 90
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 91
+__SYSCALL(__NR_capset, sys_capset)
+
+/* kernel/exec_domain.c */
+#define __NR_personality 92
+__SYSCALL(__NR_personality, sys_personality)
+
+/* kernel/exit.c */
+#define __NR_exit 93
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_exit_group 94
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_waitid 95
+__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
+
+/* kernel/fork.c */
+#define __NR_set_tid_address 96
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_unshare 97
+__SYSCALL(__NR_unshare, sys_unshare)
+
+/* kernel/futex.c */
+#define __NR_futex 98
+__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+#define __NR_set_robust_list 99
+__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
+ compat_sys_set_robust_list)
+#define __NR_get_robust_list 100
+__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
+ compat_sys_get_robust_list)
+
+/* kernel/hrtimer.c */
+#define __NR_nanosleep 101
+__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+
+/* kernel/itimer.c */
+#define __NR_getitimer 102
+__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
+#define __NR_setitimer 103
+__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
+
+/* kernel/kexec.c */
+#define __NR_kexec_load 104
+__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
+
+/* kernel/module.c */
+#define __NR_init_module 105
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 106
+__SYSCALL(__NR_delete_module, sys_delete_module)
+
+/* kernel/posix-timers.c */
+#define __NR_timer_create 107
+__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#define __NR_timer_gettime 108
+__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+#define __NR_timer_getoverrun 109
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_settime 110
+__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+#define __NR_timer_delete 111
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 112
+__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+#define __NR_clock_gettime 113
+__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+#define __NR_clock_getres 114
+__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+#define __NR_clock_nanosleep 115
+__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
+ compat_sys_clock_nanosleep)
+
+/* kernel/printk.c */
+#define __NR_syslog 116
+__SYSCALL(__NR_syslog, sys_syslog)
+
+/* kernel/ptrace.c */
+#define __NR_ptrace 117
+__SYSCALL(__NR_ptrace, sys_ptrace)
+
+/* kernel/sched/core.c */
+#define __NR_sched_setparam 118
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_setscheduler 119
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 120
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_getparam 121
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setaffinity 122
+__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \
+ compat_sys_sched_setaffinity)
+#define __NR_sched_getaffinity 123
+__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \
+ compat_sys_sched_getaffinity)
+#define __NR_sched_yield 124
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 125
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 126
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 127
+__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
+ compat_sys_sched_rr_get_interval)
+
+/* kernel/signal.c */
+#define __NR_restart_syscall 128
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_kill 129
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_tkill 130
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_tgkill 131
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_sigaltstack 132
+__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack)
+#define __NR_rt_sigsuspend 133
+__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend)
+#define __NR_rt_sigaction 134
+__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
+#define __NR_rt_sigprocmask 135
+__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
+#define __NR_rt_sigpending 136
+__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 137
+__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
+ compat_sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 138
+__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
+ compat_sys_rt_sigqueueinfo)
+#define __NR_rt_sigreturn 139
+__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
+
+/* kernel/sys.c */
+#define __NR_setpriority 140
+__SYSCALL(__NR_setpriority, sys_setpriority)
+#define __NR_getpriority 141
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_reboot 142
+__SYSCALL(__NR_reboot, sys_reboot)
+#define __NR_setregid 143
+__SYSCALL(__NR_setregid, sys_setregid)
+#define __NR_setgid 144
+__SYSCALL(__NR_setgid, sys_setgid)
+#define __NR_setreuid 145
+__SYSCALL(__NR_setreuid, sys_setreuid)
+#define __NR_setuid 146
+__SYSCALL(__NR_setuid, sys_setuid)
+#define __NR_setresuid 147
+__SYSCALL(__NR_setresuid, sys_setresuid)
+#define __NR_getresuid 148
+__SYSCALL(__NR_getresuid, sys_getresuid)
+#define __NR_setresgid 149
+__SYSCALL(__NR_setresgid, sys_setresgid)
+#define __NR_getresgid 150
+__SYSCALL(__NR_getresgid, sys_getresgid)
+#define __NR_setfsuid 151
+__SYSCALL(__NR_setfsuid, sys_setfsuid)
+#define __NR_setfsgid 152
+__SYSCALL(__NR_setfsgid, sys_setfsgid)
+#define __NR_times 153
+__SC_COMP(__NR_times, sys_times, compat_sys_times)
+#define __NR_setpgid 154
+__SYSCALL(__NR_setpgid, sys_setpgid)
+#define __NR_getpgid 155
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_getsid 156
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_setsid 157
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_getgroups 158
+__SYSCALL(__NR_getgroups, sys_getgroups)
+#define __NR_setgroups 159
+__SYSCALL(__NR_setgroups, sys_setgroups)
+#define __NR_uname 160
+__SYSCALL(__NR_uname, sys_newuname)
+#define __NR_sethostname 161
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setdomainname 162
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_getrlimit 163
+__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
+#define __NR_setrlimit 164
+__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#define __NR_getrusage 165
+__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
+#define __NR_umask 166
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_prctl 167
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_getcpu 168
+__SYSCALL(__NR_getcpu, sys_getcpu)
+
+/* kernel/time.c */
+#define __NR_gettimeofday 169
+__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
+#define __NR_settimeofday 170
+__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
+#define __NR_adjtimex 171
+__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+
+/* kernel/timer.c */
+#define __NR_getpid 172
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_getppid 173
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getuid 174
+__SYSCALL(__NR_getuid, sys_getuid)
+#define __NR_geteuid 175
+__SYSCALL(__NR_geteuid, sys_geteuid)
+#define __NR_getgid 176
+__SYSCALL(__NR_getgid, sys_getgid)
+#define __NR_getegid 177
+__SYSCALL(__NR_getegid, sys_getegid)
+#define __NR_gettid 178
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_sysinfo 179
+__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
+
+/* ipc/mqueue.c */
+#define __NR_mq_open 180
+__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
+#define __NR_mq_unlink 181
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 182
+__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+#define __NR_mq_timedreceive 183
+__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
+ compat_sys_mq_timedreceive)
+#define __NR_mq_notify 184
+__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
+#define __NR_mq_getsetattr 185
+__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
+
+/* ipc/msg.c */
+#define __NR_msgget 186
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 187
+__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
+#define __NR_msgrcv 188
+__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
+#define __NR_msgsnd 189
+__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
+
+/* ipc/sem.c */
+#define __NR_semget 190
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 191
+__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#define __NR_semtimedop 192
+__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+#define __NR_semop 193
+__SYSCALL(__NR_semop, sys_semop)
+
+/* ipc/shm.c */
+#define __NR_shmget 194
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 195
+__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
+#define __NR_shmat 196
+__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
+#define __NR_shmdt 197
+__SYSCALL(__NR_shmdt, sys_shmdt)
+
+/* net/socket.c */
+#define __NR_socket 198
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_socketpair 199
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_bind 200
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_listen 201
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 202
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_connect 203
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_getsockname 204
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 205
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_sendto 206
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recvfrom 207
+__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
+#define __NR_setsockopt 208
+__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+#define __NR_getsockopt 209
+__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+#define __NR_shutdown 210
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_sendmsg 211
+__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
+#define __NR_recvmsg 212
+__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
+
+/* mm/filemap.c */
+#define __NR_readahead 213
+__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
+
+/* mm/nommu.c, also with MMU */
+#define __NR_brk 214
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_munmap 215
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_mremap 216
+__SYSCALL(__NR_mremap, sys_mremap)
+
+/* security/keys/keyctl.c */
+#define __NR_add_key 217
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 218
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 219
+__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
+
+/* arch/example/kernel/sys_example.c */
+#define __NR_clone 220
+__SYSCALL(__NR_clone, sys_clone)
+#define __NR_execve 221
+__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
+
+#define __NR3264_mmap 222
+__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
+/* mm/fadvise.c */
+#define __NR3264_fadvise64 223
+__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
+
+/* mm/, CONFIG_MMU only */
+#ifndef __ARCH_NOMMU
+#define __NR_swapon 224
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_swapoff 225
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_mprotect 226
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_msync 227
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_mlock 228
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 229
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 230
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 231
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_mincore 232
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 233
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_remap_file_pages 234
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+#define __NR_mbind 235
+__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+#define __NR_get_mempolicy 236
+__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+#define __NR_set_mempolicy 237
+__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+#define __NR_migrate_pages 238
+__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+#define __NR_move_pages 239
+__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+#endif
+
+#define __NR_rt_tgsigqueueinfo 240
+__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
+ compat_sys_rt_tgsigqueueinfo)
+#define __NR_perf_event_open 241
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_accept4 242
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_recvmmsg 243
+__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+
+/*
+ * Architectures may provide up to 16 syscalls of their own
+ * starting with this value.
+ */
+#define __NR_arch_specific_syscall 244
+
+#define __NR_wait4 260
+__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#define __NR_prlimit64 261
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_fanotify_init 262
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 263
+__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_name_to_handle_at 264
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 265
+__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
+ compat_sys_open_by_handle_at)
+#define __NR_clock_adjtime 266
+__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+#define __NR_syncfs 267
+__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_setns 268
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_sendmmsg 269
+__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+ compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+ compat_sys_process_vm_writev)
+#define __NR_kcmp 272
+__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 273
+__SYSCALL(__NR_finit_module, sys_finit_module)
+#define __NR_sched_setattr 274
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+#define __NR_sched_getattr 275
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
+#define __NR_seccomp 277
+__SYSCALL(__NR_seccomp, sys_seccomp)
+#define __NR_getrandom 278
+__SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 279
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 280
+__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 281
+__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 282
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 283
+__SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 284
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 285
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 287
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 288
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 289
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 290
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 291
+__SYSCALL(__NR_statx, sys_statx)
+#define __NR_io_pgetevents 292
+__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+
+#undef __NR_syscalls
+#define __NR_syscalls 293
+
+/*
+ * 32 bit systems traditionally used different
+ * syscalls for off_t and loff_t arguments, while
+ * 64 bit systems only need the off_t version.
+ * For new 32 bit platforms, there is no need to
+ * implement the old 32 bit off_t syscalls, so
+ * they take different names.
+ * Here we map the numbers so that both versions
+ * use the same syscall table layout.
+ */
+#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT)
+#define __NR_fcntl __NR3264_fcntl
+#define __NR_statfs __NR3264_statfs
+#define __NR_fstatfs __NR3264_fstatfs
+#define __NR_truncate __NR3264_truncate
+#define __NR_ftruncate __NR3264_ftruncate
+#define __NR_lseek __NR3264_lseek
+#define __NR_sendfile __NR3264_sendfile
+#define __NR_newfstatat __NR3264_fstatat
+#define __NR_fstat __NR3264_fstat
+#define __NR_mmap __NR3264_mmap
+#define __NR_fadvise64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat __NR3264_stat
+#define __NR_lstat __NR3264_lstat
+#endif
+#else
+#define __NR_fcntl64 __NR3264_fcntl
+#define __NR_statfs64 __NR3264_statfs
+#define __NR_fstatfs64 __NR3264_fstatfs
+#define __NR_truncate64 __NR3264_truncate
+#define __NR_ftruncate64 __NR3264_ftruncate
+#define __NR_llseek __NR3264_lseek
+#define __NR_sendfile64 __NR3264_sendfile
+#define __NR_fstatat64 __NR3264_fstatat
+#define __NR_fstat64 __NR3264_fstat
+#define __NR_mmap2 __NR3264_mmap
+#define __NR_fadvise64_64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat64 __NR3264_stat
+#define __NR_lstat64 __NR3264_lstat
+#endif
+#endif
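Architectures typically consume this header twice: once plainly, for the __NR_* constants, and once with __SYSCALL defined to emit one table entry per line. A sketch of the second expansion, assuming the numbering above (the exact pattern varies per architecture):

	#undef __SYSCALL
	#define __SYSCALL(nr, sym)	[nr] = (void *)sym,

	void *sys_call_table[__NR_syscalls] = {
		[0 ... __NR_syscalls - 1] = (void *)sys_ni_syscall,
	#include <asm-generic/unistd.h>
	};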
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
new file mode 100644
index 000000000000..48e8a225b985
--- /dev/null
+++ b/tools/include/uapi/linux/in.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the Internet Protocol.
+ *
+ * Version: @(#)in.h 1.0.1 04/21/93
+ *
+ * Authors: Original taken from the GNU Project <netinet/in.h> file.
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_LINUX_IN_H
+#define _UAPI_LINUX_IN_H
+
+#include <linux/types.h>
+#include <linux/libc-compat.h>
+#include <linux/socket.h>
+
+#if __UAPI_DEF_IN_IPPROTO
+/* Standard well-defined IP protocols. */
+enum {
+ IPPROTO_IP = 0, /* Dummy protocol for TCP */
+#define IPPROTO_IP IPPROTO_IP
+ IPPROTO_ICMP = 1, /* Internet Control Message Protocol */
+#define IPPROTO_ICMP IPPROTO_ICMP
+ IPPROTO_IGMP = 2, /* Internet Group Management Protocol */
+#define IPPROTO_IGMP IPPROTO_IGMP
+ IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+#define IPPROTO_IPIP IPPROTO_IPIP
+ IPPROTO_TCP = 6, /* Transmission Control Protocol */
+#define IPPROTO_TCP IPPROTO_TCP
+ IPPROTO_EGP = 8, /* Exterior Gateway Protocol */
+#define IPPROTO_EGP IPPROTO_EGP
+ IPPROTO_PUP = 12, /* PUP protocol */
+#define IPPROTO_PUP IPPROTO_PUP
+ IPPROTO_UDP = 17, /* User Datagram Protocol */
+#define IPPROTO_UDP IPPROTO_UDP
+ IPPROTO_IDP = 22, /* XNS IDP protocol */
+#define IPPROTO_IDP IPPROTO_IDP
+ IPPROTO_TP = 29, /* SO Transport Protocol Class 4 */
+#define IPPROTO_TP IPPROTO_TP
+ IPPROTO_DCCP = 33, /* Datagram Congestion Control Protocol */
+#define IPPROTO_DCCP IPPROTO_DCCP
+ IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */
+#define IPPROTO_IPV6 IPPROTO_IPV6
+ IPPROTO_RSVP = 46, /* RSVP Protocol */
+#define IPPROTO_RSVP IPPROTO_RSVP
+ IPPROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */
+#define IPPROTO_GRE IPPROTO_GRE
+ IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */
+#define IPPROTO_ESP IPPROTO_ESP
+ IPPROTO_AH = 51, /* Authentication Header protocol */
+#define IPPROTO_AH IPPROTO_AH
+ IPPROTO_MTP = 92, /* Multicast Transport Protocol */
+#define IPPROTO_MTP IPPROTO_MTP
+ IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
+#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ENCAP = 98, /* Encapsulation Header */
+#define IPPROTO_ENCAP IPPROTO_ENCAP
+ IPPROTO_PIM = 103, /* Protocol Independent Multicast */
+#define IPPROTO_PIM IPPROTO_PIM
+ IPPROTO_COMP = 108, /* Compression Header Protocol */
+#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
+#define IPPROTO_SCTP IPPROTO_SCTP
+ IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
+#define IPPROTO_UDPLITE IPPROTO_UDPLITE
+ IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
+#define IPPROTO_MPLS IPPROTO_MPLS
+ IPPROTO_RAW = 255, /* Raw IP packets */
+#define IPPROTO_RAW IPPROTO_RAW
+ IPPROTO_MAX
+};
+#endif
+
+#if __UAPI_DEF_IN_ADDR
+/* Internet address. */
+struct in_addr {
+ __be32 s_addr;
+};
+#endif
+
+#define IP_TOS 1
+#define IP_TTL 2
+#define IP_HDRINCL 3
+#define IP_OPTIONS 4
+#define IP_ROUTER_ALERT 5
+#define IP_RECVOPTS 6
+#define IP_RETOPTS 7
+#define IP_PKTINFO 8
+#define IP_PKTOPTIONS 9
+#define IP_MTU_DISCOVER 10
+#define IP_RECVERR 11
+#define IP_RECVTTL 12
+#define IP_RECVTOS 13
+#define IP_MTU 14
+#define IP_FREEBIND 15
+#define IP_IPSEC_POLICY 16
+#define IP_XFRM_POLICY 17
+#define IP_PASSSEC 18
+#define IP_TRANSPARENT 19
+
+/* BSD compatibility */
+#define IP_RECVRETOPTS IP_RETOPTS
+
+/* TProxy original addresses */
+#define IP_ORIGDSTADDR 20
+#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
+
+#define IP_MINTTL 21
+#define IP_NODEFRAG 22
+#define IP_CHECKSUM 23
+#define IP_BIND_ADDRESS_NO_PORT 24
+#define IP_RECVFRAGSIZE 25
+
+/* IP_MTU_DISCOVER values */
+#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
+#define IP_PMTUDISC_WANT 1 /* Use per route hints */
+#define IP_PMTUDISC_DO 2 /* Always DF */
+#define IP_PMTUDISC_PROBE 3 /* Ignore dst pmtu */
+/* Always use interface mtu (ignores dst pmtu) but don't set DF flag.
+ * Also incoming ICMP frag_needed notifications will be ignored on
+ * this socket to prevent accepting spoofed ones.
+ */
+#define IP_PMTUDISC_INTERFACE 4
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
+ * fragmented if they exceed the interface mtu
+ */
+#define IP_PMTUDISC_OMIT 5
+
+#define IP_MULTICAST_IF 32
+#define IP_MULTICAST_TTL 33
+#define IP_MULTICAST_LOOP 34
+#define IP_ADD_MEMBERSHIP 35
+#define IP_DROP_MEMBERSHIP 36
+#define IP_UNBLOCK_SOURCE 37
+#define IP_BLOCK_SOURCE 38
+#define IP_ADD_SOURCE_MEMBERSHIP 39
+#define IP_DROP_SOURCE_MEMBERSHIP 40
+#define IP_MSFILTER 41
+#define MCAST_JOIN_GROUP 42
+#define MCAST_BLOCK_SOURCE 43
+#define MCAST_UNBLOCK_SOURCE 44
+#define MCAST_LEAVE_GROUP 45
+#define MCAST_JOIN_SOURCE_GROUP 46
+#define MCAST_LEAVE_SOURCE_GROUP 47
+#define MCAST_MSFILTER 48
+#define IP_MULTICAST_ALL 49
+#define IP_UNICAST_IF 50
+
+#define MCAST_EXCLUDE 0
+#define MCAST_INCLUDE 1
+
+/* These need to appear somewhere around here */
+#define IP_DEFAULT_MULTICAST_TTL 1
+#define IP_DEFAULT_MULTICAST_LOOP 1
+
+/* Request struct for multicast socket ops */
+
+#if __UAPI_DEF_IP_MREQ
+struct ip_mreq {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+struct ip_mreqn {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_address; /* local IP address of interface */
+ int imr_ifindex; /* Interface index */
+};
+
+struct ip_mreq_source {
+ __be32 imr_multiaddr;
+ __be32 imr_interface;
+ __be32 imr_sourceaddr;
+};
+
+struct ip_msfilter {
+ __be32 imsf_multiaddr;
+ __be32 imsf_interface;
+ __u32 imsf_fmode;
+ __u32 imsf_numsrc;
+ __be32 imsf_slist[1];
+};
+
+#define IP_MSFILTER_SIZE(numsrc) \
+ (sizeof(struct ip_msfilter) - sizeof(__u32) \
+ + (numsrc) * sizeof(__u32))
+
+struct group_req {
+ __u32 gr_interface; /* interface index */
+ struct __kernel_sockaddr_storage gr_group; /* group address */
+};
+
+struct group_source_req {
+ __u32 gsr_interface; /* interface index */
+ struct __kernel_sockaddr_storage gsr_group; /* group address */
+ struct __kernel_sockaddr_storage gsr_source; /* source address */
+};
+
+struct group_filter {
+ __u32 gf_interface; /* interface index */
+ struct __kernel_sockaddr_storage gf_group; /* multicast address */
+ __u32 gf_fmode; /* filter mode */
+ __u32 gf_numsrc; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist[1]; /* source address list */
+};
+
+#define GROUP_FILTER_SIZE(numsrc) \
+ (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
+ + (numsrc) * sizeof(struct __kernel_sockaddr_storage))
+#endif
+
+#if __UAPI_DEF_IN_PKTINFO
+struct in_pktinfo {
+ int ipi_ifindex;
+ struct in_addr ipi_spec_dst;
+ struct in_addr ipi_addr;
+};
+#endif
+
+/* Structure describing an Internet (IP) socket address. */
+#if __UAPI_DEF_SOCKADDR_IN
+#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
+struct sockaddr_in {
+ __kernel_sa_family_t sin_family; /* Address family */
+ __be16 sin_port; /* Port number */
+ struct in_addr sin_addr; /* Internet address */
+
+ /* Pad to size of `struct sockaddr'. */
+ unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) -
+ sizeof(unsigned short int) - sizeof(struct in_addr)];
+};
+#define sin_zero __pad /* for BSD UNIX comp. -FvK */
+#endif
+
+#if __UAPI_DEF_IN_CLASS
+/*
+ * Definitions of the bits in an Internet address integer.
+ * On subnets, host and network parts are found according
+ * to the subnet mask, not these masks.
+ */
+#define IN_CLASSA(a) ((((long int) (a)) & 0x80000000) == 0)
+#define IN_CLASSA_NET 0xff000000
+#define IN_CLASSA_NSHIFT 24
+#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET)
+#define IN_CLASSA_MAX 128
+
+#define IN_CLASSB(a) ((((long int) (a)) & 0xc0000000) == 0x80000000)
+#define IN_CLASSB_NET 0xffff0000
+#define IN_CLASSB_NSHIFT 16
+#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET)
+#define IN_CLASSB_MAX 65536
+
+#define IN_CLASSC(a) ((((long int) (a)) & 0xe0000000) == 0xc0000000)
+#define IN_CLASSC_NET 0xffffff00
+#define IN_CLASSC_NSHIFT 8
+#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET)
+
+#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
+#define IN_MULTICAST(a) IN_CLASSD(a)
+#define IN_MULTICAST_NET 0xF0000000
+
+#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define IN_BADCLASS(a) IN_EXPERIMENTAL((a))
+
+/* Address to accept any incoming messages. */
+#define INADDR_ANY ((unsigned long int) 0x00000000)
+
+/* Address to send to all hosts. */
+#define INADDR_BROADCAST ((unsigned long int) 0xffffffff)
+
+/* Address indicating an error return. */
+#define INADDR_NONE ((unsigned long int) 0xffffffff)
+
+/* Network number for local host loopback. */
+#define IN_LOOPBACKNET 127
+
+/* Address to loopback in software to local host. */
+#define INADDR_LOOPBACK 0x7f000001 /* 127.0.0.1 */
+#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
+
+/* Defines for Multicast INADDR */
+#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
+#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
+#endif
+
+/* <asm/byteorder.h> contains the htonl type stuff.. */
+#include <asm/byteorder.h>
+
+
+#endif /* _UAPI_LINUX_IN_H */
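The IP_MSFILTER_SIZE() and GROUP_FILTER_SIZE() macros above size the structures for their trailing one-element arrays: they subtract the single declared slot and add numsrc real ones. A hedged userspace sketch of the allocation pattern:

	#include <stdlib.h>

	struct ip_msfilter *alloc_msfilter(unsigned int nsrc)
	{
		struct ip_msfilter *msf = malloc(IP_MSFILTER_SIZE(nsrc));

		if (msf)
			msf->imsf_numsrc = nsrc;	/* nsrc slots follow the header */
		return msf;
	}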
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d270c560df3..c36a3a76986a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
/* Copyright (c) 2018 Facebook */
#include <stdlib.h>
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index e2a09a155f84..caac3a404dc5 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
/* Copyright (c) 2018 Facebook */
#ifndef __BPF_BTF_H
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index 1b09f3175a1f..0cbd1ef8f86d 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -804,7 +804,7 @@ type of fence:
Second, some types of fence affect the way the memory subsystem
propagates stores. When a fence instruction is executed on CPU C:
- For each other CPU C', smb_wmb() forces all po-earlier stores
+ For each other CPU C', smp_wmb() forces all po-earlier stores
on C to propagate to C' before any po-later stores do.
For each other CPU C', any store which propagates to C before
diff --git a/tools/memory-model/Documentation/recipes.txt b/tools/memory-model/Documentation/recipes.txt
index ee4309a87fc4..af72700cc20a 100644
--- a/tools/memory-model/Documentation/recipes.txt
+++ b/tools/memory-model/Documentation/recipes.txt
@@ -126,7 +126,7 @@ However, it is not necessarily the case that accesses ordered by
locking will be seen as ordered by CPUs not holding that lock.
Consider this example:
- /* See Z6.0+pooncelock+pooncelock+pombonce.litmus. */
+ /* See Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus. */
void CPU0(void)
{
spin_lock(&mylock);
@@ -292,7 +292,7 @@ and to use smp_load_acquire() instead of smp_rmb(). However, the older
smp_wmb() and smp_rmb() APIs are still heavily used, so it is important
to understand their use cases. The general approach is shown below:
- /* See MP+wmbonceonce+rmbonceonce.litmus. */
+ /* See MP+fencewmbonceonce+fencermbonceonce.litmus. */
void CPU0(void)
{
WRITE_ONCE(x, 1);
@@ -322,9 +322,9 @@ the following write-side code fragment:
And the xlog_valid_lsn() function in fs/xfs/xfs_log_priv.h contains
the corresponding read-side code fragment:
- cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
+ cur_cycle = READ_ONCE(log->l_curr_cycle);
smp_rmb();
- cur_block = ACCESS_ONCE(log->l_curr_block);
+ cur_block = READ_ONCE(log->l_curr_block);
Alternatively, consider the following comment in function
perf_output_put_handle() in kernel/events/ring_buffer.c:
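The replacement the text recommends pairs smp_store_release() on the writer with smp_load_acquire() on the reader. In the two-variable message-passing shape used throughout these recipes (x is the data, y the flag), that looks like:

	void CPU0(void)
	{
		WRITE_ONCE(x, 1);
		smp_store_release(&y, 1);	/* publish the data */
	}

	void CPU1(void)
	{
		r0 = smp_load_acquire(&y);
		r1 = READ_ONCE(x);	/* r1 == 1 whenever r0 == 1 */
	}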
@@ -360,7 +360,7 @@ can be seen in the LB+poonceonces.litmus litmus test.
One way of avoiding the counter-intuitive outcome is through the use of a
control dependency paired with a full memory barrier:
- /* See LB+ctrlonceonce+mbonceonce.litmus. */
+ /* See LB+fencembonceonce+ctrlonceonce.litmus. */
void CPU0(void)
{
r0 = READ_ONCE(x);
@@ -476,7 +476,7 @@ that one CPU first stores to one variable and then loads from a second,
while another CPU stores to the second variable and then loads from the
first. Preserving order requires nothing less than full barriers:
- /* See SB+mbonceonces.litmus. */
+ /* See SB+fencembonceonces.litmus. */
void CPU0(void)
{
WRITE_ONCE(x, 1);
diff --git a/tools/memory-model/README b/tools/memory-model/README
index 734f7feaa5dc..ee987ce20aae 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -35,13 +35,13 @@ BASIC USAGE: HERD7
The memory model is used, in conjunction with "herd7", to exhaustively
explore the state space of small litmus tests.
-For example, to run SB+mbonceonces.litmus against the memory model:
+For example, to run SB+fencembonceonces.litmus against the memory model:
- $ herd7 -conf linux-kernel.cfg litmus-tests/SB+mbonceonces.litmus
+ $ herd7 -conf linux-kernel.cfg litmus-tests/SB+fencembonceonces.litmus
Here is the corresponding output:
- Test SB+mbonceonces Allowed
+ Test SB+fencembonceonces Allowed
States 3
0:r0=0; 1:r0=1;
0:r0=1; 1:r0=0;
@@ -50,8 +50,8 @@ Here is the corresponding output:
Witnesses
Positive: 0 Negative: 3
Condition exists (0:r0=0 /\ 1:r0=0)
- Observation SB+mbonceonces Never 0 3
- Time SB+mbonceonces 0.01
+ Observation SB+fencembonceonces Never 0 3
+ Time SB+fencembonceonces 0.01
Hash=d66d99523e2cac6b06e66f4c995ebb48
The "Positive: 0 Negative: 3" and the "Never 0 3" each indicate that
@@ -67,16 +67,16 @@ BASIC USAGE: KLITMUS7
The "klitmus7" tool converts a litmus test into a Linux kernel module,
which may then be loaded and run.
-For example, to run SB+mbonceonces.litmus against hardware:
+For example, to run SB+fencembonceonces.litmus against hardware:
$ mkdir mymodules
- $ klitmus7 -o mymodules litmus-tests/SB+mbonceonces.litmus
+ $ klitmus7 -o mymodules litmus-tests/SB+fencembonceonces.litmus
$ cd mymodules ; make
$ sudo sh run.sh
The corresponding output includes:
- Test SB+mbonceonces Allowed
+ Test SB+fencembonceonces Allowed
Histogram (3 states)
644580 :>0:r0=1; 1:r0=0;
644328 :>0:r0=0; 1:r0=1;
@@ -86,8 +86,8 @@ The corresponding output includes:
Positive: 0, Negative: 2000000
Condition exists (0:r0=0 /\ 1:r0=0) is NOT validated
Hash=d66d99523e2cac6b06e66f4c995ebb48
- Observation SB+mbonceonces Never 0 2000000
- Time SB+mbonceonces 0.16
+ Observation SB+fencembonceonces Never 0 2000000
+ Time SB+fencembonceonces 0.16
The "Positive: 0 Negative: 2000000" and the "Never 0 2000000" indicate
that during two million trials, the state specified in this litmus
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 64f5740e0e75..b84fb2f67109 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -13,7 +13,7 @@
"Linux-kernel memory consistency model"
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE,ACCESS_ONCE*) ||
+enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
'release (*smp_store_release*) ||
'acquire (*smp_load_acquire*) ||
'noreturn (* R of non-return RMW *)
diff --git a/tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus
index 98a3716efa37..e729d2776e89 100644
--- a/tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus
+++ b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus
@@ -1,4 +1,4 @@
-C IRIW+mbonceonces+OnceOnce
+C IRIW+fencembonceonces+OnceOnce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
index 7a39a0aaa976..0f749e419b34 100644
--- a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
+++ b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
@@ -1,4 +1,4 @@
-C ISA2+pooncelock+pooncelock+pombonce.litmus
+C ISA2+pooncelock+pooncelock+pombonce
(*
* Result: Sometimes
diff --git a/tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus
index de6708229dd1..4727f5aaf03b 100644
--- a/tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus
+++ b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus
@@ -1,4 +1,4 @@
-C LB+ctrlonceonce+mbonceonce
+C LB+fencembonceonce+ctrlonceonce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
index c078f38ff27a..a273da9faa6d 100644
--- a/tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus
+++ b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
@@ -1,4 +1,4 @@
-C MP+wmbonceonce+rmbonceonce
+C MP+fencewmbonceonce+fencermbonceonce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/R+mbonceonces.litmus b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus
index a0e884ad2132..222a0b850b4a 100644
--- a/tools/memory-model/litmus-tests/R+mbonceonces.litmus
+++ b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus
@@ -1,4 +1,4 @@
-C R+mbonceonces
+C R+fencembonceonces
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/README b/tools/memory-model/litmus-tests/README
index 17eb9a8c222d..4581ec2d3c57 100644
--- a/tools/memory-model/litmus-tests/README
+++ b/tools/memory-model/litmus-tests/README
@@ -18,7 +18,7 @@ CoWW+poonceonce.litmus
Test of write-write coherence, that is, whether or not two
successive writes to the same variable are ordered.
-IRIW+mbonceonces+OnceOnce.litmus
+IRIW+fencembonceonces+OnceOnce.litmus
Test of independent reads from independent writes with smp_mb()
between each pair of reads. In other words, is smp_mb()
sufficient to cause two different reading processes to agree on
@@ -47,7 +47,7 @@ ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
Can a release-acquire chain order a prior store against
a later load?
-LB+ctrlonceonce+mbonceonce.litmus
+LB+fencembonceonce+ctrlonceonce.litmus
Do a control dependency and an smp_mb() suffice for the
load-buffering litmus test, where each process reads from one
of two variables then writes to the other?
@@ -88,14 +88,14 @@ MP+porevlocks.litmus
As below, but with the first access of the writer process
and the second access of reader process protected by a lock.
-MP+wmbonceonce+rmbonceonce.litmus
+MP+fencewmbonceonce+fencermbonceonce.litmus
Do an smp_wmb() (between the stores) and an smp_rmb() (between
the loads) suffice for the message-passing litmus test, where one
process writes data and then a flag, and the other process reads
the flag and then the data. (This is similar to the ISA2 tests,
but with two processes instead of three.)
-R+mbonceonces.litmus
+R+fencembonceonces.litmus
This is the fully ordered (via smp_mb()) version of one of
the classic counterintuitive litmus tests that illustrates the
effects of store propagation delays.
@@ -103,7 +103,7 @@ R+mbonceonces.litmus
R+poonceonces.litmus
As above, but without the smp_mb() invocations.
-SB+mbonceonces.litmus
+SB+fencembonceonces.litmus
This is the fully ordered (again, via smp_mb()) version of store
buffering, which forms the core of Dekker's mutual-exclusion
algorithm.
@@ -111,15 +111,24 @@ SB+mbonceonces.litmus
SB+poonceonces.litmus
As above, but without the smp_mb() invocations.
+SB+rfionceonce-poonceonces.litmus
+ This litmus test demonstrates that LKMM is not fully multicopy
+ atomic. (Neither is it other-multicopy-atomic.) This litmus test
+ also demonstrates the "locations" debugging aid, which designates
+ additional registers and locations to be printed out in the dump
+ of final states in the herd7 output. Without the "locations"
+ statement, only those registers and locations mentioned in the
+ "exists" clause will be printed.
+
S+poonceonces.litmus
As below, but without the smp_wmb() and acquire load.
-S+wmbonceonce+poacquireonce.litmus
+S+fencewmbonceonce+poacquireonce.litmus
Can an smp_wmb(), instead of a release, and an acquire order
a prior store against a subsequent store?
WRC+poonceonces+Once.litmus
-WRC+pooncerelease+rmbonceonce+Once.litmus
+WRC+pooncerelease+fencermbonceonce+Once.litmus
These two are members of an extension of the MP litmus-test
class in which the first write is moved to a separate process.
The second is forbidden because smp_store_release() is
@@ -134,7 +143,7 @@ Z6.0+pooncelock+poonceLock+pombonce.litmus
As above, but with smp_mb__after_spinlock() immediately
following the spin_lock().
-Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus
+Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
Is the ordering provided by a release-acquire chain sufficient
to make ordering apparent to accesses by a process that does
not participate in that release-acquire chain?
diff --git a/tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus
index c53350205d28..18479823cd6c 100644
--- a/tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus
+++ b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus
@@ -1,4 +1,4 @@
-C S+wmbonceonce+poacquireonce
+C S+fencewmbonceonce+poacquireonce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/SB+mbonceonces.litmus b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus
index 74b874ffa8da..ed5fff18d223 100644
--- a/tools/memory-model/litmus-tests/SB+mbonceonces.litmus
+++ b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus
@@ -1,4 +1,4 @@
-C SB+mbonceonces
+C SB+fencembonceonces
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus
new file mode 100644
index 000000000000..04a16603660b
--- /dev/null
+++ b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus
@@ -0,0 +1,32 @@
+C SB+rfionceonce-poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that LKMM is not fully multicopy atomic.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+ int r1;
+ int r2;
+
+ WRITE_ONCE(*x, 1);
+ r1 = READ_ONCE(*x);
+ r2 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y)
+{
+ int r3;
+ int r4;
+
+ WRITE_ONCE(*y, 1);
+ r3 = READ_ONCE(*y);
+ r4 = READ_ONCE(*x);
+}
+
+locations [0:r1; 1:r3; x; y] (* Debug aid: Print things not in "exists". *)
+exists (0:r2=0 /\ 1:r4=0)
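Following the herd7 usage documented in the README above, the new test can be checked against the model with:

	$ herd7 -conf linux-kernel.cfg litmus-tests/SB+rfionceonce-poonceonces.litmus

Since the result is "Sometimes", the Observation line is expected to report Sometimes rather than Never, and the "locations" statement adds 0:r1, 1:r3, x, and y to each dumped state.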
diff --git a/tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus
index ad3448b941e6..e9947250d7de 100644
--- a/tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus
+++ b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus
@@ -1,4 +1,4 @@
-C WRC+pooncerelease+rmbonceonce+Once
+C WRC+pooncerelease+fencermbonceonce+Once
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
index a20fc3fafb53..88e70b87a683 100644
--- a/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus
+++ b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
@@ -1,4 +1,4 @@
-C Z6.0+pooncerelease+poacquirerelease+mbonceonce
+C Z6.0+pooncerelease+poacquirerelease+fencembonceonce
(*
* Result: Sometimes
diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh
index af0aa15ab84e..ca528f9a24d4 100644..100755
--- a/tools/memory-model/scripts/checkalllitmus.sh
+++ b/tools/memory-model/scripts/checkalllitmus.sh
@@ -9,7 +9,7 @@
# appended.
#
# Usage:
-# sh checkalllitmus.sh [ directory ]
+# checkalllitmus.sh [ directory ]
#
# The LINUX_HERD_OPTIONS environment variable may be used to specify
# arguments to herd, whose default is defined by the checklitmus.sh script.
diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh
index e2e477472844..bf12a75c0719 100644..100755
--- a/tools/memory-model/scripts/checklitmus.sh
+++ b/tools/memory-model/scripts/checklitmus.sh
@@ -8,7 +8,7 @@
# with ".out" appended.
#
# Usage:
-# sh checklitmus.sh file.litmus
+# checklitmus.sh file.litmus
#
# The LINUX_HERD_OPTIONS environment variable may be used to specify
# arguments to herd, which default to "-conf linux-kernel.cfg". Thus,
diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
index 9c9dc579bd7d..46f516dd80ce 100644
--- a/tools/objtool/arch/x86/include/asm/orc_types.h
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
@@ -88,6 +88,7 @@ struct orc_entry {
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
+ unsigned end:1;
} __packed;
/*
@@ -101,6 +102,7 @@ struct unwind_hint {
s16 sp_offset;
u8 sp_reg;
u8 type;
+ u8 end;
};
#endif /* __ASSEMBLY__ */
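A hypothetical consumer-side sketch of the new bit (the surrounding loop is illustrative, not taken from the unwinder itself): an ORC-based unwinder would treat an entry with end set as the final frame:

	/* Illustrative only: stop the unwind at an entry marked as the end. */
	if (orc->end)
		goto the_end;	/* no caller frame beyond this point */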
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index f4a25bd1871f..2928939b98ec 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1157,6 +1157,7 @@ static int read_unwind_hints(struct objtool_file *file)
cfa->offset = hint->sp_offset;
insn->state.type = hint->type;
+ insn->state.end = hint->end;
}
return 0;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index c6b68fcb926f..95700a2bcb7c 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -31,7 +31,7 @@ struct insn_state {
int stack_size;
unsigned char type;
bool bp_scratch;
- bool drap;
+ bool drap, end;
int drap_reg, drap_offset;
struct cfi_reg vals[CFI_NUM_REGS];
};
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index c3343820916a..faa444270ee3 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -203,7 +203,8 @@ int orc_dump(const char *_objname)
print_reg(orc[i].bp_reg, orc[i].bp_offset);
- printf(" type:%s\n", orc_type_name(orc[i].type));
+ printf(" type:%s end:%d\n",
+ orc_type_name(orc[i].type), orc[i].end);
}
elf_end(elf);
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 18384d9be4e1..3f98dcfbc177 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -31,6 +31,8 @@ int create_orc(struct objtool_file *file)
struct cfi_reg *cfa = &insn->state.cfa;
struct cfi_reg *bp = &insn->state.regs[CFI_BP];
+ orc->end = insn->state.end;
+
if (cfa->base == CFI_UNDEFINED) {
orc->sp_reg = ORC_REG_UNDEFINED;
continue;
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 11300dbe35c5..236b9b97dfdb 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -18,6 +18,10 @@ various perf commands with the -e option.
OPTIONS
-------
+-d::
+--desc::
+Print extra event descriptions. (default)
+
--no-desc::
Don't print descriptions.
@@ -25,11 +29,13 @@ Don't print descriptions.
--long-desc::
Print longer event descriptions.
+--debug::
+Enable debugging output.
+
--details::
Print how named events are resolved internally into perf events, and also
any extra expressions computed by perf stat.
-
[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
@@ -234,7 +240,7 @@ perf also supports group leader sampling using the :S specifier.
perf record -e '{cycles,instructions}:S' ...
perf report --group
-Normally all events in a event group sample, but with :S only
+Normally all events in an event group sample, but with :S only
the first event (the leader) samples, and it only reads the values of the
other events in the group.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 04168da4268e..246dee081efd 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -94,7 +94,7 @@ OPTIONS
"perf report" to view group events together.
--filter=<filter>::
- Event filter. This option should follow a event selector (-e) which
+ Event filter. This option should follow an event selector (-e) which
selects either tracepoint event(s) or a hardware trace PMU
(e.g. Intel PT or CoreSight).
@@ -153,7 +153,7 @@ OPTIONS
--exclude-perf::
Don't record events issued by perf itself. This option should follow
- a event selector (-e) which selects tracepoint event(s). It adds a
+ an event selector (-e) which selects tracepoint event(s). It adds a
filter expression 'common_pid != $PERFPID' to filters. If other
'--filter' exists, the new filter expression will be combined with
them by '&&'.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index f5a3b402589e..f6d1a03c7523 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -54,6 +54,8 @@ endif
ifeq ($(SRCARCH),arm64)
NO_PERF_REGS := 0
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
@@ -905,8 +907,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
-perf_include_dir = lib/include/perf
-perf_examples_dir = lib/examples/perf
+perf_include_dir = lib/perf/include
+perf_examples_dir = lib/perf/examples
sharedir = $(prefix)/share
template_dir = share/perf-core/templates
STRACE_GROUPS_DIR = share/perf-core/strace/groups
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index ecc9fc952655..b3d1b12a5081 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -384,6 +384,8 @@ export INSTALL SHELL_PATH
SHELL = $(SHELL_PATH)
+linux_uapi_dir := $(srctree)/tools/include/uapi/linux
+
beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
drm_ioctl_array := $(beauty_ioctl_outdir)/drm_ioctl_array.c
@@ -431,6 +433,12 @@ kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh
$(kvm_ioctl_array): $(kvm_hdr_dir)/kvm.h $(kvm_ioctl_tbl)
$(Q)$(SHELL) '$(kvm_ioctl_tbl)' $(kvm_hdr_dir) > $@
+socket_ipproto_array := $(beauty_outdir)/socket_ipproto_array.c
+socket_ipproto_tbl := $(srctree)/tools/perf/trace/beauty/socket_ipproto.sh
+
+$(socket_ipproto_array): $(linux_uapi_dir)/in.h $(socket_ipproto_tbl)
+ $(Q)$(SHELL) '$(socket_ipproto_tbl)' $(linux_uapi_dir) > $@
+
vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c
vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux
vhost_virtio_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
@@ -566,6 +574,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(sndrv_ctl_ioctl_array) \
$(kcmp_type_array) \
$(kvm_ioctl_array) \
+ $(socket_ipproto_array) \
$(vhost_virtio_ioctl_array) \
$(madvise_behavior_array) \
$(perf_ioctl_array) \
@@ -860,6 +869,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(sndrv_pcm_ioctl_array) \
$(OUTPUT)$(kvm_ioctl_array) \
$(OUTPUT)$(kcmp_type_array) \
+ $(OUTPUT)$(socket_ipproto_array) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
$(OUTPUT)$(prctl_option_array) \
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index 91de4860faad..f013b115dc86 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -4,3 +4,24 @@ PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_JITDUMP := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+
+#
+# Syscall table generation for perf
+#
+
+out := $(OUTPUT)arch/arm64/include/generated/asm
+header := $(out)/syscalls.c
+sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h
+sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, arm64) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
new file mode 100755
index 000000000000..52e197317d3e
--- /dev/null
+++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
@@ -0,0 +1,62 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf. Derived from
+# powerpc script.
+#
+# Copyright IBM Corp. 2017
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
+# Changed by: Kim Phillips <kim.phillips@arm.com>
+
+gcc=$1
+hostcc=$2
+input=$3
+
+if ! test -r $input; then
+ echo "Could not read input file" >&2
+ exit 1
+fi
+
+create_table_from_c()
+{
+ local sc nr last_sc
+
+ create_table_exe=`mktemp /tmp/create-table-XXXXXX`
+
+ {
+
+ cat <<-_EoHEADER
+ #include <stdio.h>
+ #define __ARCH_WANT_RENAMEAT
+ #include "$input"
+ int main(int argc, char *argv[])
+ {
+ _EoHEADER
+
+ while read sc nr; do
+ printf "%s\n" " printf(\"\\t[%d] = \\\"$sc\\\",\\n\", __NR_$sc);"
+ last_sc=$sc
+ done
+
+ printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
+ printf "}\n"
+
+ } | $hostcc -o $create_table_exe -x c -
+
+ $create_table_exe
+
+ rm -f $create_table_exe
+}
+
+create_table()
+{
+ echo "static const char *syscalltbl_arm64[] = {"
+ create_table_from_c
+ echo "};"
+}
+
+$gcc -E -dM -x c $input \
+ |sed -ne 's/^#define __NR_//p' \
+ |sort -t' ' -k2 -nu \
+ |create_table
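For illustration, the generated syscalls.c should look roughly like the sketch below; the entry indices and the MAX_ID value are placeholders that depend on the unistd.h being processed (the #define is legitimately emitted before the closing brace, since preprocessing happens before parsing):

	static const char *syscalltbl_arm64[] = {
		[0] = "io_setup",
		[1] = "io_destroy",
		/* ... one entry per __NR_* definition ... */
	#define SYSCALLTBL_ARM64_MAX_ID 292
	};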
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index ef5d59a5742e..7c6eeb4633fe 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -58,9 +58,13 @@ static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
}
/*
- * Check if return address is on the stack.
+ * Check if return address is on the stack. If return address
+ * is in a register (typically R0), it is yet to be saved on
+ * the stack.
*/
- if (nops != 0 || ops != NULL)
+ if ((nops != 0 || ops != NULL) &&
+ !(nops == 1 && ops[0].atom == DW_OP_regx &&
+ ops[0].number2 == 0 && ops[0].offset == 0))
return 0;
/*
@@ -246,7 +250,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
if (!chain || chain->nr < 3)
return skip_slot;
- ip = chain->ips[2];
+ ip = chain->ips[1];
thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index d233e2eb9592..aaabab5e2830 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -102,7 +102,7 @@ const char * const kvm_skip_events[] = {
int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
{
- if (strstr(cpuid, "IBM/S390")) {
+ if (strstr(cpuid, "IBM")) {
kvm->exit_reasons = sie_exit_reasons;
kvm->exit_reasons_isa = "SIE";
} else
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 6a8738f7ead3..f3aa9d02a5ab 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2193,7 +2193,7 @@ static void print_cacheline(struct c2c_hists *c2c_hists,
fprintf(out, "%s\n", bf);
fprintf(out, " -------------------------------------------------------------\n");
- hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, true);
+ hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false);
}
static void print_pareto(FILE *out)
@@ -2268,7 +2268,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
fprintf(out, "=================================================\n");
fprintf(out, "#\n");
- hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, false);
+ hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, true);
fprintf(out, "\n");
fprintf(out, "=================================================\n");
@@ -2349,6 +2349,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
" s Toggle full length of symbol and source line columns \n"
" q Return back to cacheline list \n";
+ if (!he)
+ return 0;
+
/* Display compact version first. */
c2c.symbol_full = false;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index d660cb7b222b..39db2ee32d48 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -696,7 +696,7 @@ static void hists__process(struct hists *hists)
hists__output_resort(hists, NULL);
hists__fprintf(hists, !quiet, 0, 0, 0, stdout,
- symbol_conf.use_callchain);
+ !symbol_conf.use_callchain);
}
static void data__fprintf(void)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index c04dc7b53797..02f7a3c27761 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -478,8 +478,8 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
- symbol_conf.use_callchain ||
- symbol_conf.show_branchflag_count);
+ !(symbol_conf.use_callchain ||
+ symbol_conf.show_branchflag_count));
fprintf(stdout, "\n\n");
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 05be023c3f0e..d097b5b47eb8 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -296,18 +296,6 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
-/*
- * Does the counter have nsecs as a unit?
- */
-static inline int nsec_counter(struct perf_evsel *evsel)
-{
- if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
- perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
- return 1;
-
- return 0;
-}
-
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -1058,34 +1046,6 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused,
fprintf(os->fh, "%*s ", metric_only_len, unit);
}
-static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
-{
- FILE *output = stat_config.output;
- double msecs = avg / NSEC_PER_MSEC;
- const char *fmt_v, *fmt_n;
- char name[25];
-
- fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
- fmt_n = csv_output ? "%s" : "%-25s";
-
- aggr_printout(evsel, id, nr);
-
- scnprintf(name, sizeof(name), "%s%s",
- perf_evsel__name(evsel), csv_output ? "" : " (msec)");
-
- fprintf(output, fmt_v, msecs, csv_sep);
-
- if (csv_output)
- fprintf(output, "%s%s", evsel->unit, csv_sep);
- else
- fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);
-
- fprintf(output, fmt_n, name);
-
- if (evsel->cgrp)
- fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
-}
-
static int first_shadow_cpu(struct perf_evsel *evsel, int id)
{
int i;
@@ -1241,11 +1201,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
return;
}
- if (metric_only)
- /* nothing */;
- else if (nsec_counter(counter))
- nsec_printout(id, nr, counter, uval);
- else
+ if (!metric_only)
abs_printout(id, nr, counter, uval);
out.print_metric = pm;
@@ -1331,7 +1287,7 @@ static void collect_all_aliases(struct perf_evsel *counter,
alias->scale != counter->scale ||
alias->cgrp != counter->cgrp ||
strcmp(alias->unit, counter->unit) ||
- nsec_counter(alias) != nsec_counter(counter))
+ perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter))
break;
alias->merged_stat = true;
cb(alias, data, false);
@@ -2449,6 +2405,18 @@ static int add_default_attributes(void)
return 0;
if (transaction_run) {
+ /* Handle -T as -M transaction. Once platform-specific metrics
+ * support has been added to the json files, all architectures
+ * will use this approach. To determine transaction support
+ * on an architecture, test for such a metric name.
+ */
+ if (metricgroup__has_metric("transaction")) {
+ struct option opt = { .value = &evsel_list };
+
+ return metricgroup__parse_groups(&opt, "transaction",
+ &metric_events);
+ }
+
if (pmu_have_event("cpu", "cycles-ct") &&
pmu_have_event("cpu", "el-start"))
err = parse_events(evsel_list, transaction_attrs,
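In effect, on platforms whose event JSON defines a "transaction" metric group, the following two invocations should now be equivalent (a sketch, not captured output):

	$ perf stat -T -a sleep 1
	$ perf stat -M transaction -a sleep 1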
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ffdc2769ff9f..d21d8751e749 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -307,7 +307,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
hists__fprintf(hists, false, top->print_entries - printed, win_width,
- top->min_percent, stdout, symbol_conf.use_callchain);
+ top->min_percent, stdout, !symbol_conf.use_callchain);
}
static void prompt_integer(int *target, const char *msg)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6a748eca2edb..88561eed7950 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -291,7 +291,7 @@ size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const cha
{
int idx = val - sa->offset;
- if (idx < 0 || idx >= sa->nr_entries)
+ if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL)
return scnprintf(bf, size, intfmt, val);
return scnprintf(bf, size, "%s", sa->entries[idx]);
@@ -761,10 +761,12 @@ static struct syscall_fmt {
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
{ .name = "socket",
.arg = { [0] = STRARRAY(family, socket_families),
- [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
+ [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
+ [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
{ .name = "socketpair",
.arg = { [0] = STRARRAY(family, socket_families),
- [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
+ [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
+ [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
{ .name = "stat", .alias = "newstat", },
{ .name = "statx",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
@@ -2990,6 +2992,7 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
if (trace__validate_ev_qualifier(trace))
goto out;
+ trace->trace_syscalls = true;
}
err = 0;
@@ -3045,7 +3048,7 @@ int cmd_trace(int argc, const char **argv)
},
.output = stderr,
.show_comm = true,
- .trace_syscalls = true,
+ .trace_syscalls = false,
.kernel_syscallchains = false,
.max_stack = UINT_MAX,
};
@@ -3191,13 +3194,7 @@ int cmd_trace(int argc, const char **argv)
if (!trace.trace_syscalls && !trace.trace_pgfaults &&
trace.evlist->nr_entries == 0 /* Was --events used? */) {
- pr_err("Please specify something to trace.\n");
- return -1;
- }
-
- if (!trace.trace_syscalls && trace.ev_qualifier) {
- pr_err("The -e option can't be used with --no-syscalls.\n");
- goto out;
+ trace.trace_syscalls = true;
}
if (output_name != NULL) {
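The resulting command-line behavior, illustratively:

	$ perf trace                # nothing specified: syscall tracing is enabled by default
	$ perf trace -e connect     # -e enables tracing of the named syscalls/events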
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 10f333e2e825..de28466c0186 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -7,6 +7,7 @@ include/uapi/drm/i915_drm.h
include/uapi/linux/fcntl.h
include/uapi/linux/kcmp.h
include/uapi/linux/kvm.h
+include/uapi/linux/in.h
include/uapi/linux/perf_event.h
include/uapi/linux/prctl.h
include/uapi/linux/sched.h
@@ -35,6 +36,7 @@ arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sie.h
arch/arm/include/uapi/asm/kvm.h
arch/arm64/include/uapi/asm/kvm.h
+arch/arm64/include/uapi/asm/unistd.h
arch/alpha/include/uapi/asm/errno.h
arch/mips/include/asm/errno.h
arch/mips/include/uapi/asm/errno.h
@@ -53,6 +55,7 @@ include/uapi/asm-generic/errno.h
include/uapi/asm-generic/errno-base.h
include/uapi/asm-generic/ioctls.h
include/uapi/asm-generic/mman-common.h
+include/uapi/asm-generic/unistd.h
'
check_2 () {
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index dd764ad5efdf..a63aa6241b7f 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef _PERF_BPF_H
#define _PERF_BPF_H
+
+#include <uapi/linux/bpf.h>
+
#define SEC(NAME) __attribute__((section(NAME), used))
#define probe(function, vars) \
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index d215714f48df..21bf7f5a3cf5 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -25,7 +25,9 @@ static inline unsigned long long rdclock(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
+#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024
+#endif
extern const char *input_name;
extern bool perf_host, perf_guest;
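With the guard in place, the limit can presumably be overridden at build time, e.g.:

	$ make -C tools/perf EXTRA_CFLAGS='-DMAX_NR_CPUS=4096'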
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
index bc03c06c3918..752e47eb6977 100644
--- a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
@@ -12,6 +12,21 @@
"ArchStdEvent": "L1D_CACHE_REFILL_WR",
},
{
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ },
+ {
"ArchStdEvent": "L1D_TLB_REFILL_RD",
},
{
@@ -24,9 +39,75 @@
"ArchStdEvent": "L1D_TLB_WR",
},
{
+ "ArchStdEvent": "L2D_TLB_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR",
+ },
+ {
"ArchStdEvent": "BUS_ACCESS_RD",
- },
- {
+ },
+ {
"ArchStdEvent": "BUS_ACCESS_WR",
- }
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF",
+ },
+ {
+ "ArchStdEvent": "EXC_SVC",
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ",
+ },
+ {
+ "ArchStdEvent": "EXC_SMC",
+ },
+ {
+ "ArchStdEvent": "EXC_HVC",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
index 0feedb40f30f..b6b7f29ca831 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
@@ -1,107 +1,125 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "L1I_L3_LOCAL_WRITES",
"BriefDescription": "L1I L3 Local Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1D_L3_LOCAL_WRITES",
"BriefDescription": "L1D L3 Local Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1I_L3_REMOTE_WRITES",
"BriefDescription": "L1I L3 Remote Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L3_REMOTE_WRITES",
"BriefDescription": "L1D L3 Remote Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_CACHELINE_INVALIDATES",
"BriefDescription": "L1I Cacheline Invalidates",
"PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
index 9a002b6967f1..436ce33f1182 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
@@ -1,335 +1,391 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB1_GPAGE_WRITES",
"BriefDescription": "DTLB1 Two-Gigabyte Page Writes",
"PublicDescription": "Counter:132 Name:DTLB1_GPAGE_WRITES A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB1_MISSES",
"BriefDescription": "L1C TLB1 Misses",
"PublicDescription": "Increments by one for any cycle where a Level-1 cache or Level-1 TLB miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Node Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Node L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "176",
"EventName": "L1I_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Node Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "218",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
"PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "219",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "220",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "448",
"EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
"BriefDescription": "Cycle count with one thread active",
"PublicDescription": "Cycle count with one thread active"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "449",
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
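
As a usage sketch (an assumption, not part of this patch: a perf built with these JSON files on an s390 machine exposing the CPU-measurement counter facility), the new metric can be requested by its MetricName, and perf expands the MetricExpr into the five underlying TEND/TABORT counters and reports the computed sum:

    # illustrative invocation; counter availability depends on the machine
    perf stat -M transaction -a -- sleep 1
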
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
index 8f653c9d899d..17fb5241928b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
@@ -1,47 +1,55 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index aa4dfb46b65b..e7a3524b748f 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -1,317 +1,370 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "Counter:128 Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
"PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
"PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
"PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
"PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
"PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
"PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
"PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
"PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
"PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
"PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
"PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
"PublicDescription": "Last Host Translation done"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
"PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "448",
"EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
"BriefDescription": "Cycle count with one thread active",
"PublicDescription": "Cycle count with one thread active"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "449",
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
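
A second sketch for the Unit key added throughout these files, on the assumption (not stated in this hunk) that perf maps the "CPU-M-CF" unit string to the s390 cpum_cf sysfs PMU, so the same counters can also be addressed through that PMU directly:

    # illustrative; requires the cpum_cf PMU to be present on the system
    perf stat -e cpum_cf/TX_C_TEND/ -a -- sleep 1
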
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
index b6d7fec7c2e7..b7b42a870bb0 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
@@ -1,143 +1,167 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
"PublicDescription": "Incremented by one for every store sent to Level-2 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
index 8682126aabb2..162251037219 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
@@ -1,209 +1,244 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "L1D_L2I_SOURCED_WRITES",
"BriefDescription": "L1D L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
"PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json b/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index db3a594ee1e4..68c92bb599ee 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -233,6 +233,8 @@ static struct map {
{ "QPI LL", "uncore_qpi" },
{ "SBO", "uncore_sbox" },
{ "iMPH-U", "uncore_arb" },
+ { "CPU-M-CF", "cpum_cf" },
+ { "CPU-M-SF", "cpum_sf" },
{}
};
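For reference, those "Unit" keys only take effect because of the jevents table just above: at build time the JSON unit string is translated into the sysfs PMU name the event will be programmed on. A minimal, self-contained sketch of that lookup (helper and type names here are illustrative, not the exact jevents internals):

	#include <stdio.h>
	#include <string.h>

	struct unit_map { const char *json; const char *perf; };

	static const struct unit_map unit_to_pmu[] = {
		{ "CPU-M-CF", "cpum_cf" },	/* s390 counter facility */
		{ "CPU-M-SF", "cpum_sf" },	/* s390 sampling facility */
		{ NULL, NULL },
	};

	static const char *json_unit_to_pmu(const char *unit)
	{
		for (const struct unit_map *m = unit_to_pmu; m->json; m++)
			if (!strcmp(m->json, unit))
				return m->perf;
		return NULL;
	}

	int main(void)
	{
		printf("%s\n", json_unit_to_pmu("CPU-M-CF"));	/* prints cpum_cf */
		return 0;
	}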
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index dd850a26d579..d7a5e1b9aa6f 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -385,7 +385,7 @@ static int test_and_print(struct test *t, bool force_skip, int subtest)
if (!t->subtest.get_nr)
pr_debug("%s:", t->desc);
else
- pr_debug("%s subtest %d:", t->desc, subtest);
+ pr_debug("%s subtest %d:", t->desc, subtest + 1);
switch (err) {
case TEST_OK:
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 61211918bfba..3b97ac018d5a 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1322,6 +1322,14 @@ static int test__intel_pt(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_complex_name(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0);
+ return 0;
+}
+
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1658,6 +1666,11 @@ static struct evlist_test test__events[] = {
.check = test__intel_pt,
.id = 52,
},
+ {
+ .name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
+ .check = test__checkevent_complex_name,
+ .id = 53
+ }
};
static struct evlist_test test__events_pmu[] = {
@@ -1676,6 +1689,11 @@ static struct evlist_test test__events_pmu[] = {
.check = test__checkevent_pmu_partial_time_callgraph,
.id = 2,
},
+ {
+ .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
+ .check = test__checkevent_complex_name,
+ .id = 3,
+ }
};
struct terms_test {
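These two cases exercise quoted event names containing ',' and ':' characters, which would otherwise terminate the name term during parsing. An illustrative invocation that has to survive such parsing, built from the test strings above:

	perf stat -e "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp" -a sleep 1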
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 94e513e62b34..3013ac8f83d0 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -13,11 +13,24 @@
libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
+event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
+
+add_libc_inet_pton_event() {
+
+ event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
+ grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
+
+ if [ $? -ne 0 -o -z "$event_name" ] ; then
+ printf "FAIL: could not add event\n"
+ return 1
+ fi
+}
+
trace_libc_inet_pton_backtrace() {
expected=`mktemp -u /tmp/expected.XXX`
- echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected
+ echo "ping[][0-9 \.:]+$event_name: \([[:xdigit:]]+\)" > $expected
echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
case "$(uname -m)" in
s390x)
@@ -26,6 +39,12 @@ trace_libc_inet_pton_backtrace() {
echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
+ ppc64|ppc64le)
+ eventattr='max-stack=4'
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
*)
eventattr='max-stack=3'
echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
@@ -35,7 +54,7 @@ trace_libc_inet_pton_backtrace() {
perf_data=`mktemp -u /tmp/perf.data.XXX`
perf_script=`mktemp -u /tmp/perf.script.XXX`
- perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+ perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
perf script -i $perf_data > $perf_script
exec 3<$perf_script
@@ -46,7 +65,7 @@ trace_libc_inet_pton_backtrace() {
echo "$line" | egrep -q "$pattern"
if [ $? -ne 0 ] ; then
printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
- exit 1
+ return 1
fi
done
@@ -56,13 +75,20 @@ trace_libc_inet_pton_backtrace() {
# even if the perf script output does not match.
}
+delete_libc_inet_pton_event() {
+
+ if [ -n "$event_name" ] ; then
+ perf probe -q -d $event_name
+ fi
+}
+
# Check for IPv6 interface existence
ip a sh lo | fgrep -q inet6 || exit 2
skip_if_no_perf_probe && \
-perf probe -q $libc inet_pton && \
+add_libc_inet_pton_event && \
trace_libc_inet_pton_backtrace
err=$?
rm -f ${perf_data} ${perf_script} ${expected}
-perf probe -q -d probe_libc:inet_pton
+delete_libc_inet_pton_event
exit $err
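The shell changes above cope with perf probe creating a suffixed event such as probe_libc:inet_pton_1 when the plain name is already taken; the test now captures whatever name was actually created and deletes exactly that one. The extended-regex pattern it greps for can be checked in isolation, for instance (illustrative):

	#include <regex.h>
	#include <stdio.h>

	int main(void)
	{
		regex_t re;

		regcomp(&re, "probe_libc:inet_pton(_[[:digit:]]+)?", REG_EXTENDED);
		/* regexec() returns 0 on match, so both lines print 0 */
		printf("%d\n", regexec(&re, "probe_libc:inet_pton", 0, NULL, 0));
		printf("%d\n", regexec(&re, "probe_libc:inet_pton_1", 0, NULL, 0));
		regfree(&re);
		return 0;
	}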
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 66330d4b739b..f528ba35e140 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -7,4 +7,5 @@ endif
libperf-y += kcmp.o
libperf-y += pkey_alloc.o
libperf-y += prctl.o
+libperf-y += socket.o
libperf-y += statx.o
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 984a504d335c..9615af5d412b 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -106,6 +106,9 @@ size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_a
size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3
+size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_SK_PROTO syscall_arg__scnprintf_socket_protocol
+
size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags
diff --git a/tools/perf/trace/beauty/drm_ioctl.sh b/tools/perf/trace/beauty/drm_ioctl.sh
index 2149d3a98e42..9d3816815e60 100755
--- a/tools/perf/trace/beauty/drm_ioctl.sh
+++ b/tools/perf/trace/beauty/drm_ioctl.sh
@@ -1,13 +1,14 @@
#!/bin/sh
-drm_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/drm/
+
printf "#ifndef DRM_COMMAND_BASE\n"
-grep "#define DRM_COMMAND_BASE" $drm_header_dir/drm.h
+grep "#define DRM_COMMAND_BASE" $header_dir/drm.h
printf "#endif\n"
printf "static const char *drm_ioctl_cmds[] = {\n"
-grep "^#define DRM_IOCTL.*DRM_IO" $drm_header_dir/drm.h | \
+grep "^#define DRM_IOCTL.*DRM_IO" $header_dir/drm.h | \
sed -r 's/^#define +DRM_IOCTL_([A-Z0-9_]+)[ ]+DRM_IO[A-Z]* *\( *(0x[[:xdigit:]]+),*.*/ [\2] = "\1",/g'
-grep "^#define DRM_I915_[A-Z_0-9]\+[ ]\+0x" $drm_header_dir/i915_drm.h | \
+grep "^#define DRM_I915_[A-Z_0-9]\+[ ]\+0x" $header_dir/i915_drm.h | \
sed -r 's/^#define +DRM_I915_([A-Z0-9_]+)[ ]+(0x[[:xdigit:]]+)/\t[DRM_COMMAND_BASE + \2] = "I915_\1",/g'
printf "};\n"
diff --git a/tools/perf/trace/beauty/kcmp_type.sh b/tools/perf/trace/beauty/kcmp_type.sh
index 40d063b8c082..a3c304caa336 100755
--- a/tools/perf/trace/beauty/kcmp_type.sh
+++ b/tools/perf/trace/beauty/kcmp_type.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *kcmp_types[] = {\n"
regex='^[[:space:]]+(KCMP_(\w+)),'
diff --git a/tools/perf/trace/beauty/kvm_ioctl.sh b/tools/perf/trace/beauty/kvm_ioctl.sh
index bd28817afced..c4699fd46bb6 100755
--- a/tools/perf/trace/beauty/kvm_ioctl.sh
+++ b/tools/perf/trace/beauty/kvm_ioctl.sh
@@ -1,10 +1,10 @@
#!/bin/sh
-kvm_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *kvm_ioctl_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+KVM_(\w+)[[:space:]]+_IO[RW]*\([[:space:]]*KVMIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${kvm_header_dir}/kvm.h | \
+egrep $regex ${header_dir}/kvm.h | \
sed -r "s/$regex/\2 \1/g" | \
egrep -v " ((ARM|PPC|S390)_|[GS]ET_(DEBUGREGS|PIT2|XSAVE|TSC_KHZ)|CREATE_SPAPR_TCE_64)" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
diff --git a/tools/perf/trace/beauty/madvise_behavior.sh b/tools/perf/trace/beauty/madvise_behavior.sh
index 60ef8640ee70..431639eb4d29 100755
--- a/tools/perf/trace/beauty/madvise_behavior.sh
+++ b/tools/perf/trace/beauty/madvise_behavior.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
printf "static const char *madvise_advices[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MADV_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
diff --git a/tools/perf/trace/beauty/perf_ioctl.sh b/tools/perf/trace/beauty/perf_ioctl.sh
index faea4237c793..6492c74df928 100755
--- a/tools/perf/trace/beauty/perf_ioctl.sh
+++ b/tools/perf/trace/beauty/perf_ioctl.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *perf_ioctl_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+PERF_EVENT_IOC_(\w+)[[:space:]]+_IO[RW]*[[:space:]]*\([[:space:]]*.\$.[[:space:]]*,[[:space:]]*([[:digit:]]+).*'
diff --git a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
index 62e51a02b839..e0a51aeb20b2 100755
--- a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
+++ b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
printf "static const char *pkey_alloc_access_rights[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+PKEY_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*'
diff --git a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
index aad5ab130539..eb511bb5fbd3 100755
--- a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
+++ b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
@@ -1,8 +1,8 @@
#!/bin/sh
-sound_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
printf "static const char *sndrv_ctl_ioctl_cmds[] = {\n"
-grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $sound_header_dir/asound.h | \
+grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $header_dir/asound.h | \
sed -r 's/^#define +SNDRV_CTL_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.U., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g'
printf "};\n"
diff --git a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
index b7e9ef6b2f55..6818392968b2 100755
--- a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
+++ b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
@@ -1,8 +1,8 @@
#!/bin/sh
-sound_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
printf "static const char *sndrv_pcm_ioctl_cmds[] = {\n"
-grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $sound_header_dir/asound.h | \
+grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $header_dir/asound.h | \
sed -r 's/^#define +SNDRV_PCM_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.A., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g'
printf "};\n"
diff --git a/tools/perf/trace/beauty/socket.c b/tools/perf/trace/beauty/socket.c
new file mode 100644
index 000000000000..65227269384b
--- /dev/null
+++ b/tools/perf/trace/beauty/socket.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace/beauty/socket.c
+ *
+ * Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+#include <sys/types.h>
+#include <sys/socket.h>
+
+static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size)
+{
+#include "trace/beauty/generated/socket_ipproto_array.c"
+ static DEFINE_STRARRAY(socket_ipproto);
+
+ return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", protocol);
+}
+
+size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int domain = syscall_arg__val(arg, 0);
+
+ if (domain == AF_INET || domain == AF_INET6)
+ return socket__scnprintf_ipproto(arg->val, bf, size);
+
+ return syscall_arg__scnprintf_int(bf, size, arg);
+}
diff --git a/tools/perf/trace/beauty/socket_ipproto.sh b/tools/perf/trace/beauty/socket_ipproto.sh
new file mode 100755
index 000000000000..a3cc24633bec
--- /dev/null
+++ b/tools/perf/trace/beauty/socket_ipproto.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
+printf "static const char *socket_ipproto[] = {\n"
+regex='^[[:space:]]+IPPROTO_(\w+)[[:space:]]+=[[:space:]]+([[:digit:]]+),.*'
+
+egrep $regex ${header_dir}/in.h | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n"
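The script's printf format shows what the generated socket_ipproto_array.c included by socket.c looks like. An abridged example of that output, using well-known IPPROTO_* values (illustrative excerpt, not the full generated file):

	static const char *socket_ipproto[] = {
		[0] = "IP",
		[1] = "ICMP",
		[6] = "TCP",
		[17] = "UDP",
		/* ... remaining IPPROTO_* entries elided ... */
	};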
diff --git a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
index 76f1de697787..0f6a5197d0be 100755
--- a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
+++ b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
@@ -1,17 +1,17 @@
#!/bin/sh
-vhost_virtio_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *vhost_virtio_ioctl_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${vhost_virtio_header_dir}/vhost.h | \
+egrep $regex ${header_dir}/vhost.h | \
sed -r "s/$regex/\2 \1/g" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n"
printf "static const char *vhost_virtio_ioctl_read_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?R\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${vhost_virtio_header_dir}/vhost.h | \
+egrep $regex ${header_dir}/vhost.h | \
sed -r "s/$regex/\2 \1/g" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n"
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 69b7a28f7a1c..74c4ae1f0a05 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -529,7 +529,7 @@ out:
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
char *bf, size_t bfsz, FILE *fp,
- bool use_callchain)
+ bool ignore_callchains)
{
int ret;
int callchain_ret = 0;
@@ -550,7 +550,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
ret = fprintf(fp, "%s\n", bf);
- if (hist_entry__has_callchains(he) && use_callchain)
+ if (hist_entry__has_callchains(he) && !ignore_callchains)
callchain_ret = hist_entry_callchain__fprintf(he, total_period,
0, fp);
@@ -755,7 +755,7 @@ int hists__fprintf_headers(struct hists *hists, FILE *fp)
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
- bool use_callchain)
+ bool ignore_callchains)
{
struct rb_node *nd;
size_t ret = 0;
@@ -799,7 +799,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
if (percent < min_pcnt)
continue;
- ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
+ ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
if (max_rows && ++nr_rows >= max_rows)
break;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index cee658733e2c..3d02ae38ec56 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -747,7 +747,9 @@ int bpf__load(struct bpf_object *obj)
err = bpf_object__load(obj);
if (err) {
- pr_debug("bpf: load objects failed\n");
+ char bf[128];
+ libbpf_strerror(err, bf, sizeof(bf));
+ pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
return err;
}
return 0;
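Instead of a bare failure message, the loader now decodes the error. libbpf_strerror() handles libbpf's own error codes as well as plain negative errno values; a minimal usage sketch (the include path is an assumption and may differ per setup):

	#include <stdio.h>
	#include <bpf/libbpf.h>		/* assumed install location of libbpf.h */

	void report_load_error(int err)
	{
		char bf[128];

		libbpf_strerror(err, bf, sizeof(bf));
		fprintf(stderr, "bpf: load objects failed: err=%d (%s)\n", err, bf);
	}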
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 7798a2cc8a86..31279a7bd919 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -20,9 +20,10 @@ static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,}
static struct comm_str *comm_str__get(struct comm_str *cs)
{
- if (cs)
- refcount_inc(&cs->refcnt);
- return cs;
+ if (cs && refcount_inc_not_zero(&cs->refcnt))
+ return cs;
+
+ return NULL;
}
static void comm_str__put(struct comm_str *cs)
@@ -67,9 +68,14 @@ struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root)
parent = *p;
iter = rb_entry(parent, struct comm_str, rb_node);
+ /*
+ * If we race with comm_str__put, iter->refcnt is 0
+ * and it will shortly be removed by the comm_str__put
+ * call; ignore it in this search.
+ */
cmp = strcmp(str, iter->str);
- if (!cmp)
- return comm_str__get(iter);
+ if (!cmp && comm_str__get(iter))
+ return iter;
if (cmp < 0)
p = &(*p)->rb_left;
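The comm.c change closes a use-after-free window: a lookup that races with the final put must not bump a refcount that has already hit zero. A self-contained sketch of the same "get only if still live" pattern using C11 atomics (the tools code uses refcount_inc_not_zero() instead):

	#include <stdatomic.h>
	#include <stdbool.h>

	static bool get_if_live(atomic_int *refcnt)
	{
		int old = atomic_load(refcnt);

		while (old != 0)	/* CAS loop; 'old' is refreshed on failure */
			if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
				return true;	/* took a reference */
		return false;			/* object is already being destroyed */
	}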
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 4d5fc374e730..938def6d0bb9 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -31,6 +31,8 @@
#endif
#endif
+#define CS_ETM_INVAL_ADDR 0xdeadbeefdeadbeefUL
+
struct cs_etm_decoder {
void *data;
void (*packet_printer)(const char *msg);
@@ -261,8 +263,8 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
decoder->tail = 0;
decoder->packet_count = 0;
for (i = 0; i < MAX_BUFFER; i++) {
- decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
- decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
+ decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[i].last_instr_taken_branch = false;
decoder->packet_buffer[i].exc = false;
decoder->packet_buffer[i].exc_ret = false;
@@ -295,8 +297,8 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_buffer[et].exc = false;
decoder->packet_buffer[et].exc_ret = false;
decoder->packet_buffer[et].cpu = *((int *)inode->priv);
- decoder->packet_buffer[et].start_addr = 0xdeadbeefdeadbeefUL;
- decoder->packet_buffer[et].end_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
+ decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
if (decoder->packet_count == MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index 743f5f444304..612b5755f742 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -23,6 +23,7 @@ struct cs_etm_buffer {
};
enum cs_etm_sample_type {
+ CS_ETM_EMPTY = 0,
CS_ETM_RANGE = 1 << 0,
CS_ETM_TRACE_ON = 1 << 1,
};
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 822ba915d144..2ae640257fdb 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -494,6 +494,10 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
{
+ /* Returns 0 for the CS_ETM_TRACE_ON packet */
+ if (packet->sample_type == CS_ETM_TRACE_ON)
+ return 0;
+
/*
* The packet records the execution range with an exclusive end address
*
@@ -505,6 +509,15 @@ static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
return packet->end_addr - A64_INSTR_SIZE;
}
+static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
+{
+ /* Returns 0 for the CS_ETM_TRACE_ON packet */
+ if (packet->sample_type == CS_ETM_TRACE_ON)
+ return 0;
+
+ return packet->start_addr;
+}
+
static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
{
/*
@@ -546,7 +559,7 @@ static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
be = &bs->entries[etmq->last_branch_pos];
be->from = cs_etm__last_executed_instr(etmq->prev_packet);
- be->to = etmq->packet->start_addr;
+ be->to = cs_etm__first_executed_instr(etmq->packet);
/* No support for mispredict */
be->flags.mispred = 0;
be->flags.predicted = 1;
@@ -701,7 +714,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
sample.pid = etmq->pid;
sample.tid = etmq->tid;
- sample.addr = etmq->packet->start_addr;
+ sample.addr = cs_etm__first_executed_instr(etmq->packet);
sample.id = etmq->etm->branches_id;
sample.stream_id = etmq->etm->branches_id;
sample.period = 1;
@@ -897,13 +910,23 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
etmq->period_instructions = instrs_over;
}
- if (etm->sample_branches &&
- etmq->prev_packet &&
- etmq->prev_packet->sample_type == CS_ETM_RANGE &&
- etmq->prev_packet->last_instr_taken_branch) {
- ret = cs_etm__synth_branch_sample(etmq);
- if (ret)
- return ret;
+ if (etm->sample_branches && etmq->prev_packet) {
+ bool generate_sample = false;
+
+ /* Generate sample for tracing on packet */
+ if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON)
+ generate_sample = true;
+
+ /* Generate sample for branch taken packet */
+ if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
+ etmq->prev_packet->last_instr_taken_branch)
+ generate_sample = true;
+
+ if (generate_sample) {
+ ret = cs_etm__synth_branch_sample(etmq);
+ if (ret)
+ return ret;
+ }
}
if (etm->sample_branches || etm->synth_opts.last_branch) {
@@ -922,10 +945,17 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
static int cs_etm__flush(struct cs_etm_queue *etmq)
{
int err = 0;
+ struct cs_etm_auxtrace *etm = etmq->etm;
struct cs_etm_packet *tmp;
+ if (!etmq->prev_packet)
+ return 0;
+
+ /* Handle start tracing packet */
+ if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
+ goto swap_packet;
+
if (etmq->etm->synth_opts.last_branch &&
- etmq->prev_packet &&
etmq->prev_packet->sample_type == CS_ETM_RANGE) {
/*
* Generate a last branch event for the branches left in the
@@ -939,8 +969,22 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
err = cs_etm__synth_instruction_sample(
etmq, addr,
etmq->period_instructions);
+ if (err)
+ return err;
+
etmq->period_instructions = 0;
+ }
+
+ if (etm->sample_branches &&
+ etmq->prev_packet->sample_type == CS_ETM_RANGE) {
+ err = cs_etm__synth_branch_sample(etmq);
+ if (err)
+ return err;
+ }
+
+swap_packet:
+ if (etmq->etm->synth_opts.last_branch) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
@@ -1020,6 +1064,13 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
*/
cs_etm__flush(etmq);
break;
+ case CS_ETM_EMPTY:
+ /*
+ * Should not receive an empty packet;
+ * report an error.
+ */
+ pr_err("CS ETM Trace: empty packet\n");
+ return -EINVAL;
default:
break;
}
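Condensing the new control flow: a branch sample is now emitted both for a taken-branch range packet and for a TRACE_ON packet, whose address helpers return 0 so the trace discontinuity is visible in the sample. A simplified sketch of the decision (local stand-in types, not the real headers):

	#include <stdbool.h>

	enum sample_type { ETM_EMPTY = 0, ETM_RANGE = 1 << 0, ETM_TRACE_ON = 1 << 1 };

	struct packet_view {
		enum sample_type sample_type;
		bool last_instr_taken_branch;
	};

	static bool should_sample_branch(const struct packet_view *prev)
	{
		if (prev->sample_type == ETM_TRACE_ON)
			return true;	/* trace discontinuity */
		return prev->sample_type == ETM_RANGE &&
		       prev->last_instr_taken_branch;
	}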
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 94fce4f537e9..ddf84b941abf 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -260,6 +260,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
evsel->attr.sample_period = 1;
}
+ if (perf_evsel__is_clock(evsel)) {
+ /*
+ * evsel->unit points to the static alias->unit string,
+ * so it's OK to use a static string here.
+ */
+ static const char *unit = "msec";
+
+ evsel->unit = unit;
+ evsel->scale = 1e-6;
+ }
+
return evsel;
}
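The cpu-clock and task-clock software events count nanoseconds, so tagging them with unit "msec" and scale 1e-6 makes perf stat display milliseconds. A one-line worked example of the scaling:

	#include <stdio.h>

	int main(void)
	{
		double raw_ns = 2500000000.0;		/* example raw count: 2.5 s */

		printf("%.3f msec\n", raw_ns * 1e-6);	/* prints 2500.000 msec */
		return 0;
	}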
@@ -848,6 +859,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
}
}
+static bool is_dummy_event(struct perf_evsel *evsel)
+{
+ return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
+ (evsel->attr.config == PERF_COUNT_SW_DUMMY);
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1086,6 +1103,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
else
perf_evsel__reset_sample_bit(evsel, PERIOD);
}
+
+ /*
+ * For initial_delay, a dummy event is added implicitly.
+ * The software event will error out with -EOPNOTSUPP
+ * if the BRANCH_STACK bit is set.
+ */
+ if (opts->initial_delay && is_dummy_event(evsel))
+ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index d277930b19a1..973c03167947 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -402,10 +402,13 @@ bool perf_evsel__is_function_event(struct perf_evsel *evsel);
static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
{
- struct perf_event_attr *attr = &evsel->attr;
+ return perf_evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
+}
- return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) &&
- (attr->type == PERF_TYPE_SOFTWARE);
+static inline bool perf_evsel__is_clock(struct perf_evsel *evsel)
+{
+ return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
+ perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}
struct perf_attr_details {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 653ff65aa2c3..5af58aac91ad 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2587,7 +2587,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
FEAT_OPN(BRANCH_STACK, branch_stack, false),
FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
- FEAT_OPN(GROUP_DESC, group_desc, false),
+ FEAT_OPR(GROUP_DESC, group_desc, false),
FEAT_OPN(AUXTRACE, auxtrace, false),
FEAT_OPN(STAT, stat, false),
FEAT_OPN(CACHE, cache, true),
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 73049f7f0f60..3badd7f1e1b8 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -181,7 +181,7 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
- bool use_callchain);
+ bool ignore_callchains);
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp);
void hists__filter_by_dso(struct hists *hists);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index e7b4a8b513f2..b300a3973448 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -408,23 +408,16 @@ out_err:
}
/*
- * Caller must eventually drop thread->refcnt returned with a successful
- * lookup/new thread inserted.
+ * Front-end cache - TID lookups come in blocks,
+ * so most of the time we don't have to look up
+ * the full rbtree:
*/
-static struct thread *____machine__findnew_thread(struct machine *machine,
- struct threads *threads,
- pid_t pid, pid_t tid,
- bool create)
+static struct thread*
+__threads__get_last_match(struct threads *threads, struct machine *machine,
+ int pid, int tid)
{
- struct rb_node **p = &threads->entries.rb_node;
- struct rb_node *parent = NULL;
struct thread *th;
- /*
- * Front-end cache - TID lookups come in blocks,
- * so most of the time we dont have to look up
- * the full rbtree:
- */
th = threads->last_match;
if (th != NULL) {
if (th->tid == tid) {
@@ -435,12 +428,57 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
threads->last_match = NULL;
}
+ return NULL;
+}
+
+static struct thread*
+threads__get_last_match(struct threads *threads, struct machine *machine,
+ int pid, int tid)
+{
+ struct thread *th = NULL;
+
+ if (perf_singlethreaded)
+ th = __threads__get_last_match(threads, machine, pid, tid);
+
+ return th;
+}
+
+static void
+__threads__set_last_match(struct threads *threads, struct thread *th)
+{
+ threads->last_match = th;
+}
+
+static void
+threads__set_last_match(struct threads *threads, struct thread *th)
+{
+ if (perf_singlethreaded)
+ __threads__set_last_match(threads, th);
+}
+
+/*
+ * Caller must eventually drop thread->refcnt returned with a successful
+ * lookup/new thread inserted.
+ */
+static struct thread *____machine__findnew_thread(struct machine *machine,
+ struct threads *threads,
+ pid_t pid, pid_t tid,
+ bool create)
+{
+ struct rb_node **p = &threads->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct thread *th;
+
+ th = threads__get_last_match(threads, machine, pid, tid);
+ if (th)
+ return th;
+
while (*p != NULL) {
parent = *p;
th = rb_entry(parent, struct thread, rb_node);
if (th->tid == tid) {
- threads->last_match = th;
+ threads__set_last_match(threads, th);
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
@@ -477,7 +515,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* It is now in the rbtree, get a ref
*/
thread__get(th);
- threads->last_match = th;
+ threads__set_last_match(threads, th);
++threads->nr;
}
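The refactoring above makes the last_match shortcut explicit and, crucially, bypasses it unless perf runs single-threaded, so concurrent lookups never read or write the shared cache pointer. The guard pattern, reduced to its essentials (stand-in types; the real version also checks the tid and takes a reference):

	#include <stdbool.h>
	#include <stddef.h>

	extern bool perf_singlethreaded;	/* assumed global flag, as in perf */

	struct thread;
	struct threads { struct thread *last_match; };

	static struct thread *cached_last_match(struct threads *t)
	{
		if (!perf_singlethreaded)
			return NULL;	/* skip the cache, fall back to the rbtree */
		return t->last_match;
	}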
@@ -1635,7 +1673,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
struct threads *threads = machine__threads(machine, th->tid);
if (threads->last_match == th)
- threads->last_match = NULL;
+ threads__set_last_match(threads, NULL);
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
@@ -2272,6 +2310,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
const char *srcline = NULL;
+ u64 addr;
if (symbol_conf.hide_unresolved && entry->sym == NULL)
return 0;
@@ -2279,7 +2318,13 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
return 0;
- srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
+ /*
+ * Convert entry->ip from a virtual address to an offset in
+ * its corresponding binary.
+ */
+ addr = map__map_ip(entry->map, entry->ip);
+
+ srcline = callchain_srcline(entry->map, entry->sym, addr);
return callchain_cursor_append(cursor, entry->ip,
entry->map, entry->sym,
false, NULL, 0, 0, 0, srcline);
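The fix distinguishes two address spaces: callchain entries keep the runtime virtual address, while srcline lookups need an offset inside the mapped binary, which map__map_ip() computes. The usual conversion looks roughly like this (illustrative names, simplified map layout):

	static unsigned long long virt_to_dso_offset(unsigned long long ip,
						     unsigned long long map_start,
						     unsigned long long pgoff)
	{
		return ip - map_start + pgoff;	/* virtual -> object-file offset */
	}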
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 1ddc3d1d0147..a28f9b5cc4ff 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -326,8 +326,8 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
if (raw)
s = (char *)pe->metric_name;
else {
- if (asprintf(&s, "%s\n\t[%s]",
- pe->metric_name, pe->desc) < 0)
+ if (asprintf(&s, "%s\n%*s%s]",
+ pe->metric_name, 8, "[", pe->desc) < 0)
return;
}
@@ -490,3 +490,25 @@ out:
metricgroup__free_egroups(&group_list);
return ret;
}
+
+bool metricgroup__has_metric(const char *metric)
+{
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
+ struct pmu_event *pe;
+ int i;
+
+ if (!map)
+ return false;
+
+ for (i = 0; ; i++) {
+ pe = &map->table[i];
+
+ if (!pe->name && !pe->metric_group && !pe->metric_name)
+ break;
+ if (!pe->metric_expr)
+ continue;
+ if (match_metric(pe->metric_name, metric))
+ return true;
+ }
+ return false;
+}
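A plausible caller pattern for the new helper, which only reports whether the current CPU's event table defines a given metric (illustrative sketch, not a call site from this patch):

	#include <stdbool.h>

	bool metricgroup__has_metric(const char *metric);	/* from metricgroup.h */

	static void setup_transaction_stats(void)
	{
		if (metricgroup__has_metric("transaction")) {
			/* fall back to the JSON "transaction" metric group */
		}
	}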
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 06854e125ee7..8a155dba0581 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -28,4 +28,5 @@ int metricgroup__parse_groups(const struct option *opt,
struct rblist *metric_events);
void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+bool metricgroup__has_metric(const char *metric);
#endif
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 3ba6a1742f91..afd68524ffa9 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -652,12 +652,6 @@ static int is_arm_pmu_core(const char *name)
if (stat(path, &st) == 0)
return 1;
- /* Look for cpu sysfs (specific to s390) */
- scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s",
- sysfs, name);
- if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5))
- return 1;
-
return 0;
}
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 594d14a02b67..99990f5f2512 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -913,11 +913,10 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
ratio = total / avg;
print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
- } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
- perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
+ } else if (perf_evsel__is_clock(evsel)) {
if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
- avg / ratio);
+ avg / (ratio * evsel->scale));
else
print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
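Since the clock counts now reach this function already scaled to milliseconds, the walltime divisor (nanoseconds) must be brought into the same unit, hence avg / (ratio * evsel->scale). A worked example, assuming a pre-scaled avg:

	#include <stdio.h>

	int main(void)
	{
		double avg = 4000.0;	/* task-clock, already scaled to msec */
		double ratio = 1e9;	/* walltime: 1 s in ns */
		double scale = 1e-6;	/* msec per ns */

		/* 4000 msec / 1000 msec = 4 CPUs utilized */
		printf("%.3f CPUs utilized\n", avg / (ratio * scale));
		return 0;
	}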
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 0ee7f568d60c..3393d7ee9401 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -38,6 +38,10 @@ static const char **syscalltbl_native = syscalltbl_powerpc_64;
#include <asm/syscalls_32.c>
const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
static const char **syscalltbl_native = syscalltbl_powerpc_32;
+#elif defined(__aarch64__)
+#include <asm/syscalls.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_arm64;
#endif
struct syscall {
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 538db4e5d1e6..6f318b15950e 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -77,7 +77,7 @@ static int entry(u64 ip, struct unwind_info *ui)
if (__report_module(&al, ip, ui))
return -1;
- e->ip = al.addr;
+ e->ip = ip;
e->map = al.map;
e->sym = al.sym;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 6a11bc7e6b27..79f521a552cf 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -575,7 +575,7 @@ static int entry(u64 ip, struct thread *thread,
struct addr_location al;
e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
- e.ip = al.addr;
+ e.ip = ip;
e.map = al.map;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 9e78df207919..0c7d9e556b47 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
while (s->bytes_recvd < total_bytes) {
if (txmsg_cork) {
timeout.tv_sec = 0;
- timeout.tv_usec = 1000;
+ timeout.tv_usec = 300000;
} else {
timeout.tv_sec = 1;
timeout.tv_usec = 0;
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index c15f270e121d..65541c21a544 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Usage: configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file build-output-dir results-dir
#
# Create a .config file from the spec file. Run from the kernel source tree.
# Exits with 0 if all went well, with 1 if all went well but the config
@@ -40,20 +40,18 @@ mkdir $T
c=$1
buildloc=$2
+resdir=$3
builddir=
-if test -n $buildloc
+if echo $buildloc | grep -q '^O='
then
- if echo $buildloc | grep -q '^O='
+ builddir=`echo $buildloc | sed -e 's/^O=//'`
+ if test ! -d $builddir
then
- builddir=`echo $buildloc | sed -e 's/^O=//'`
- if test ! -d $builddir
- then
- mkdir $builddir
- fi
- else
- echo Bad build directory: \"$buildloc\"
- exit 2
+ mkdir $builddir
fi
+else
+ echo Bad build directory: \"$buildloc\"
+ exit 2
fi
sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh
@@ -61,12 +59,12 @@ sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
grep '^grep' < $T/u.sh > $T/upd.sh
echo "cat - $c" >> $T/upd.sh
make mrproper
-make $buildloc distclean > $builddir/Make.distclean 2>&1
-make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1
+make $buildloc distclean > $resdir/Make.distclean 2>&1
+make $buildloc $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
mv $builddir/.config $builddir/.config.sav
sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
cp $builddir/.config $builddir/.config.new
-yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err
+yes '' | make $buildloc oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
# verify new config matches specification.
configcheck.sh $builddir/.config $c
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 34d126734cde..9115fcdb5617 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -2,7 +2,7 @@
#
# Build a kvm-ready Linux kernel from the tree in the current directory.
#
-# Usage: kvm-build.sh config-template build-dir
+# Usage: kvm-build.sh config-template build-dir resdir
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ then
exit 1
fi
builddir=${2}
+resdir=${3}
T=${TMPDIR-/tmp}/test-linux.sh.$$
trap 'rm -rf $T' 0
@@ -41,19 +42,19 @@ CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_CONSOLE=y
___EOF___
-configinit.sh $T/config O=$builddir
+configinit.sh $T/config O=$builddir $resdir
retval=$?
if test $retval -gt 1
then
exit 2
fi
ncpus=`cpus2use.sh`
-make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
+make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
then
echo Kernel build error
- egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+ egrep "Stop|Error|error:|warning:" < $resdir/Make.out
echo Run aborted.
exit 3
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 477ecb1293ab..0fa8a61ccb7b 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -70,4 +70,5 @@ else
else
print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
fi
+ echo $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i > $i/console.log.rcu.diags
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index c27e97824163..c9bab57a77eb 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -39,6 +39,7 @@ do
head -1 $resdir/log
fi
TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+ rm -f $i/console.log.*.diags
kvm-recheck-${TORTURE_SUITE}.sh $i
if test -f "$i/console.log"
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index c5b0f94341d9..f7247ee00514 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -98,14 +98,15 @@ then
ln -s $base_resdir/.config $resdir # for kvm-recheck.sh
# Arch-independent indicator
touch $resdir/builtkernel
-elif kvm-build.sh $T/Kc2 $builddir
+elif kvm-build.sh $T/Kc2 $builddir $resdir
then
# Had to build a kernel for this test.
QEMU="`identify_qemu $builddir/vmlinux`"
BOOT_IMAGE="`identify_boot_image $QEMU`"
- cp $builddir/Make*.out $resdir
cp $builddir/vmlinux $resdir
cp $builddir/.config $resdir
+ cp $builddir/Module.symvers $resdir > /dev/null || :
+ cp $builddir/System.map $resdir > /dev/null || :
if test -n "$BOOT_IMAGE"
then
cp $builddir/$BOOT_IMAGE $resdir
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 56610dbbdf73..5a7a62d76a50 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -347,7 +347,7 @@ function dump(first, pastlast, batchnum)
print "needqemurun="
jn=1
for (j = first; j < pastlast; j++) {
- builddir=KVM "/b" jn
+ builddir=KVM "/b1"
cpusr[jn] = cpus[j];
if (cfrep[cf[j]] == "") {
cfr[jn] = cf[j];
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index 17293436f551..84933f6aed77 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -163,6 +163,13 @@ then
print_warning Summary: $summary
cat $T.diags >> $file.diags
fi
+for i in $file.*.diags
+do
+ if test -f "$i"
+ then
+ cat $i >> $file.diags
+ fi
+done
if ! test -s $file.diags
then
rm -f $file.diags
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
index 5d2cc0bd50a0..5c3213cc3ad7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
@@ -1,5 +1,5 @@
-rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
-rcutree.gp_preinit_delay=3
+rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30
+rcutree.gp_preinit_delay=12
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
rcutree.kthread_prio=2
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
deleted file mode 100644
index 883149b5f2d1..000000000000
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
+++ /dev/null
@@ -1 +0,0 @@
-rcutree.rcu_fanout_exact=1
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index 24ec91041957..7bab8246392b 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -39,7 +39,7 @@ rcutorture_param_onoff () {
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
- echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+ echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30
fi
}
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 615252331813..91cf50b2b0e5 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -90,6 +90,30 @@ unsigned int yield_mod_cnt, nr_abort;
#error "Unsupported architecture"
#endif
+#elif defined(__s390__)
+
+#define RSEQ_INJECT_INPUT \
+ , [loop_cnt_1]"m"(loop_cnt[1]) \
+ , [loop_cnt_2]"m"(loop_cnt[2]) \
+ , [loop_cnt_3]"m"(loop_cnt[3]) \
+ , [loop_cnt_4]"m"(loop_cnt[4]) \
+ , [loop_cnt_5]"m"(loop_cnt[5]) \
+ , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "r12"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+ "l %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+ "ltr %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG "\n\t" \
+ "je 333f\n\t" \
+ "222:\n\t" \
+ "ahi %%" INJECT_ASM_REG ", -1\n\t" \
+ "jnz 222b\n\t" \
+ "333:\n\t"
+
#elif defined(__ARMEL__)
#define RSEQ_INJECT_INPUT \
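The injected s390 assembly above is just a counted busy-wait used to widen race windows during the test: load loop_cnt[n], skip if it is zero, otherwise decrement to zero. In C it amounts to roughly this (sketch):

	/* C equivalent of RSEQ_INJECT_ASM(n): spin 'count' times, skip if 0 */
	static void inject_delay(unsigned int count)
	{
		for (unsigned int i = count; i != 0; i--)
			__asm__ __volatile__ ("" ::: "memory");	/* keep the loop */
	}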
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
new file mode 100644
index 000000000000..1069e85258ce
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#define RSEQ_SIG 0x53053053
+
+#define rseq_smp_mb() __asm__ __volatile__ ("bcr 15,0" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_barrier(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_barrier(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#ifdef __s390x__
+
+#define LONG_L "lg"
+#define LONG_S "stg"
+#define LONG_LT_R "ltgr"
+#define LONG_CMP "cg"
+#define LONG_CMP_R "cgr"
+#define LONG_ADDI "aghi"
+#define LONG_ADD_R "agr"
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t"
+
+#elif __s390__
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t"
+
+#define LONG_L "l"
+#define LONG_S "st"
+#define LONG_LT_R "ltr"
+#define LONG_CMP "c"
+#define LONG_CMP_R "cr"
+#define LONG_ADDI "ahi"
+#define LONG_ADD_R "ar"
+
+#endif
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ "larl %%r0, " __rseq_str(cs_label) "\n\t" \
+ LONG_S " %%r0, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "c %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "jnz " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ ".long " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "j %l[" __rseq_str(abort_label) "]\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "j %l[" __rseq_str(cmpfail_label) "]\n\t" \
+ ".popsection\n\t"
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, and store the content of *@v + voffp into @v.
+ */
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " %%r1, %[v]\n\t"
+ LONG_CMP_R " %%r1, %[expectnot]\n\t"
+ "je %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " %%r1, %[v]\n\t"
+ LONG_CMP_R " %%r1, %[expectnot]\n\t"
+ "je %l[error2]\n\t"
+#endif
+ LONG_S " %%r1, %[load]\n\t"
+ LONG_ADD_R " %%r1, %[voffp]\n\t"
+ LONG_L " %%r1, 0(%%r1)\n\t"
+ /* final store */
+ LONG_S " %%r1, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "r" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0", "r1"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
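This is the primitive behind lock-free per-CPU list pop in the selftests:
@expectnot = 0 makes the operation fail on an empty list, and @voffp set
to offsetof(node, next) makes the final store advance the head.  A hedged
sketch (percpu_list_pop and struct node are illustrative; the harness
helpers come from rseq.h):

    #include <sched.h>
    #include <stddef.h>         /* offsetof */
    #include <stdint.h>
    #include "rseq.h"

    struct node {
            struct node *next;
            /* payload ... */
    };

    /* Sketch: detach the head of the current CPU's list. */
    static struct node *percpu_list_pop(struct node **heads)
    {
            for (;;) {
                    intptr_t head;
                    int cpu = rseq_cpu_start();
                    int ret = rseq_cmpnev_storeoffp_load(
                                    (intptr_t *)&heads[cpu], 0,
                                    offsetof(struct node, next),
                                    &head, cpu);

                    if (!ret)
                            return (struct node *)head; /* detached */
                    if (ret > 0)
                            return NULL;                /* list empty */
                    /* ret < 0: aborted (e.g. migration); retry. */
            }
    }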
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ LONG_L " %%r0, %[v]\n\t"
+ LONG_ADD_R " %%r0, %[count]\n\t"
+ /* final store */
+ LONG_S " %%r0, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
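rseq_addv() is the simplest of the set and is what the per-CPU counter
tests use: no atomic instruction on the fast path, because a migration
between reading the cpu number and the final store simply aborts and
retries on the new CPU.  A sketch under the same harness assumptions:

    #include <sched.h>
    #include <stdint.h>
    #include "rseq.h"

    static intptr_t counters[CPU_SETSIZE];

    /* Sketch: lock-free per-CPU increment. */
    static void percpu_count_add(intptr_t delta)
    {
            for (;;) {
                    int cpu = rseq_cpu_start();

                    if (!rseq_addv(&counters[cpu], delta, cpu))
                            return;
                    /* -1: preempted/migrated mid-sequence; retry. */
            }
    }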
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
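The two-store form is meant for operations that must scribble somewhere
else before the commit point, e.g. linking a node to the old head (the
"try store") before publishing it as the new head (the "final store").
An abort between the two is harmless because nothing is visible until the
final store commits.  Illustrative sketch, same caveats as above:

    #include <sched.h>
    #include <stdint.h>
    #include "rseq.h"

    struct node {
            struct node *next;
    };

    struct percpu_list {
            struct node *head[CPU_SETSIZE];
    };

    /* Sketch: push @node on the current CPU's list. */
    static void percpu_list_push(struct percpu_list *list, struct node *node)
    {
            for (;;) {
                    int cpu = rseq_cpu_start();
                    intptr_t old = (intptr_t)list->head[cpu];

                    if (!rseq_cmpeqv_trystorev_storev(
                                    (intptr_t *)&list->head[cpu], old,
                                    (intptr_t *)&node->next, old,
                                    (intptr_t)node, cpu))
                            return;
                    /* 1: head moved under us; -1: aborted.  Resample. */
            }
    }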
+
+/* s390 is TSO: the plain final store already provides release semantics. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ LONG_CMP " %[expect2], %[v2]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+ LONG_CMP " %[expect2], %[v2]\n\t"
+ "jnz %l[error3]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
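The double-compare form revalidates a second location before the commit;
the selftests use it for array-backed buffer pop, to guard against the
slot being recycled between the sample and the commit (an ABA-style
race).  A sketch with made-up names:

    #include <sched.h>
    #include <stdint.h>
    #include "rseq.h"

    struct percpu_stack {
            intptr_t top[CPU_SETSIZE];          /* used slots */
            intptr_t slot[CPU_SETSIZE][64];     /* 64 entries, say */
    };

    /* Sketch: pop the top entry, or @empty if there is none.  The
     * decremented @top commits only if both @top and the sampled
     * slot are still what we read. */
    static intptr_t percpu_stack_pop(struct percpu_stack *s, intptr_t empty)
    {
            for (;;) {
                    int cpu = rseq_cpu_start();
                    intptr_t top = s->top[cpu];
                    intptr_t val;

                    if (!top)
                            return empty;
                    val = s->slot[cpu][top - 1];
                    if (!rseq_cmpeqv_cmpeqv_storev(&s->top[cpu], top,
                                                   &s->slot[cpu][top - 1],
                                                   val, top - 1, cpu))
                            return val;     /* committed */
                    /* 1 or -1: resample and retry. */
            }
    }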
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint64_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+ LONG_LT_R " %[len], %[len]\n\t"
+ "jz 333f\n\t"
+ "222:\n\t"
+ "ic %%r0,0(%[src])\n\t"
+ "stc %%r0,0(%[dst])\n\t"
+ LONG_ADDI " %[src], 1\n\t"
+ LONG_ADDI " %[dst], 1\n\t"
+ LONG_ADDI " %[len], -1\n\t"
+ "jnz 222b\n\t"
+ "333:\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
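The memcpy variant performs the byte-wise copy inside the critical
section itself (the 222:/333: loop above), so a half-done copy is simply
abandoned, never observed: the new offset only becomes visible through
the final store.  The scratch spills exist because the copy loop consumes
the src/dst/len registers.  A sketch of an append-only per-CPU log built
on it (names illustrative):

    #include <sched.h>
    #include <stdint.h>
    #include "rseq.h"

    struct percpu_log {
            intptr_t offset[CPU_SETSIZE];
            char buf[CPU_SETSIZE][4096];
    };

    /* Sketch: append @len bytes to the current CPU's log. */
    static int percpu_log_append(struct percpu_log *log, const void *data,
                                 size_t len)
    {
            for (;;) {
                    int cpu = rseq_cpu_start();
                    intptr_t off = log->offset[cpu];

                    if (off + (intptr_t)len > (intptr_t)sizeof(log->buf[cpu]))
                            return -1;      /* full */
                    if (!rseq_cmpeqv_trymemcpy_storev(&log->offset[cpu], off,
                                                      &log->buf[cpu][off],
                                                      (void *)data, len,
                                                      off + (intptr_t)len,
                                                      cpu))
                            return 0;
                    /* 1: offset moved; -1: aborted.  Retry. */
            }
    }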
+
+/* s390 is TSO: the plain final store already provides release semantics. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
+ newv, cpu);
+}
+#endif /* !RSEQ_SKIP_FASTPATH */
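None of these fastpaths can run before the thread registers its
__rseq_abi TLS area with the kernel; in the selftest harness that is
rseq_register_current_thread()/rseq_unregister_current_thread() from
rseq.c.  A minimal end-to-end sketch tying the pieces together:

    #include <sched.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "rseq.h"

    static intptr_t hits[CPU_SETSIZE];

    int main(void)
    {
            int i;

            /* Hook this thread's __rseq_abi up with the kernel first;
             * every fastpath above starts by storing into it. */
            if (rseq_register_current_thread()) {
                    fprintf(stderr, "rseq unavailable\n");
                    return 1;
            }
            for (i = 0; i < 1000000; i++) {
                    for (;;) {
                            int cpu = rseq_cpu_start();

                            if (!rseq_addv(&hits[cpu], 1, cpu))
                                    break;  /* committed on @cpu */
                    }
            }
            return rseq_unregister_current_thread() ? 1 : 0;
    }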
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index 86ce22417e0d..ca332efe9713 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -75,6 +75,8 @@ extern __thread volatile struct rseq __rseq_abi;
#include <rseq-ppc.h>
#elif defined(__mips__)
#include <rseq-mips.h>
+#elif defined(__s390__)
+#include <rseq-s390.h>
#else
#error unsupported target
#endif
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index ca6cd146aafe..dcf73c5dab6e 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -134,6 +134,11 @@ int main(int argv, char **argc)
printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
if (llabs(eppm - ppm) > 1000) {
+ if (tx1.offset || tx2.offset ||
+ tx1.freq != tx2.freq || tx1.tick != tx2.tick) {
+ printf(" [SKIP]\n");
+ return ksft_exit_skip("The clock was adjusted externally. Shut down NTPd or other time-sync daemons\n");
+ }
printf(" [FAILED]\n");
return ksft_exit_fail();
}
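The new bail-out compares two adjtimex(2) samples taken around the
measurement: a daemon steering the clock shows up as a nonzero offset or
a changed freq/tick, at which point raw-vs-monotonic skew says nothing
about the hardware.  A standalone probe in the same spirit (not from the
patch):

    #include <stdio.h>
    #include <sys/timex.h>

    int main(void)
    {
            struct timex tx = { .modes = 0 };   /* read-only query */

            if (adjtimex(&tx) < 0) {
                    perror("adjtimex");
                    return 1;
            }
            /* A free-running clock has zero NTP offset and frequency
             * correction; anything else means someone is steering it. */
            printf("offset=%ld freq=%ld tick=%ld -> %s\n",
                   tx.offset, tx.freq, tx.tick,
                   (tx.offset || tx.freq) ? "adjusted externally"
                                          : "free-running");
            return 0;
    }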
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 04e554cae3a2..108250e4d376 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -604,7 +604,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.pause = false;
- swake_up(kvm_arch_vcpu_wq(vcpu));
+ swake_up_one(kvm_arch_vcpu_wq(vcpu));
}
}
@@ -612,7 +612,7 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
- swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause)));
if (vcpu->arch.power_off || vcpu->arch.pause) {
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index c95ab4c5a475..9b73d3ad918a 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -155,7 +155,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
- swake_up(wq);
+ swake_up_one(wq);
return PSCI_RET_SUCCESS;
}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 57bcb27dcf30..23c2519c5b32 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -107,7 +107,7 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, gva);
if (swq_has_sleeper(&vcpu->wq))
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
mmput(mm);
kvm_put_kvm(vcpu->kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8b47507faab5..3d233ebfbee9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2172,7 +2172,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_blocking(vcpu);
for (;;) {
- prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
if (kvm_vcpu_check_block(vcpu) < 0)
break;
@@ -2214,7 +2214,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
return true;
}
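The virt/kvm hunks above are mechanical fallout of the sched/swait rename
in this series: swait waiters are now always enqueued exclusively, and
the wake side is spelled swake_up_one() to make the wake-exactly-one
semantics explicit.  A kernel-side sketch of the pairing (not
userspace-buildable; assumes <linux/swait.h>):

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(vcpu_wq);
    static bool resume;

    /* Sleeps until @resume; enqueued as an exclusive waiter. */
    static void waiter(void)
    {
            swait_event_interruptible_exclusive(vcpu_wq, READ_ONCE(resume));
    }

    /* Wakes exactly one exclusive waiter (was swake_up()). */
    static void waker(void)
    {
            WRITE_ONCE(resume, true);
            if (swq_has_sleeper(&vcpu_wq))
                    swake_up_one(&vcpu_wq);
    }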