Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/include/asm/atomic.h87
-rw-r--r--arch/alpha/include/asm/pgalloc.h4
-rw-r--r--arch/alpha/include/asm/rwsem.h68
-rw-r--r--arch/alpha/include/asm/spinlock.h9
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/Kconfig31
-rw-r--r--arch/arc/Makefile8
-rw-r--r--arch/arc/boot/dts/abilis_tb100.dtsi2
-rw-r--r--arch/arc/boot/dts/abilis_tb101.dtsi2
-rw-r--r--arch/arc/boot/dts/axc001.dtsi1
-rw-r--r--arch/arc/boot/dts/axc003.dtsi1
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi1
-rw-r--r--arch/arc/boot/dts/eznps.dts1
-rw-r--r--arch/arc/boot/dts/nsim_700.dts1
-rw-r--r--arch/arc/boot/dts/nsimosci.dts1
-rw-r--r--arch/arc/boot/dts/nsimosci_hs.dts1
-rw-r--r--arch/arc/boot/dts/nsimosci_hs_idu.dts1
-rw-r--r--arch/arc/boot/dts/skeleton.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs_idu.dtsi1
-rw-r--r--arch/arc/boot/dts/vdk_axc003.dtsi1
-rw-r--r--arch/arc/boot/dts/vdk_axc003_idu.dtsi1
-rw-r--r--arch/arc/include/asm/atomic.h144
-rw-r--r--arch/arc/include/asm/entry-compact.h4
-rw-r--r--arch/arc/include/asm/mmu_context.h2
-rw-r--r--arch/arc/include/asm/pgalloc.h4
-rw-r--r--arch/arc/include/asm/pgtable.h4
-rw-r--r--arch/arc/include/asm/processor.h2
-rw-r--r--arch/arc/include/asm/smp.h2
-rw-r--r--arch/arc/include/asm/spinlock.h299
-rw-r--r--arch/arc/include/asm/thread_info.h2
-rw-r--r--arch/arc/include/asm/uaccess.h2
-rw-r--r--arch/arc/include/uapi/asm/swab.h2
-rw-r--r--arch/arc/kernel/entry-compact.S18
-rw-r--r--arch/arc/kernel/intc-compact.c6
-rw-r--r--arch/arc/kernel/perf_event.c2
-rw-r--r--arch/arc/kernel/setup.c10
-rw-r--r--arch/arc/kernel/signal.c2
-rw-r--r--arch/arc/kernel/stacktrace.c2
-rw-r--r--arch/arc/kernel/time.c99
-rw-r--r--arch/arc/kernel/troubleshoot.c2
-rw-r--r--arch/arc/mm/cache.c6
-rw-r--r--arch/arc/mm/dma.c5
-rw-r--r--arch/arc/mm/fault.c2
-rw-r--r--arch/arc/mm/ioremap.c2
-rw-r--r--arch/arm/Kconfig56
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/Makefile6
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi3
-rw-r--r--arch/arm/boot/dts/am4372.dtsi3
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts2
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts2
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi32
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi47
-rw-r--r--arch/arm/boot/dts/bcm958625k.dts12
-rw-r--r--arch/arm/boot/dts/dm8148-evm.dts8
-rw-r--r--arch/arm/boot/dts/dm8148-t410.dts9
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi1
-rw-r--r--arch/arm/boot/dts/dra7.dtsi5
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos5250-snow-common.dtsi13
-rw-r--r--arch/arm/boot/dts/exynos5420-peach-pit.dts13
-rw-r--r--arch/arm/boot/dts/imx28-m28.dtsi2
-rw-r--r--arch/arm/boot/dts/imx51-ts4800.dts2
-rw-r--r--arch/arm/boot/dts/imx53-m53.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-ns2lite.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-topkick.dts2
-rw-r--r--arch/arm/boot/dts/meson8-minix-neo-x8.dts1
-rw-r--r--arch/arm/boot/dts/omap3-evm-37xx.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-common.dtsi11
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3-zoom3.dts6
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi48
-rw-r--r--arch/arm/boot/dts/omap5-igep0050.dts26
-rw-r--r--arch/arm/boot/dts/omap5-uevm.dts10
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom-msm8960.dtsi3
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi5
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi2
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi16
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socrates.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts1
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi3
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi21
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi11
-rw-r--r--arch/arm/boot/dts/sun5i-r8-chip.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-primo81.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi13
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi312
-rw-r--r--arch/arm/boot/dts/tegra30-beaver.dts3
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-b.dts328
-rw-r--r--arch/arm/configs/collie_defconfig2
-rw-r--r--arch/arm/configs/exynos_defconfig1
-rw-r--r--arch/arm/configs/ixp4xx_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c40
-rw-r--r--arch/arm/include/asm/assembler.h4
-rw-r--r--arch/arm/include/asm/atomic.h106
-rw-r--r--arch/arm/include/asm/barrier.h4
-rw-r--r--arch/arm/include/asm/delay.h6
-rw-r--r--arch/arm/include/asm/efi.h4
-rw-r--r--arch/arm/include/asm/floppy.h2
-rw-r--r--arch/arm/include/asm/io.h2
-rw-r--r--arch/arm/include/asm/pgalloc.h4
-rw-r--r--arch/arm/include/asm/pgtable-2level.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h5
-rw-r--r--arch/arm/include/asm/pgtable.h1
-rw-r--r--arch/arm/include/asm/ptrace.h10
-rw-r--r--arch/arm/include/asm/spinlock.h19
-rw-r--r--arch/arm/include/asm/tlb.h29
-rw-r--r--arch/arm/include/asm/uaccess.h114
-rw-r--r--arch/arm/include/asm/xen/hypercall.h1
-rw-r--r--arch/arm/include/asm/xen/xen-ops.h6
-rw-r--r--arch/arm/include/uapi/asm/kvm.h4
-rw-r--r--arch/arm/kernel/asm-offsets.c5
-rw-r--r--arch/arm/kernel/cpuidle.c23
-rw-r--r--arch/arm/kernel/devtree.c3
-rw-r--r--arch/arm/kernel/entry-armv.S19
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/entry-header.S12
-rw-r--r--arch/arm/kernel/entry-v7m.S2
-rw-r--r--arch/arm/kernel/process.c14
-rw-r--r--arch/arm/kernel/ptrace.c15
-rw-r--r--arch/arm/kernel/setup.c12
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/kernel/smp_tlb.c44
-rw-r--r--arch/arm/kernel/smp_twd.c34
-rw-r--r--arch/arm/kernel/vmlinux.lds.S4
-rw-r--r--arch/arm/kvm/arm.c1
-rw-r--r--arch/arm/lib/Makefile5
-rw-r--r--arch/arm/lib/delay-loop.S15
-rw-r--r--arch/arm/mach-artpec/board-artpec6.c3
-rw-r--r--arch/arm/mach-at91/at91rm9200.c2
-rw-r--r--arch/arm/mach-at91/at91sam9.c2
-rw-r--r--arch/arm/mach-at91/sama5.c2
-rw-r--r--arch/arm/mach-bcm/Kconfig2
-rw-r--r--arch/arm/mach-bcm/board_bcm21664.c2
-rw-r--r--arch/arm/mach-bcm/board_bcm281xx.c2
-rw-r--r--arch/arm/mach-bcm/board_bcm2835.c10
-rw-r--r--arch/arm/mach-cns3xxx/core.c3
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c2
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-exynos/exynos.c3
-rw-r--r--arch/arm/mach-highbank/highbank.c3
-rw-r--r--arch/arm/mach-imx/mach-imx51.c2
-rw-r--r--arch/arm/mach-imx/mach-imx53.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6sl.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6sx.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6ul.c3
-rw-r--r--arch/arm/mach-imx/mach-imx7d.c1
-rw-r--r--arch/arm/mach-integrator/Kconfig2
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c3
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c3
-rw-r--r--arch/arm/mach-keystone/Kconfig2
-rw-r--r--arch/arm/mach-keystone/keystone.c1
-rw-r--r--arch/arm/mach-lpc32xx/phy3250.c3
-rw-r--r--arch/arm/mach-moxart/Kconfig2
-rw-r--r--arch/arm/mach-mvebu/Makefile10
-rw-r--r--arch/arm/mach-mvebu/board-v7.c3
-rw-r--r--arch/arm/mach-mvebu/coherency.c42
-rw-r--r--arch/arm/mach-mvebu/dove.c2
-rw-r--r--arch/arm/mach-mvebu/kirkwood.c2
-rw-r--r--arch/arm/mach-mxs/Kconfig2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c3
-rw-r--r--arch/arm/mach-nspire/Kconfig1
-rw-r--r--arch/arm/mach-nspire/nspire.c3
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S6
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.c5
-rw-r--r--arch/arm/mach-omap1/board-osk.c2
-rw-r--r--arch/arm/mach-omap1/include/mach/ams-delta-fiq.h2
-rw-r--r--arch/arm/mach-omap2/Kconfig12
-rw-r--r--arch/arm/mach-omap2/omap-secure.h1
-rw-r--r--arch/arm/mach-omap2/omap-smp.c48
-rw-r--r--arch/arm/mach-omap2/powerdomain.c9
-rw-r--r--arch/arm/mach-omap2/powerdomains7xx_data.c76
-rw-r--r--arch/arm/mach-omap2/timer.c7
-rw-r--r--arch/arm/mach-orion5x/board-dt.c3
-rw-r--r--arch/arm/mach-picoxcell/common.c2
-rw-r--r--arch/arm/mach-prima2/Kconfig2
-rw-r--r--arch/arm/mach-pxa/spitz.c2
-rw-r--r--arch/arm/mach-rockchip/rockchip.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-s3c2416-dt.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c3
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c3
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c2
-rw-r--r--arch/arm/mach-spear/spear1310.c2
-rw-r--r--arch/arm/mach-spear/spear1340.c1
-rw-r--r--arch/arm/mach-spear/spear300.c3
-rw-r--r--arch/arm/mach-spear/spear310.c3
-rw-r--r--arch/arm/mach-spear/spear320.c3
-rw-r--r--arch/arm/mach-spear/spear6xx.c3
-rw-r--r--arch/arm/mach-tegra/tegra.c2
-rw-r--r--arch/arm/mach-u300/Kconfig2
-rw-r--r--arch/arm/mach-u300/core.c3
-rw-r--r--arch/arm/mach-versatile/versatile_dt.c3
-rw-r--r--arch/arm/mach-vexpress/spc.c2
-rw-r--r--arch/arm/mach-vt8500/vt8500.c3
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/arm/mm/Kconfig6
-rw-r--r--arch/arm/mm/cache-l2x0.c27
-rw-r--r--arch/arm/mm/dma-mapping.c144
-rw-r--r--arch/arm/mm/fault.c2
-rw-r--r--arch/arm/mm/pgd.c2
-rw-r--r--arch/arm/mm/proc-v7.S43
-rw-r--r--arch/arm/plat-samsung/devs.c2
-rw-r--r--arch/arm/vfp/vfpmodule.c28
-rw-r--r--arch/arm/xen/Makefile1
-rw-r--r--arch/arm/xen/efi.c40
-rw-r--r--arch/arm/xen/enlighten.c194
-rw-r--r--arch/arm/xen/hypercall.S1
-rw-r--r--arch/arm64/Kconfig38
-rw-r--r--arch/arm64/Kconfig.debug25
-rw-r--r--arch/arm64/Makefile17
-rw-r--r--arch/arm64/boot/Makefile2
-rw-r--r--arch/arm64/boot/dts/apm/apm-merlin.dts6
-rw-r--r--arch/arm64/boot/dts/apm/apm-mustang.dts12
-rw-r--r--arch/arm64/boot/dts/apm/apm-shadowcat.dtsi11
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi26
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2-svk.dts16
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi39
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi43
-rw-r--r--arch/arm64/boot/dts/lg/lg1312.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi62
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi10
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/acpi.h10
-rw-r--r--arch/arm64/include/asm/alternative.h16
-rw-r--r--arch/arm64/include/asm/assembler.h12
-rw-r--r--arch/arm64/include/asm/atomic.h60
-rw-r--r--arch/arm64/include/asm/atomic_ll_sc.h110
-rw-r--r--arch/arm64/include/asm/atomic_lse.h278
-rw-r--r--arch/arm64/include/asm/barrier.h13
-rw-r--r--arch/arm64/include/asm/checksum.h51
-rw-r--r--arch/arm64/include/asm/cmpxchg.h51
-rw-r--r--arch/arm64/include/asm/cpu.h2
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/debug-monitors.h5
-rw-r--r--arch/arm64/include/asm/efi.h7
-rw-r--r--arch/arm64/include/asm/elf.h4
-rw-r--r--arch/arm64/include/asm/esr.h1
-rw-r--r--arch/arm64/include/asm/insn.h41
-rw-r--r--arch/arm64/include/asm/io.h4
-rw-r--r--arch/arm64/include/asm/irqflags.h3
-rw-r--r--arch/arm64/include/asm/kexec.h48
-rw-r--r--arch/arm64/include/asm/kgdb.h45
-rw-r--r--arch/arm64/include/asm/kprobes.h62
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h2
-rw-r--r--arch/arm64/include/asm/memory.h3
-rw-r--r--arch/arm64/include/asm/mmu.h2
-rw-r--r--arch/arm64/include/asm/numa.h2
-rw-r--r--arch/arm64/include/asm/page.h12
-rw-r--r--arch/arm64/include/asm/pgalloc.h2
-rw-r--r--arch/arm64/include/asm/probes.h35
-rw-r--r--arch/arm64/include/asm/processor.h1
-rw-r--r--arch/arm64/include/asm/ptdump.h44
-rw-r--r--arch/arm64/include/asm/ptrace.h66
-rw-r--r--arch/arm64/include/asm/smp.h12
-rw-r--r--arch/arm64/include/asm/spinlock.h42
-rw-r--r--arch/arm64/include/asm/sysreg.h2
-rw-r--r--arch/arm64/include/asm/traps.h2
-rw-r--r--arch/arm64/include/asm/uaccess.h38
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h8
-rw-r--r--arch/arm64/include/asm/vdso_datapage.h8
-rw-r--r--arch/arm64/include/asm/virt.h5
-rw-r--r--arch/arm64/include/asm/xen/xen-ops.h6
-rw-r--r--arch/arm64/kernel/Makefile12
-rw-r--r--arch/arm64/kernel/acpi_numa.c112
-rw-r--r--arch/arm64/kernel/arm64ksyms.c6
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c66
-rw-r--r--arch/arm64/kernel/asm-offsets.c18
-rw-r--r--arch/arm64/kernel/cpu-reset.S54
-rw-r--r--arch/arm64/kernel/cpu-reset.h34
-rw-r--r--arch/arm64/kernel/cpu_errata.c13
-rw-r--r--arch/arm64/kernel/cpufeature.c4
-rw-r--r--arch/arm64/kernel/cpuidle.c20
-rw-r--r--arch/arm64/kernel/cpuinfo.c128
-rw-r--r--arch/arm64/kernel/debug-monitors.c47
-rw-r--r--arch/arm64/kernel/efi.c50
-rw-r--r--arch/arm64/kernel/entry.S36
-rw-r--r--arch/arm64/kernel/hibernate.c6
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c8
-rw-r--r--arch/arm64/kernel/hyp-stub.S10
-rw-r--r--arch/arm64/kernel/insn.c133
-rw-r--r--arch/arm64/kernel/kgdb.c18
-rw-r--r--arch/arm64/kernel/machine_kexec.c212
-rw-r--r--arch/arm64/kernel/probes/Makefile3
-rw-r--r--arch/arm64/kernel/probes/decode-insn.c174
-rw-r--r--arch/arm64/kernel/probes/decode-insn.h35
-rw-r--r--arch/arm64/kernel/probes/kprobes.c686
-rw-r--r--arch/arm64/kernel/probes/kprobes_trampoline.S81
-rw-r--r--arch/arm64/kernel/probes/simulate-insn.c217
-rw-r--r--arch/arm64/kernel/probes/simulate-insn.h28
-rw-r--r--arch/arm64/kernel/ptrace.c109
-rw-r--r--arch/arm64/kernel/relocate_kernel.S130
-rw-r--r--arch/arm64/kernel/setup.c26
-rw-r--r--arch/arm64/kernel/smp.c30
-rw-r--r--arch/arm64/kernel/traps.c183
-rw-r--r--arch/arm64/kernel/vdso.c8
-rw-r--r--arch/arm64/kernel/vdso/Makefile7
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S331
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S9
-rw-r--r--arch/arm64/kvm/handle_exit.c4
-rw-r--r--arch/arm64/kvm/hyp/Makefile4
-rw-r--r--arch/arm64/kvm/hyp/switch.c2
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c8
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c36
-rw-r--r--arch/arm64/kvm/sys_regs.c13
-rw-r--r--arch/arm64/lib/copy_from_user.S4
-rw-r--r--arch/arm64/lib/copy_to_user.S4
-rw-r--r--arch/arm64/mm/cache.S2
-rw-r--r--arch/arm64/mm/context.c9
-rw-r--r--arch/arm64/mm/dma-mapping.c37
-rw-r--r--arch/arm64/mm/dump.c40
-rw-r--r--arch/arm64/mm/fault.c50
-rw-r--r--arch/arm64/mm/flush.c4
-rw-r--r--arch/arm64/mm/hugetlbpage.c14
-rw-r--r--arch/arm64/mm/init.c15
-rw-r--r--arch/arm64/mm/mmu.c154
-rw-r--r--arch/arm64/mm/numa.c28
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/arm64/net/bpf_jit.h3
-rw-r--r--arch/arm64/net/bpf_jit_comp.c111
-rw-r--r--arch/arm64/xen/Makefile1
-rw-r--r--arch/arm64/xen/hypercall.S1
-rw-r--r--arch/avr32/include/asm/atomic.h54
-rw-r--r--arch/avr32/include/asm/pgalloc.h6
-rw-r--r--arch/avr32/include/uapi/asm/unistd.h646
-rw-r--r--arch/avr32/kernel/syscall-stubs.S18
-rw-r--r--arch/avr32/kernel/syscall_table.S662
-rw-r--r--arch/avr32/mach-at32ap/pio.c2
-rw-r--r--arch/avr32/mm/fault.c2
-rw-r--r--arch/blackfin/include/asm/atomic.h8
-rw-r--r--arch/blackfin/include/asm/spinlock.h5
-rw-r--r--arch/blackfin/kernel/bfin_ksyms.c1
-rw-r--r--arch/blackfin/kernel/perf_event.c26
-rw-r--r--arch/blackfin/mach-bf561/atomic.S43
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c2
-rw-r--r--arch/c6x/platforms/Makefile2
-rw-r--r--arch/c6x/platforms/platform.c17
-rw-r--r--arch/cris/include/asm/pgalloc.h4
-rw-r--r--arch/cris/kernel/setup.c8
-rw-r--r--arch/cris/mm/fault.c2
-rw-r--r--arch/frv/include/asm/atomic.h30
-rw-r--r--arch/frv/include/asm/atomic_defs.h2
-rw-r--r--arch/frv/include/asm/serial.h4
-rw-r--r--arch/frv/mm/fault.c2
-rw-r--r--arch/frv/mm/pgalloc.c6
-rw-r--r--arch/h8300/include/asm/atomic.h29
-rw-r--r--arch/hexagon/Kconfig3
-rw-r--r--arch/hexagon/include/asm/atomic.h31
-rw-r--r--arch/hexagon/include/asm/pgalloc.h4
-rw-r--r--arch/hexagon/include/asm/spinlock.h10
-rw-r--r--arch/hexagon/mm/vm_fault.c2
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/include/asm/acpi.h3
-rw-r--r--arch/ia64/include/asm/atomic.h130
-rw-r--r--arch/ia64/include/asm/mutex.h2
-rw-r--r--arch/ia64/include/asm/rwsem.h31
-rw-r--r--arch/ia64/include/asm/spinlock.h4
-rw-r--r--arch/ia64/include/asm/thread_info.h8
-rw-r--r--arch/ia64/include/asm/tlb.h31
-rw-r--r--arch/ia64/kernel/acpi.c2
-rw-r--r--arch/ia64/kernel/init_task.c1
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/mm/fault.c2
-rw-r--r--arch/m32r/boot/compressed/m32r_sio.c9
-rw-r--r--arch/m32r/include/asm/atomic.h36
-rw-r--r--arch/m32r/include/asm/spinlock.h9
-rw-r--r--arch/m32r/kernel/m32r_ksyms.c3
-rw-r--r--arch/m32r/lib/Makefile4
-rw-r--r--arch/m32r/lib/libgcc.h23
-rw-r--r--arch/m32r/lib/ucmpdi2.c17
-rw-r--r--arch/m32r/mm/fault.c2
-rw-r--r--arch/m68k/coldfire/head.S2
-rw-r--r--arch/m68k/coldfire/m5272.c2
-rw-r--r--arch/m68k/coldfire/pci.c2
-rw-r--r--arch/m68k/configs/amiga_defconfig4
-rw-r--r--arch/m68k/configs/apollo_defconfig4
-rw-r--r--arch/m68k/configs/atari_defconfig4
-rw-r--r--arch/m68k/configs/bvme6000_defconfig4
-rw-r--r--arch/m68k/configs/hp300_defconfig4
-rw-r--r--arch/m68k/configs/mac_defconfig4
-rw-r--r--arch/m68k/configs/multi_defconfig4
-rw-r--r--arch/m68k/configs/mvme147_defconfig4
-rw-r--r--arch/m68k/configs/mvme16x_defconfig4
-rw-r--r--arch/m68k/configs/q40_defconfig4
-rw-r--r--arch/m68k/configs/sun3_defconfig4
-rw-r--r--arch/m68k/configs/sun3x_defconfig4
-rw-r--r--arch/m68k/ifpsp060/src/fpsp.S8
-rw-r--r--arch/m68k/ifpsp060/src/pfpsp.S4
-rw-r--r--arch/m68k/include/asm/atomic.h44
-rw-r--r--arch/m68k/include/asm/dma.h2
-rw-r--r--arch/m68k/include/asm/m525xsim.h4
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h4
-rw-r--r--arch/m68k/include/asm/mcfmmu.h2
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h4
-rw-r--r--arch/m68k/include/asm/q40_master.h2
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h4
-rw-r--r--arch/m68k/mac/iop.c2
-rw-r--r--arch/m68k/math-emu/fp_decode.h2
-rw-r--r--arch/m68k/mm/fault.c2
-rw-r--r--arch/metag/include/asm/atomic_lnkget.h36
-rw-r--r--arch/metag/include/asm/atomic_lock1.h33
-rw-r--r--arch/metag/include/asm/pgalloc.h5
-rw-r--r--arch/metag/include/asm/spinlock.h14
-rw-r--r--arch/metag/kernel/perf/perf_event.c26
-rw-r--r--arch/metag/kernel/setup.c5
-rw-r--r--arch/metag/mm/fault.c2
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/pgalloc.h4
-rw-r--r--arch/microblaze/kernel/timer.c49
-rw-r--r--arch/microblaze/mm/fault.c2
-rw-r--r--arch/microblaze/mm/pgtable.c3
-rw-r--r--arch/mips/ath79/setup.c2
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig2
-rw-r--r--arch/mips/configs/maltaaprp_defconfig2
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig2
-rw-r--r--arch/mips/configs/maltaup_defconfig2
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig2
-rw-r--r--arch/mips/include/asm/atomic.h154
-rw-r--r--arch/mips/include/asm/kvm_host.h3
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mpi-defs.h328
-rw-r--r--arch/mips/include/asm/pgalloc.h6
-rw-r--r--arch/mips/include/asm/pgtable.h10
-rw-r--r--arch/mips/include/asm/spinlock.h19
-rw-r--r--arch/mips/jz4740/setup.c8
-rw-r--r--arch/mips/kernel/ptrace.c9
-rw-r--r--arch/mips/kvm/emulate.c19
-rw-r--r--arch/mips/kvm/interrupt.h1
-rw-r--r--arch/mips/kvm/locore.S1
-rw-r--r--arch/mips/kvm/mips.c11
-rw-r--r--arch/mips/mm/fault.c2
-rw-r--r--arch/mips/mti-sead3/sead3-setup.c8
-rw-r--r--arch/mips/oprofile/op_model_loongson3.c35
-rw-r--r--arch/mips/pic32/pic32mzda/init.c3
-rw-r--r--arch/mips/pistachio/init.c13
-rw-r--r--arch/mips/ralink/cevt-rt3352.c17
-rw-r--r--arch/mips/txx9/generic/setup.c2
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c2
-rw-r--r--arch/mips/xilfpga/init.c13
-rw-r--r--arch/mn10300/include/asm/atomic.h33
-rw-r--r--arch/mn10300/include/asm/spinlock.h8
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/kernel/kgdb.c3
-rw-r--r--arch/mn10300/mm/fault.c2
-rw-r--r--arch/mn10300/mm/pgtable.c6
-rw-r--r--arch/nios2/include/asm/pgalloc.h5
-rw-r--r--arch/nios2/kernel/time.c63
-rw-r--r--arch/nios2/mm/fault.c2
-rw-r--r--arch/nios2/platform/platform.c4
-rw-r--r--arch/openrisc/Kconfig2
-rw-r--r--arch/openrisc/include/asm/pgalloc.h2
-rw-r--r--arch/openrisc/mm/fault.c2
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig2
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig2
-rw-r--r--arch/parisc/include/asm/atomic.h63
-rw-r--r--arch/parisc/include/asm/pgalloc.h7
-rw-r--r--arch/parisc/include/asm/spinlock.h9
-rw-r--r--arch/parisc/include/asm/traps.h2
-rw-r--r--arch/parisc/kernel/processor.c5
-rw-r--r--arch/parisc/kernel/ptrace.c9
-rw-r--r--arch/parisc/kernel/time.c5
-rw-r--r--arch/parisc/kernel/unaligned.c13
-rw-r--r--arch/parisc/kernel/unwind.c22
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/Kconfig72
-rw-r--r--arch/powerpc/Kconfig.debug5
-rw-r--r--arch/powerpc/Makefile7
-rw-r--r--arch/powerpc/boot/Makefile8
-rw-r--r--arch/powerpc/boot/dts/ac14xx.dts2
-rw-r--r--arch/powerpc/boot/dts/akebono.dts2
-rw-r--r--arch/powerpc/boot/dts/bluestone.dts2
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts2
-rw-r--r--arch/powerpc/boot/dts/currituck.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/kmcoge4.dts37
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8569mds.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/mvme7100.dts153
-rw-r--r--arch/powerpc/boot/dts/fsl/p1022rdk.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi49
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi38
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xqds.dtsi38
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xrdb.dtsi38
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi2
-rw-r--r--arch/powerpc/boot/dts/glacier.dts2
-rw-r--r--arch/powerpc/boot/dts/icon.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc5121ads.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8315erdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc836x_rdk.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8377_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8378_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8379_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/pdm360ng.dts2
-rw-r--r--arch/powerpc/boot/dts/sam440ep.dts2
-rw-r--r--arch/powerpc/boot/dts/xcalibur1501.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5200.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5200_xmon.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5301.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5330.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5370.dts2
-rw-r--r--arch/powerpc/boot/motload-head.S11
-rw-r--r--arch/powerpc/boot/mvme7100.c59
-rw-r--r--arch/powerpc/boot/opal-calls.S58
-rw-r--r--arch/powerpc/boot/opal.c98
-rw-r--r--arch/powerpc/boot/ops.h1
-rw-r--r--arch/powerpc/boot/ppc_asm.h4
-rw-r--r--arch/powerpc/boot/ppcboot.h2
-rw-r--r--arch/powerpc/boot/serial.c2
-rw-r--r--arch/powerpc/boot/types.h10
-rwxr-xr-xarch/powerpc/boot/wrapper5
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig1
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig1
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig1
-rw-r--r--arch/powerpc/configs/40x/klondike_defconfig3
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig1
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig1
-rw-r--r--arch/powerpc/configs/40x/virtex_defconfig1
-rw-r--r--arch/powerpc/configs/40x/walnut_defconfig1
-rw-r--r--arch/powerpc/configs/44x/akebono_defconfig8
-rw-r--r--arch/powerpc/configs/44x/arches_defconfig1
-rw-r--r--arch/powerpc/configs/44x/bamboo_defconfig1
-rw-r--r--arch/powerpc/configs/44x/bluestone_defconfig2
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig1
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig8
-rw-r--r--arch/powerpc/configs/44x/ebony_defconfig1
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig2
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig4
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig8
-rw-r--r--arch/powerpc/configs/44x/katmai_defconfig1
-rw-r--r--arch/powerpc/configs/44x/rainier_defconfig1
-rw-r--r--arch/powerpc/configs/44x/redwood_defconfig2
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig6
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig1
-rw-r--r--arch/powerpc/configs/44x/taishan_defconfig1
-rw-r--r--arch/powerpc/configs/44x/virtex5_defconfig1
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig5
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/asp8347_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/kmeter1_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc8313_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc8315_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itx_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_rdk_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/sbc834x_defconfig5
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/kmp204x_defconfig3
-rw-r--r--arch/powerpc/configs/85xx/ksi8560_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/mpc8540_ads_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/mpc8560_ads_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/mpc85xx_cds_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/sbc8548_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/socrates_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/stx_gp3_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8540_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8541_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8548_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/tqm8555_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8560_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig4
-rw-r--r--arch/powerpc/configs/86xx-hw.config4
-rw-r--r--arch/powerpc/configs/adder875_defconfig1
-rw-r--r--arch/powerpc/configs/amigaone_defconfig2
-rw-r--r--arch/powerpc/configs/c2k_defconfig8
-rw-r--r--arch/powerpc/configs/cell_defconfig2
-rw-r--r--arch/powerpc/configs/chrp32_defconfig1
-rw-r--r--arch/powerpc/configs/ep8248e_defconfig5
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig1
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config4
-rw-r--r--arch/powerpc/configs/g5_defconfig7
-rw-r--r--arch/powerpc/configs/gamecube_defconfig5
-rw-r--r--arch/powerpc/configs/holly_defconfig3
-rw-r--r--arch/powerpc/configs/linkstation_defconfig4
-rw-r--r--arch/powerpc/configs/maple_defconfig5
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig1
-rw-r--r--arch/powerpc/configs/mpc512x_defconfig8
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig4
-rw-r--r--arch/powerpc/configs/mpc7448_hpc2_defconfig4
-rw-r--r--arch/powerpc/configs/mpc8272_ads_defconfig4
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig4
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig4
-rw-r--r--arch/powerpc/configs/mpc86xx_basic_defconfig1
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mvme5100_defconfig4
-rw-r--r--arch/powerpc/configs/pasemi_defconfig3
-rw-r--r--arch/powerpc/configs/pmac32_defconfig7
-rw-r--r--arch/powerpc/configs/powernv_defconfig8
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig4
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig4
-rw-r--r--arch/powerpc/configs/ppc64_defconfig6
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig6
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig11
-rw-r--r--arch/powerpc/configs/pq2fads_defconfig5
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig7
-rw-r--r--arch/powerpc/configs/storcenter_defconfig4
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig1
-rw-r--r--arch/powerpc/configs/wii_defconfig5
-rw-r--r--arch/powerpc/crypto/Makefile2
-rw-r--r--arch/powerpc/crypto/aes-spe-regs.h2
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_asm.S1553
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_glue.c167
-rw-r--r--arch/powerpc/include/asm/accounting.h24
-rw-r--r--arch/powerpc/include/asm/asm-compat.h2
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h75
-rw-r--r--arch/powerpc/include/asm/atomic.h83
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgalloc.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h51
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h28
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-4k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-64k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h100
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h15
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h7
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h14
-rw-r--r--arch/powerpc/include/asm/book3s/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/code-patching.h10
-rw-r--r--arch/powerpc/include/asm/cpufeature.h40
-rw-r--r--arch/powerpc/include/asm/cpuidle.h2
-rw-r--r--arch/powerpc/include/asm/cputime.h14
-rw-r--r--arch/powerpc/include/asm/eeh.h4
-rw-r--r--arch/powerpc/include/asm/exception-64s.h4
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h4
-rw-r--r--arch/powerpc/include/asm/firmware.h6
-rw-r--r--arch/powerpc/include/asm/fixmap.h7
-rw-r--r--arch/powerpc/include/asm/ftrace.h8
-rw-r--r--arch/powerpc/include/asm/hvcall.h11
-rw-r--r--arch/powerpc/include/asm/hw_irq.h2
-rw-r--r--arch/powerpc/include/asm/iommu.h1
-rw-r--r--arch/powerpc/include/asm/kprobes.h8
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h2
-rw-r--r--arch/powerpc/include/asm/linkage.h6
-rw-r--r--arch/powerpc/include/asm/machdep.h44
-rw-r--r--arch/powerpc/include/asm/mmu-8xx.h3
-rw-r--r--arch/powerpc/include/asm/mmu.h9
-rw-r--r--arch/powerpc/include/asm/mpc52xx.h2
-rw-r--r--arch/powerpc/include/asm/mutex.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-44x.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h10
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-64k.h1
-rw-r--r--arch/powerpc/include/asm/opal-api.h46
-rw-r--r--arch/powerpc/include/asm/opal.h34
-rw-r--r--arch/powerpc/include/asm/paca.h9
-rw-r--r--arch/powerpc/include/asm/page.h6
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-be-types.h15
-rw-r--r--arch/powerpc/include/asm/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/pmac_feature.h2
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h47
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h59
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h2
-rw-r--r--arch/powerpc/include/asm/ppc4xx.h2
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h31
-rw-r--r--arch/powerpc/include/asm/processor.h7
-rw-r--r--arch/powerpc/include/asm/ps3.h2
-rw-r--r--arch/powerpc/include/asm/ps3av.h2
-rw-r--r--arch/powerpc/include/asm/pte-common.h2
-rw-r--r--arch/powerpc/include/asm/ptrace.h2
-rw-r--r--arch/powerpc/include/asm/reg.h94
-rw-r--r--arch/powerpc/include/asm/rtas.h7
-rw-r--r--arch/powerpc/include/asm/sections.h4
-rw-r--r--arch/powerpc/include/asm/setup.h12
-rw-r--r--arch/powerpc/include/asm/smp.h9
-rw-r--r--arch/powerpc/include/asm/smu.h9
-rw-r--r--arch/powerpc/include/asm/spinlock.h38
-rw-r--r--arch/powerpc/include/asm/string.h4
-rw-r--r--arch/powerpc/include/asm/synch.h1
-rw-r--r--arch/powerpc/include/asm/tce.h3
-rw-r--r--arch/powerpc/include/asm/thread_info.h4
-rw-r--r--arch/powerpc/include/asm/time.h6
-rw-r--r--arch/powerpc/include/asm/tsi108.h2
-rw-r--r--arch/powerpc/include/asm/types.h8
-rw-r--r--arch/powerpc/include/asm/xics.h6
-rw-r--r--arch/powerpc/kernel/Makefile5
-rw-r--r--arch/powerpc/kernel/align.c18
-rw-r--r--arch/powerpc/kernel/asm-offsets.c80
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S2
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S20
-rw-r--r--arch/powerpc/kernel/cputable.c4
-rw-r--r--arch/powerpc/kernel/crash.c15
-rw-r--r--arch/powerpc/kernel/eeh_cache.c8
-rw-r--r--arch/powerpc/kernel/eeh_dev.c17
-rw-r--r--arch/powerpc/kernel/eeh_driver.c11
-rw-r--r--arch/powerpc/kernel/entry_32.S17
-rw-r--r--arch/powerpc/kernel/entry_64.S8
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S73
-rw-r--r--arch/powerpc/kernel/fadump.c3
-rw-r--r--arch/powerpc/kernel/ftrace.c39
-rw-r--r--arch/powerpc/kernel/head_64.S7
-rw-r--r--arch/powerpc/kernel/head_8xx.S159
-rw-r--r--arch/powerpc/kernel/idle_book3s.S (renamed from arch/powerpc/kernel/idle_power7.S)371
-rw-r--r--arch/powerpc/kernel/iomap.c24
-rw-r--r--arch/powerpc/kernel/irq.c17
-rw-r--r--arch/powerpc/kernel/kprobes.c17
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c10
-rw-r--r--arch/powerpc/kernel/misc_32.S14
-rw-r--r--arch/powerpc/kernel/misc_64.S4
-rw-r--r--arch/powerpc/kernel/module_64.c9
-rw-r--r--arch/powerpc/kernel/nvram_64.c4
-rw-r--r--arch/powerpc/kernel/pci-common.c82
-rw-r--r--arch/powerpc/kernel/pci_64.c3
-rw-r--r--arch/powerpc/kernel/pci_dn.c34
-rw-r--r--arch/powerpc/kernel/process.c58
-rw-r--r--arch/powerpc/kernel/prom.c25
-rw-r--r--arch/powerpc/kernel/prom_init.c3
-rw-r--r--arch/powerpc/kernel/ptrace.c50
-rw-r--r--arch/powerpc/kernel/rtas-proc.c2
-rw-r--r--arch/powerpc/kernel/rtas.c8
-rw-r--r--arch/powerpc/kernel/rtasd.c28
-rw-r--r--arch/powerpc/kernel/setup-common.c189
-rw-r--r--arch/powerpc/kernel/setup.h58
-rw-r--r--arch/powerpc/kernel/setup_32.c113
-rw-r--r--arch/powerpc/kernel/setup_64.c287
-rw-r--r--arch/powerpc/kernel/signal_64.c11
-rw-r--r--arch/powerpc/kernel/smp.c3
-rw-r--r--arch/powerpc/kernel/sysfs.c2
-rw-r--r--arch/powerpc/kernel/time.c148
-rw-r--r--arch/powerpc/kernel/tm.S64
-rw-r--r--arch/powerpc/kernel/traps.c13
-rw-r--r--arch/powerpc/kernel/vector.S9
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c18
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S4
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S2
-rw-r--r--arch/powerpc/kvm/book3s_pr.c6
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S2
-rw-r--r--arch/powerpc/lib/checksum_64.S12
-rw-r--r--arch/powerpc/lib/feature-fixups.c32
-rw-r--r--arch/powerpc/lib/locks.c16
-rw-r--r--arch/powerpc/lib/ppc_ksyms.c4
-rw-r--r--arch/powerpc/lib/rheap.c2
-rw-r--r--arch/powerpc/lib/string.S44
-rw-r--r--arch/powerpc/lib/vmx-helper.c1
-rw-r--r--arch/powerpc/mm/8xx_mmu.c131
-rw-r--r--arch/powerpc/mm/copro_fault.c2
-rw-r--r--arch/powerpc/mm/fault.c2
-rw-r--r--arch/powerpc/mm/hash64_4k.c18
-rw-r--r--arch/powerpc/mm/hash64_64k.c39
-rw-r--r--arch/powerpc/mm/hash_native_64.c50
-rw-r--r--arch/powerpc/mm/hash_utils_64.c189
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c17
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c4
-rw-r--r--arch/powerpc/mm/hugetlbpage.c9
-rw-r--r--arch/powerpc/mm/init_32.c5
-rw-r--r--arch/powerpc/mm/mem.c18
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c7
-rw-r--r--arch/powerpc/mm/mmu_decl.h3
-rw-r--r--arch/powerpc/mm/numa.c84
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c5
-rw-r--r--arch/powerpc/mm/pgtable-radix.c50
-rw-r--r--arch/powerpc/mm/pgtable_32.c4
-rw-r--r--arch/powerpc/mm/pgtable_64.c3
-rw-r--r--arch/powerpc/mm/tlb-radix.c157
-rw-r--r--arch/powerpc/net/Makefile4
-rw-r--r--arch/powerpc/net/bpf_jit.h235
-rw-r--r--arch/powerpc/net/bpf_jit32.h139
-rw-r--r--arch/powerpc/net/bpf_jit64.h102
-rw-r--r--arch/powerpc/net/bpf_jit_asm.S2
-rw-r--r--arch/powerpc/net/bpf_jit_asm64.S180
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c10
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c954
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c2
-rw-r--r--arch/powerpc/perf/Makefile2
-rw-r--r--arch/powerpc/perf/core-book3s.c32
-rw-r--r--arch/powerpc/perf/hv-24x7.c2
-rw-r--r--arch/powerpc/perf/hv-24x7.h2
-rw-r--r--arch/powerpc/perf/isa207-common.c263
-rw-r--r--arch/powerpc/perf/isa207-common.h236
-rw-r--r--arch/powerpc/perf/power8-pmu.c477
-rw-r--r--arch/powerpc/perf/power9-events-list.h55
-rw-r--r--arch/powerpc/perf/power9-pmu.c330
-rw-r--r--arch/powerpc/platforms/40x/Kconfig2
-rw-r--r--arch/powerpc/platforms/40x/ep405.c4
-rw-r--r--arch/powerpc/platforms/40x/ppc40x_simple.c2
-rw-r--r--arch/powerpc/platforms/40x/virtex.c4
-rw-r--r--arch/powerpc/platforms/40x/walnut.c4
-rw-r--r--arch/powerpc/platforms/44x/Kconfig2
-rw-r--r--arch/powerpc/platforms/44x/canyonlands.c5
-rw-r--r--arch/powerpc/platforms/44x/ebony.c4
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c4
-rw-r--r--arch/powerpc/platforms/44x/ppc44x_simple.c3
-rw-r--r--arch/powerpc/platforms/44x/ppc476.c10
-rw-r--r--arch/powerpc/platforms/44x/sam440ep.c4
-rw-r--r--arch/powerpc/platforms/44x/virtex.c4
-rw-r--r--arch/powerpc/platforms/44x/warp.c4
-rw-r--r--arch/powerpc/platforms/512x/Kconfig1
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c4
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads.c8
-rw-r--r--arch/powerpc/platforms/512x/mpc512x.h2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_generic.c8
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c2
-rw-r--r--arch/powerpc/platforms/512x/pdm360ng.c8
-rw-r--r--arch/powerpc/platforms/52xx/efika.c3
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c3
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c3
-rw-r--r--arch/powerpc/platforms/82xx/km82xx.c3
-rw-r--r--arch/powerpc/platforms/82xx/mpc8272_ads.c3
-rw-r--r--arch/powerpc/platforms/82xx/pq2.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2.h2
-rw-r--r--arch/powerpc/platforms/82xx/pq2fads.c3
-rw-r--r--arch/powerpc/platforms/83xx/Kconfig3
-rw-r--r--arch/powerpc/platforms/83xx/asp834x.c3
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c3
-rw-r--r--arch/powerpc/platforms/83xx/misc.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc830x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h2
-rw-r--r--arch/powerpc/platforms/83xx/sbc834x.c4
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig4
-rw-r--r--arch/powerpc/platforms/85xx/bsc913x_qds.c4
-rw-r--r--arch/powerpc/platforms/85xx/bsc913x_rdb.c4
-rw-r--r--arch/powerpc/platforms/85xx/c293pcie.c4
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c5
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c7
-rw-r--r--arch/powerpc/platforms/85xx/ksi8560.c6
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c6
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c16
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c12
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c43
-rw-r--r--arch/powerpc/platforms/85xx/mvme2500.c4
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c6
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c4
-rw-r--r--arch/powerpc/platforms/85xx/p1022_rdk.c4
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rdb.c4
-rw-r--r--arch/powerpc/platforms/85xx/ppa8548.c4
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c4
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c4
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c4
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c4
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c2
-rw-r--r--arch/powerpc/platforms/85xx/twr_p102x.c4
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c12
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig15
-rw-r--r--arch/powerpc/platforms/86xx/Makefile1
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c4
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c4
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c6
-rw-r--r--arch/powerpc/platforms/86xx/mvme7100.c121
-rw-r--r--arch/powerpc/platforms/86xx/sbc8641d.c4
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/8xx/adder875.c3
-rw-r--r--arch/powerpc/platforms/8xx/ep88xc.c3
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/8xx/mpc86xads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc8xx.h2
-rw-r--r--arch/powerpc/platforms/8xx/tqm8xx_setup.c4
-rw-r--r--arch/powerpc/platforms/Kconfig8
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype1
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c6
-rw-r--r--arch/powerpc/platforms/cell/cpufreq_spudemand.c72
-rw-r--r--arch/powerpc/platforms/cell/iommu.c4
-rw-r--r--arch/powerpc/platforms/cell/setup.c7
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c4
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c7
-rw-r--r--arch/powerpc/platforms/embedded6xx/c2k.c10
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c19
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c12
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c19
-rw-r--r--arch/powerpc/platforms/maple/pci.c34
-rw-r--r--arch/powerpc/platforms/maple/setup.c37
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c15
-rw-r--r--arch/powerpc/platforms/pasemi/pasemi.h1
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c3
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c18
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c2
-rw-r--r--arch/powerpc/platforms/powermac/pci.c38
-rw-r--r--arch/powerpc/platforms/powermac/setup.c54
-rw-r--r--arch/powerpc/platforms/powermac/smp.c3
-rw-r--r--arch/powerpc/platforms/powernv/Makefile1
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c49
-rw-r--r--arch/powerpc/platforms/powernv/idle.c188
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal-async.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal-memory-errors.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-sysparam.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-tracepoints.c1
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S72
-rw-r--r--arch/powerpc/platforms/powernv/opal.c30
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c385
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c905
-rw-r--r--arch/powerpc/platforms/powernv/pci.c132
-rw-r--r--arch/powerpc/platforms/powernv/pci.h43
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h1
-rw-r--r--arch/powerpc/platforms/powernv/setup.c16
-rw-r--r--arch/powerpc/platforms/powernv/smp.c4
-rw-r--r--arch/powerpc/platforms/ps3/htab.c12
-rw-r--r--arch/powerpc/platforms/ps3/repository.c2
-rw-r--r--arch/powerpc/platforms/ps3/setup.c23
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c56
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c51
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c53
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c48
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c13
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c184
-rw-r--r--arch/powerpc/platforms/pseries/io_event_irq.c2
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c57
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c23
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c41
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c2
-rw-r--r--arch/powerpc/platforms/pseries/power.c2
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h21
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c8
-rw-r--r--arch/powerpc/platforms/pseries/ras.c39
-rw-r--r--arch/powerpc/platforms/pseries/setup.c241
-rw-r--r--arch/powerpc/platforms/pseries/smp.c31
-rw-r--r--arch/powerpc/sysdev/axonram.c7
-rw-r--r--arch/powerpc/sysdev/cpm_common.c22
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c184
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c8
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c8
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h6
-rw-r--r--arch/powerpc/sysdev/xics/Makefile2
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c144
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c5
-rw-r--r--arch/powerpc/xmon/xmon.c107
-rw-r--r--arch/s390/Kconfig16
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/boot/compressed/Makefile2
-rw-r--r--arch/s390/configs/default_defconfig45
-rw-r--r--arch/s390/configs/gcov_defconfig35
-rw-r--r--arch/s390/configs/performance_defconfig37
-rw-r--r--arch/s390/configs/zfcpdump_defconfig4
-rw-r--r--arch/s390/crypto/Makefile3
-rw-r--r--arch/s390/crypto/aes_s390.c113
-rw-r--r--arch/s390/crypto/crc32-vx.c310
-rw-r--r--arch/s390/crypto/crc32be-vx.S207
-rw-r--r--arch/s390/crypto/crc32le-vx.S268
-rw-r--r--arch/s390/defconfig48
-rw-r--r--arch/s390/hypfs/hypfs_diag.c14
-rw-r--r--arch/s390/hypfs/hypfs_vm.c2
-rw-r--r--arch/s390/include/asm/atomic.h40
-rw-r--r--arch/s390/include/asm/cache.h5
-rw-r--r--arch/s390/include/asm/cio.h2
-rw-r--r--arch/s390/include/asm/cpu_mf.h17
-rw-r--r--arch/s390/include/asm/diag.h2
-rw-r--r--arch/s390/include/asm/etr.h261
-rw-r--r--arch/s390/include/asm/fcx.h2
-rw-r--r--arch/s390/include/asm/fpu/api.h77
-rw-r--r--arch/s390/include/asm/fpu/types.h10
-rw-r--r--arch/s390/include/asm/hugetlb.h5
-rw-r--r--arch/s390/include/asm/ipl.h10
-rw-r--r--arch/s390/include/asm/irq.h7
-rw-r--r--arch/s390/include/asm/jump_label.h1
-rw-r--r--arch/s390/include/asm/kprobes.h4
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/mathemu.h28
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h15
-rw-r--r--arch/s390/include/asm/page.h8
-rw-r--r--arch/s390/include/asm/perf_event.h12
-rw-r--r--arch/s390/include/asm/pgtable.h272
-rw-r--r--arch/s390/include/asm/processor.h17
-rw-r--r--arch/s390/include/asm/rwsem.h37
-rw-r--r--arch/s390/include/asm/sections.h1
-rw-r--r--arch/s390/include/asm/setup.h4
-rw-r--r--arch/s390/include/asm/sfp-machine.h142
-rw-r--r--arch/s390/include/asm/sfp-util.h67
-rw-r--r--arch/s390/include/asm/sigp.h17
-rw-r--r--arch/s390/include/asm/spinlock.h3
-rw-r--r--arch/s390/include/asm/stp.h51
-rw-r--r--arch/s390/include/asm/timex.h66
-rw-r--r--arch/s390/include/asm/tlb.h22
-rw-r--r--arch/s390/include/asm/tlbflush.h31
-rw-r--r--arch/s390/include/asm/topology.h4
-rw-r--r--arch/s390/include/asm/uaccess.h65
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h6
-rw-r--r--arch/s390/kernel/Makefile5
-rw-r--r--arch/s390/kernel/cache.c7
-rw-r--r--arch/s390/kernel/dis.c1
-rw-r--r--arch/s390/kernel/dumpstack.c12
-rw-r--r--arch/s390/kernel/early.c22
-rw-r--r--arch/s390/kernel/entry.S11
-rw-r--r--arch/s390/kernel/fpu.c249
-rw-r--r--arch/s390/kernel/ipl.c32
-rw-r--r--arch/s390/kernel/irq.c7
-rw-r--r--arch/s390/kernel/machine_kexec.c55
-rw-r--r--arch/s390/kernel/nmi.c13
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c54
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c59
-rw-r--r--arch/s390/kernel/perf_event.c30
-rw-r--r--arch/s390/kernel/processor.c114
-rw-r--r--arch/s390/kernel/ptrace.c30
-rw-r--r--arch/s390/kernel/setup.c40
-rw-r--r--arch/s390/kernel/smp.c18
-rw-r--r--arch/s390/kernel/sysinfo.c63
-rw-r--r--arch/s390/kernel/time.c1053
-rw-r--r--arch/s390/kernel/topology.c89
-rw-r--r--arch/s390/kernel/vdso32/Makefile2
-rw-r--r--arch/s390/kernel/vdso64/Makefile2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S21
-rw-r--r--arch/s390/kvm/intercept.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c5
-rw-r--r--arch/s390/lib/string.c50
-rw-r--r--arch/s390/lib/uaccess.c6
-rw-r--r--arch/s390/mm/dump_pagetables.c2
-rw-r--r--arch/s390/mm/fault.c5
-rw-r--r--arch/s390/mm/gmap.c7
-rw-r--r--arch/s390/mm/gup.c45
-rw-r--r--arch/s390/mm/hugetlbpage.c129
-rw-r--r--arch/s390/mm/init.c13
-rw-r--r--arch/s390/mm/page-states.c13
-rw-r--r--arch/s390/mm/pageattr.c267
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/mm/pgtable.c95
-rw-r--r--arch/s390/mm/vmem.c73
-rw-r--r--arch/s390/net/bpf_jit.h4
-rw-r--r--arch/s390/net/bpf_jit_comp.c4
-rw-r--r--arch/s390/numa/mode_emu.c25
-rw-r--r--arch/s390/oprofile/Makefile1
-rw-r--r--arch/s390/oprofile/hwsampler.c1178
-rw-r--r--arch/s390/oprofile/hwsampler.h63
-rw-r--r--arch/s390/oprofile/init.c489
-rw-r--r--arch/s390/oprofile/op_counter.h21
-rw-r--r--arch/s390/pci/pci_dma.c4
-rw-r--r--arch/s390/pci/pci_event.c3
-rw-r--r--arch/s390/pci/pci_insn.c12
-rw-r--r--arch/score/include/asm/pgalloc.h5
-rw-r--r--arch/score/mm/fault.c2
-rw-r--r--arch/sh/Kconfig11
-rw-r--r--arch/sh/boards/Kconfig17
-rw-r--r--arch/sh/boards/mach-highlander/Kconfig2
-rw-r--r--arch/sh/boards/mach-rsk/Kconfig6
-rw-r--r--arch/sh/boards/of-generic.c16
-rw-r--r--arch/sh/include/asm/atomic-grb.h34
-rw-r--r--arch/sh/include/asm/atomic-irq.h31
-rw-r--r--arch/sh/include/asm/atomic-llsc.h32
-rw-r--r--arch/sh/include/asm/pgalloc.h4
-rw-r--r--arch/sh/include/asm/spinlock.h10
-rw-r--r--arch/sh/include/asm/tlb.h20
-rw-r--r--arch/sh/kernel/perf_event.c23
-rw-r--r--arch/sh/mm/fault.c2
-rw-r--r--arch/sh/mm/pgtable.c2
-rw-r--r--arch/sparc/include/asm/atomic_32.h13
-rw-r--r--arch/sparc/include/asm/atomic_64.h16
-rw-r--r--arch/sparc/include/asm/head_64.h4
-rw-r--r--arch/sparc/include/asm/hugetlb.h12
-rw-r--r--arch/sparc/include/asm/mmu_64.h3
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h6
-rw-r--r--arch/sparc/include/asm/pgtable_64.h7
-rw-r--r--arch/sparc/include/asm/spinlock_32.h7
-rw-r--r--arch/sparc/include/asm/spinlock_64.h10
-rw-r--r--arch/sparc/include/asm/tsb.h2
-rw-r--r--arch/sparc/include/asm/ttable.h8
-rw-r--r--arch/sparc/kernel/Makefile1
-rw-r--r--arch/sparc/kernel/dtlb_prot.S4
-rw-r--r--arch/sparc/kernel/irq_32.c4
-rw-r--r--arch/sparc/kernel/irq_64.c2
-rw-r--r--arch/sparc/kernel/ktlb.S12
-rw-r--r--arch/sparc/kernel/rtrap_64.S57
-rw-r--r--arch/sparc/kernel/signal32.c46
-rw-r--r--arch/sparc/kernel/signal_32.c41
-rw-r--r--arch/sparc/kernel/signal_64.c31
-rw-r--r--arch/sparc/kernel/sigutil_32.c9
-rw-r--r--arch/sparc/kernel/sigutil_64.c10
-rw-r--r--arch/sparc/kernel/tsb.S12
-rw-r--r--arch/sparc/kernel/urtt_fill.S98
-rw-r--r--arch/sparc/lib/atomic32.c29
-rw-r--r--arch/sparc/lib/atomic_64.S61
-rw-r--r--arch/sparc/lib/ksyms.c17
-rw-r--r--arch/sparc/mm/fault_32.c4
-rw-r--r--arch/sparc/mm/fault_64.c12
-rw-r--r--arch/sparc/mm/hugetlbpage.c170
-rw-r--r--arch/sparc/mm/init_64.c23
-rw-r--r--arch/sparc/mm/tlb.c4
-rw-r--r--arch/sparc/mm/tsb.c14
-rw-r--r--arch/tile/include/asm/atomic.h2
-rw-r--r--arch/tile/include/asm/atomic_32.h74
-rw-r--r--arch/tile/include/asm/atomic_64.h115
-rw-r--r--arch/tile/include/asm/barrier.h7
-rw-r--r--arch/tile/include/asm/bitops_32.h18
-rw-r--r--arch/tile/include/asm/elf.h1
-rw-r--r--arch/tile/include/asm/futex.h14
-rw-r--r--arch/tile/include/asm/setup.h5
-rw-r--r--arch/tile/include/asm/thread_info.h2
-rw-r--r--arch/tile/include/uapi/asm/auxvec.h2
-rw-r--r--arch/tile/kernel/compat.c35
-rw-r--r--arch/tile/kernel/process.c3
-rw-r--r--arch/tile/kernel/ptrace.c11
-rw-r--r--arch/tile/kernel/sys.c13
-rw-r--r--arch/tile/lib/atomic_32.c50
-rw-r--r--arch/tile/lib/atomic_asm_32.S27
-rw-r--r--arch/tile/lib/exports.c6
-rw-r--r--arch/tile/lib/spinlock_32.c6
-rw-r--r--arch/tile/lib/spinlock_64.c6
-rw-r--r--arch/tile/mm/fault.c2
-rw-r--r--arch/tile/mm/pgtable.c20
-rw-r--r--arch/um/drivers/ubd_kern.c7
-rw-r--r--arch/um/include/asm/tlb.h20
-rw-r--r--arch/um/kernel/mem.c4
-rw-r--r--arch/um/kernel/skas/syscall.c9
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/configs/unicore32_defconfig2
-rw-r--r--arch/unicore32/include/asm/pgalloc.h2
-rw-r--r--arch/unicore32/kernel/gpio.c2
-rw-r--r--arch/unicore32/mm/fault.c2
-rw-r--r--arch/x86/Kconfig77
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/Makefile3
-rw-r--r--arch/x86/boot/bitops.h8
-rw-r--r--arch/x86/boot/boot.h18
-rw-r--r--arch/x86/boot/compressed/Makefile18
-rw-r--r--arch/x86/boot/compressed/eboot.c2
-rw-r--r--arch/x86/boot/compressed/kaslr.c251
-rw-r--r--arch/x86/boot/compressed/misc.c49
-rw-r--r--arch/x86/boot/compressed/misc.h25
-rw-r--r--arch/x86/boot/compressed/pagetable.c29
-rw-r--r--arch/x86/boot/cpu.c2
-rw-r--r--arch/x86/boot/cpucheck.c33
-rw-r--r--arch/x86/boot/cpuflags.c1
-rw-r--r--arch/x86/boot/cpuflags.h1
-rw-r--r--arch/x86/boot/string.c2
-rw-r--r--arch/x86/crypto/Makefile4
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c94
-rw-r--r--arch/x86/crypto/chacha20_glue.c2
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c40
-rw-r--r--arch/x86/crypto/sha1-mb/Makefile (renamed from arch/x86/crypto/sha-mb/Makefile)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c (renamed from arch/x86/crypto/sha-mb/sha1_mb.c)288
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_ctx.h (renamed from arch/x86/crypto/sha-mb/sha_mb_ctx.h)2
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr.h (renamed from arch/x86/crypto/sha-mb/sha_mb_mgr.h)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_datastruct.S)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c)2
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_x8_avx2.S (renamed from arch/x86/crypto/sha-mb/sha1_x8_avx2.S)0
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c6
-rw-r--r--arch/x86/crypto/sha256-mb/Makefile11
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c1030
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_ctx.h136
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr.h108
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S304
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S304
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c65
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S215
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_x8_avx2.S593
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c10
-rw-r--r--arch/x86/crypto/sha512-mb/Makefile11
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c1046
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_ctx.h130
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr.h104
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S281
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S291
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c67
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S222
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_x4_avx2.S529
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c6
-rw-r--r--arch/x86/entry/common.c112
-rw-r--r--arch/x86/entry/entry_32.S11
-rw-r--r--arch/x86/entry/entry_64.S11
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl4
-rw-r--r--arch/x86/entry/thunk_64.S6
-rw-r--r--arch/x86/entry/vdso/Makefile7
-rw-r--r--arch/x86/entry/vdso/vdso32/sigreturn.S8
-rw-r--r--arch/x86/entry/vdso/vdso32/system_call.S7
-rw-r--r--arch/x86/entry/vdso/vma.c67
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c12
-rw-r--r--arch/x86/events/amd/core.c6
-rw-r--r--arch/x86/events/amd/ibs.c75
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/amd/power.c60
-rw-r--r--arch/x86/events/amd/uncore.c122
-rw-r--r--arch/x86/events/core.c142
-rw-r--r--arch/x86/events/intel/Makefile4
-rw-r--r--arch/x86/events/intel/core.c234
-rw-r--r--arch/x86/events/intel/cqm.c49
-rw-r--r--arch/x86/events/intel/cstate.c98
-rw-r--r--arch/x86/events/intel/lbr.c124
-rw-r--r--arch/x86/events/intel/rapl.c118
-rw-r--r--arch/x86/events/intel/uncore.c223
-rw-r--r--arch/x86/events/intel/uncore.h6
-rw-r--r--arch/x86/events/intel/uncore_snb.c67
-rw-r--r--arch/x86/events/intel/uncore_snbep.c117
-rw-r--r--arch/x86/events/msr.c63
-rw-r--r--arch/x86/events/perf_event.h12
-rw-r--r--arch/x86/include/asm/Kbuild6
-rw-r--r--arch/x86/include/asm/acpi.h3
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/apm.h6
-rw-r--r--arch/x86/include/asm/arch_hweight.h24
-rw-r--r--arch/x86/include/asm/archrandom.h132
-rw-r--r--arch/x86/include/asm/asm.h12
-rw-r--r--arch/x86/include/asm/atomic.h51
-rw-r--r--arch/x86/include/asm/atomic64_32.h25
-rw-r--r--arch/x86/include/asm/atomic64_64.h53
-rw-r--r--arch/x86/include/asm/bios_ebda.h2
-rw-r--r--arch/x86/include/asm/bitops.h50
-rw-r--r--arch/x86/include/asm/checksum_32.h3
-rw-r--r--arch/x86/include/asm/compat.h11
-rw-r--r--arch/x86/include/asm/cpu.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h90
-rw-r--r--arch/x86/include/asm/cpufeatures.h9
-rw-r--r--arch/x86/include/asm/disabled-features.h2
-rw-r--r--arch/x86/include/asm/efi.h10
-rw-r--r--arch/x86/include/asm/fpu/internal.h5
-rw-r--r--arch/x86/include/asm/fpu/types.h7
-rw-r--r--arch/x86/include/asm/fpu/xstate.h10
-rw-r--r--arch/x86/include/asm/inat.h17
-rw-r--r--arch/x86/include/asm/insn.h12
-rw-r--r--arch/x86/include/asm/intel-family.h68
-rw-r--r--arch/x86/include/asm/intel-mid.h88
-rw-r--r--arch/x86/include/asm/kaslr.h15
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/kprobes.h11
-rw-r--r--arch/x86/include/asm/kvm_host.h11
-rw-r--r--arch/x86/include/asm/livepatch.h1
-rw-r--r--arch/x86/include/asm/local.h16
-rw-r--r--arch/x86/include/asm/microcode.h26
-rw-r--r--arch/x86/include/asm/microcode_amd.h1
-rw-r--r--arch/x86/include/asm/microcode_intel.h5
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/include/asm/msr.h4
-rw-r--r--arch/x86/include/asm/mutex_32.h2
-rw-r--r--arch/x86/include/asm/mutex_64.h6
-rw-r--r--arch/x86/include/asm/mwait.h2
-rw-r--r--arch/x86/include/asm/page_32_types.h3
-rw-r--r--arch/x86/include/asm/page_64_types.h11
-rw-r--r--arch/x86/include/asm/percpu.h17
-rw-r--r--arch/x86/include/asm/pgalloc.h12
-rw-r--r--arch/x86/include/asm/pgtable.h30
-rw-r--r--arch/x86/include/asm/pgtable_64.h26
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h15
-rw-r--r--arch/x86/include/asm/pgtable_types.h8
-rw-r--r--arch/x86/include/asm/pmem.h77
-rw-r--r--arch/x86/include/asm/preempt.h2
-rw-r--r--arch/x86/include/asm/processor.h23
-rw-r--r--arch/x86/include/asm/ptrace.h6
-rw-r--r--arch/x86/include/asm/pvclock.h25
-rw-r--r--arch/x86/include/asm/required-features.h2
-rw-r--r--arch/x86/include/asm/rmwcc.h20
-rw-r--r--arch/x86/include/asm/rwsem.h35
-rw-r--r--arch/x86/include/asm/signal.h6
-rw-r--r--arch/x86/include/asm/smp.h9
-rw-r--r--arch/x86/include/asm/special_insns.h46
-rw-r--r--arch/x86/include/asm/stacktrace.h6
-rw-r--r--arch/x86/include/asm/sync_bitops.h18
-rw-r--r--arch/x86/include/asm/thread_info.h9
-rw-r--r--arch/x86/include/asm/topology.h22
-rw-r--r--arch/x86/include/asm/trace/fpu.h119
-rw-r--r--arch/x86/include/asm/tsc.h5
-rw-r--r--arch/x86/include/asm/uaccess.h33
-rw-r--r--arch/x86/include/asm/unistd.h2
-rw-r--r--arch/x86/include/asm/vmx.h1
-rw-r--r--arch/x86/include/asm/x86_init.h11
-rw-r--r--arch/x86/include/asm/xen/cpuid.h5
-rw-r--r--arch/x86/include/uapi/asm/vmx.h4
-rw-r--r--arch/x86/kernel/acpi/boot.c18
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/amd_gart_64.c1
-rw-r--r--arch/x86/kernel/amd_nb.c43
-rw-r--r--arch/x86/kernel/apb_timer.c29
-rw-r--r--arch/x86/kernel/apic/apic.c10
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c4
-rw-r--r--arch/x86/kernel/apic/apic_noop.c2
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c1
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c25
-rw-r--r--arch/x86/kernel/apic/ipi.c1
-rw-r--r--arch/x86/kernel/apic/probe_32.c3
-rw-r--r--arch/x86/kernel/apic/probe_64.c3
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c81
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c7
-rw-r--r--arch/x86/kernel/asm-offsets.c4
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c3
-rw-r--r--arch/x86/kernel/cpu/intel.c14
-rw-r--r--arch/x86/kernel/cpu/match.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c6
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c33
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c10
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c261
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_lib.c2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c3
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c2
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c2
-rw-r--r--arch/x86/kernel/cpu/rdrand.c4
-rw-r--r--arch/x86/kernel/cpu/vmware.c3
-rw-r--r--arch/x86/kernel/crash.c2
-rw-r--r--arch/x86/kernel/dumpstack.c47
-rw-r--r--arch/x86/kernel/dumpstack_32.c10
-rw-r--r--arch/x86/kernel/dumpstack_64.c26
-rw-r--r--arch/x86/kernel/early-quirks.c105
-rw-r--r--arch/x86/kernel/ebda.c114
-rw-r--r--arch/x86/kernel/espfix_64.c2
-rw-r--r--arch/x86/kernel/fpu/core.c33
-rw-r--r--arch/x86/kernel/fpu/init.c34
-rw-r--r--arch/x86/kernel/fpu/regset.c52
-rw-r--r--arch/x86/kernel/fpu/signal.c46
-rw-r--r--arch/x86/kernel/fpu/xstate.c451
-rw-r--r--arch/x86/kernel/head32.c2
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_64.S3
-rw-r--r--arch/x86/kernel/hpet.c69
-rw-r--r--arch/x86/kernel/hw_breakpoint.c3
-rw-r--r--arch/x86/kernel/i386_ksyms_32.c5
-rw-r--r--arch/x86/kernel/i8253.c2
-rw-r--r--arch/x86/kernel/io_delay.c2
-rw-r--r--arch/x86/kernel/irq_32.c3
-rw-r--r--arch/x86/kernel/irq_64.c1
-rw-r--r--arch/x86/kernel/kdebugfs.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c12
-rw-r--r--arch/x86/kernel/kvm.c4
-rw-r--r--arch/x86/kernel/mpparse.c1
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c2
-rw-r--r--arch/x86/kernel/paravirt.c3
-rw-r--r--arch/x86/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86/kernel/platform-quirks.c4
-rw-r--r--arch/x86/kernel/pmem.c2
-rw-r--r--arch/x86/kernel/process.c5
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/pvclock.c11
-rw-r--r--arch/x86/kernel/reboot.c23
-rw-r--r--arch/x86/kernel/setup.c14
-rw-r--r--arch/x86/kernel/setup_percpu.c3
-rw-r--r--arch/x86/kernel/signal_compat.c108
-rw-r--r--arch/x86/kernel/smpboot.c30
-rw-r--r--arch/x86/kernel/stacktrace.c2
-rw-r--r--arch/x86/kernel/tboot.c25
-rw-r--r--arch/x86/kernel/test_rodata.c5
-rw-r--r--arch/x86/kernel/traps.c22
-rw-r--r--arch/x86/kernel/tsc.c102
-rw-r--r--arch/x86/kernel/tsc_msr.c68
-rw-r--r--arch/x86/kernel/vm86_32.c5
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c6
-rw-r--r--arch/x86/kernel/x86_init.c3
-rw-r--r--arch/x86/kvm/cpuid.c26
-rw-r--r--arch/x86/kvm/cpuid.h8
-rw-r--r--arch/x86/kvm/emulate.c1
-rw-r--r--arch/x86/kvm/iommu.c2
-rw-r--r--arch/x86/kvm/irq.c2
-rw-r--r--arch/x86/kvm/lapic.c5
-rw-r--r--arch/x86/kvm/mmu.c11
-rw-r--r--arch/x86/kvm/mtrr.c1
-rw-r--r--arch/x86/kvm/svm.c21
-rw-r--r--arch/x86/kvm/vmx.c146
-rw-r--r--arch/x86/kvm/x86.c62
-rw-r--r--arch/x86/kvm/x86.h7
-rw-r--r--arch/x86/lguest/boot.c17
-rw-r--r--arch/x86/lib/Makefile3
-rw-r--r--arch/x86/lib/cache-smp.c2
-rw-r--r--arch/x86/lib/copy_user_64.S8
-rw-r--r--arch/x86/lib/cpu.c3
-rw-r--r--arch/x86/lib/csum-partial_64.c2
-rw-r--r--arch/x86/lib/csum-wrappers_64.c3
-rw-r--r--arch/x86/lib/delay.c2
-rw-r--r--arch/x86/lib/getuser.S20
-rw-r--r--arch/x86/lib/hweight.S77
-rw-r--r--arch/x86/lib/insn.c18
-rw-r--r--arch/x86/lib/kaslr.c90
-rw-r--r--arch/x86/lib/memcpy_32.c2
-rw-r--r--arch/x86/lib/mmx_32.c2
-rw-r--r--arch/x86/lib/msr-reg-export.c2
-rw-r--r--arch/x86/lib/msr-smp.c2
-rw-r--r--arch/x86/lib/msr.c3
-rw-r--r--arch/x86/lib/putuser.S10
-rw-r--r--arch/x86/lib/string_32.c2
-rw-r--r--arch/x86/lib/usercopy.c2
-rw-r--r--arch/x86/lib/usercopy_32.c2
-rw-r--r--arch/x86/lib/usercopy_64.c4
-rw-r--r--arch/x86/lib/x86-opcode-map.txt265
-rw-r--r--arch/x86/mm/Makefile1
-rw-r--r--arch/x86/mm/amdtopology.c1
-rw-r--r--arch/x86/mm/dump_pagetables.c22
-rw-r--r--arch/x86/mm/extable.c15
-rw-r--r--arch/x86/mm/fault.c6
-rw-r--r--arch/x86/mm/highmem_32.c2
-rw-r--r--arch/x86/mm/init.c11
-rw-r--r--arch/x86/mm/init_32.c1
-rw-r--r--arch/x86/mm/init_64.c205
-rw-r--r--arch/x86/mm/iomap_32.c2
-rw-r--r--arch/x86/mm/ioremap.c1
-rw-r--r--arch/x86/mm/kasan_init_64.c4
-rw-r--r--arch/x86/mm/kaslr.c172
-rw-r--r--arch/x86/mm/kmemcheck/kmemcheck.c1
-rw-r--r--arch/x86/mm/kmemcheck/shadow.c2
-rw-r--r--arch/x86/mm/kmmio.c2
-rw-r--r--arch/x86/mm/mmio-mod.c2
-rw-r--r--arch/x86/mm/numa.c3
-rw-r--r--arch/x86/mm/numa_32.c2
-rw-r--r--arch/x86/mm/pageattr.c49
-rw-r--r--arch/x86/mm/pat.c6
-rw-r--r--arch/x86/mm/pat_rbtree.c1
-rw-r--r--arch/x86/mm/pf_in.c1
-rw-r--r--arch/x86/mm/pgtable.c10
-rw-r--r--arch/x86/mm/pgtable_32.c3
-rw-r--r--arch/x86/mm/physaddr.c2
-rw-r--r--arch/x86/mm/srat.c118
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/pci/acpi.c1
-rw-r--r--arch/x86/pci/intel_mid_pci.c51
-rw-r--r--arch/x86/pci/vmd.c2
-rw-r--r--arch/x86/pci/xen.c2
-rw-r--r--arch/x86/platform/atom/punit_atom_debug.c20
-rw-r--r--arch/x86/platform/ce4100/ce4100.c2
-rw-r--r--arch/x86/platform/efi/efi.c17
-rw-r--r--arch/x86/platform/efi/efi_32.c3
-rw-r--r--arch/x86/platform/efi/efi_64.c30
-rw-r--r--arch/x86/platform/intel-mid/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile10
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c43
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c99
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_spidev.c50
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c12
-rw-r--r--arch/x86/platform/intel-mid/mrfld.c (renamed from arch/x86/platform/intel-mid/mrfl.c)2
-rw-r--r--arch/x86/platform/intel-mid/pwr.c416
-rw-r--r--arch/x86/platform/intel-mid/sfi.c31
-rw-r--r--arch/x86/platform/olpc/olpc.c2
-rw-r--r--arch/x86/platform/olpc/olpc_ofw.c5
-rw-r--r--arch/x86/platform/ts5500/ts5500.c6
-rw-r--r--arch/x86/platform/uv/bios_uv.c3
-rw-r--r--arch/x86/platform/uv/uv_irq.c2
-rw-r--r--arch/x86/platform/uv/uv_nmi.c2
-rw-r--r--arch/x86/power/cpu.c30
-rw-r--r--arch/x86/power/hibernate_64.c97
-rw-r--r--arch/x86/power/hibernate_asm_64.S59
-rw-r--r--arch/x86/ras/mce_amd_inj.c58
-rw-r--r--arch/x86/realmode/init.c5
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk11
-rw-r--r--arch/x86/um/delay.c2
-rw-r--r--arch/x86/xen/apic.c1
-rw-r--r--arch/x86/xen/debugfs.c1
-rw-r--r--arch/x86/xen/efi.c111
-rw-r--r--arch/x86/xen/enlighten.c55
-rw-r--r--arch/x86/xen/grant-table.c57
-rw-r--r--arch/x86/xen/irq.c3
-rw-r--r--arch/x86/xen/mmu.c77
-rw-r--r--arch/x86/xen/p2m.c4
-rw-r--r--arch/x86/xen/platform-pci-unplug.c2
-rw-r--r--arch/x86/xen/pmu.c2
-rw-r--r--arch/x86/xen/setup.c2
-rw-r--r--arch/x86/xen/smp.c18
-rw-r--r--arch/x86/xen/time.c63
-rw-r--r--arch/x86/xen/xen-ops.h1
-rw-r--r--arch/xtensa/include/asm/atomic.h52
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/spinlock.h10
-rw-r--r--arch/xtensa/kernel/perf_event.c26
-rw-r--r--arch/xtensa/kernel/setup.c3
-rw-r--r--arch/xtensa/mm/fault.c2
-rw-r--r--arch/xtensa/platforms/xt2000/setup.c1
1507 files changed, 33036 insertions, 15925 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index d794384a0404..15996290fed4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -226,8 +226,8 @@ config ARCH_INIT_TASK
config ARCH_TASK_STRUCT_ALLOCATOR
bool
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
bool
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -606,6 +606,9 @@ config HAVE_ARCH_HASH
file which provides platform-specific implementations of some
functions in <linux/hash.h> or fs/namei.c.
+config ISA_BUS_API
+ def_bool ISA
+
#
# ABI hall of shame
#
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 572b228c44c7..498933a7df97 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -46,10 +46,9 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
- smp_mb(); \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
" " #asm_op " %0,%3,%2\n" \
@@ -61,7 +60,23 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_mb(); \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, asm_op) \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ long temp, result; \
+ __asm__ __volatile__( \
+ "1: ldl_l %2,%1\n" \
+ " " #asm_op " %2,%3,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
return result; \
}
@@ -82,10 +97,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long temp, result; \
- smp_mb(); \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
" " #asm_op " %0,%3,%2\n" \
@@ -97,34 +111,77 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_mb(); \
+ return result; \
+}
+
+#define ATOMIC64_FETCH_OP(op, asm_op) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+{ \
+ long temp, result; \
+ __asm__ __volatile__( \
+ "1: ldq_l %2,%1\n" \
+ " " #asm_op " %2,%3,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
return result; \
}
#define ATOMIC_OPS(op) \
ATOMIC_OP(op, op##l) \
ATOMIC_OP_RETURN(op, op##l) \
+ ATOMIC_FETCH_OP(op, op##l) \
ATOMIC64_OP(op, op##q) \
- ATOMIC64_OP_RETURN(op, op##q)
+ ATOMIC64_OP_RETURN(op, op##q) \
+ ATOMIC64_FETCH_OP(op, op##q)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, bis)
-ATOMIC_OP(xor, xor)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, bis)
-ATOMIC64_OP(xor, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm) \
+ ATOMIC_OP(op, asm) \
+ ATOMIC_FETCH_OP(op, asm) \
+ ATOMIC64_OP(op, asm) \
+ ATOMIC64_FETCH_OP(op, asm)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, bis)
+ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
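Note: the hunk above drops the smp_mb() pairs and renames the return/fetch primitives to their _relaxed forms. Ordering is not lost: when an architecture supplies only _relaxed variants, the generic atomic layer brackets them with barriers to synthesize the fully ordered ones. A rough sketch of that fallback, modelled on the __atomic_op_fence() helper in <linux/atomic.h> (illustrative, not the exact macro text):

#define __atomic_op_fence(op, args...)                                  \
({                                                                      \
        typeof(op##_relaxed(args)) __ret;                               \
        smp_mb__before_atomic();        /* full barrier before the op */\
        __ret = op##_relaxed(args);                                     \
        smp_mb__after_atomic();         /* full barrier after the op  */\
        __ret;                                                          \
})

/* so the ordered atomic_add_return() can be built as, roughly: */
#define atomic_add_return(...)                                          \
        __atomic_op_fence(atomic_add_return, __VA_ARGS__)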
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a019c20..c2ebb6f36c9d 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return ret;
}
@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 0131a7058778..77873d0ad293 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -25,8 +25,8 @@ static inline void __down_read(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
- oldcount = sem->count;
- sem->count += RWSEM_ACTIVE_READ_BIAS;
+ oldcount = sem->count.counter;
+ sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
#else
long temp;
__asm__ __volatile__(
@@ -52,13 +52,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
{
long old, new, res;
- res = sem->count;
+ res = atomic_long_read(&sem->count);
do {
new = res + RWSEM_ACTIVE_READ_BIAS;
if (new <= 0)
break;
old = res;
- res = cmpxchg(&sem->count, old, new);
+ res = atomic_long_cmpxchg(&sem->count, old, new);
} while (res != old);
return res >= 0 ? 1 : 0;
}
@@ -67,8 +67,8 @@ static inline long ___down_write(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
- oldcount = sem->count;
- sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+ oldcount = sem->count.counter;
+ sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
#else
long temp;
__asm__ __volatile__(
@@ -106,7 +106,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
*/
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
if (ret == RWSEM_UNLOCKED_VALUE)
return 1;
@@ -117,8 +117,8 @@ static inline void __up_read(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
- oldcount = sem->count;
- sem->count -= RWSEM_ACTIVE_READ_BIAS;
+ oldcount = sem->count.counter;
+ sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
#else
long temp;
__asm__ __volatile__(
@@ -142,8 +142,8 @@ static inline void __up_write(struct rw_semaphore *sem)
{
long count;
#ifndef CONFIG_SMP
- sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
- count = sem->count;
+ sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
+ count = sem->count.counter;
#else
long temp;
__asm__ __volatile__(
@@ -171,8 +171,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
- oldcount = sem->count;
- sem->count -= RWSEM_WAITING_BIAS;
+ oldcount = sem->count.counter;
+ sem->count.counter -= RWSEM_WAITING_BIAS;
#else
long temp;
__asm__ __volatile__(
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
}
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
- sem->count += val;
-#else
- long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (sem->count)
- :"Ir" (val), "m" (sem->count));
-#endif
-}
-
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
- sem->count += val;
- return sem->count;
-#else
- long ret, temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " addq %0,%3,%0\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
- :"Ir" (val), "m" (sem->count));
-
- return ret;
-#endif
-}
-
#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */
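Note: with ->count handled as an atomic_long_t, the hand-rolled LL/SC helpers removed above become redundant; the generic rwsem code can express the same updates with the ordinary atomic_long API. A hedged sketch of the equivalents of the two deleted helpers (the wrapper names here are made up for illustration):

/* Illustrative only -- generic code calls atomic_long_* directly. */
static inline void rwsem_count_add(long val, struct rw_semaphore *sem)
{
        atomic_long_add(val, &sem->count);
}

static inline long rwsem_count_update(long val, struct rw_semaphore *sem)
{
        return atomic_long_add_return(val, &sem->count);
}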
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index fed9c6f44c19..a40b9fc0c6c3 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -3,6 +3,8 @@
#include <linux/kernel.h>
#include <asm/current.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -13,8 +15,11 @@
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
-#define arch_spin_unlock_wait(x) \
- do { cpu_relax(); } while ((x)->lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, !VAL);
+}
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
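Note: arch_spin_unlock_wait() now waits via smp_cond_load_acquire() rather than an open-coded cpu_relax() loop, which also gives the caller acquire ordering once the lock is observed clear. The generic helper expands to roughly the following (sketch modelled on the asm-generic definition; exact macro text may differ):

#define smp_cond_load_acquire(ptr, cond_expr)                   \
({                                                              \
        typeof(ptr) __PTR = (ptr);                              \
        typeof(*ptr) VAL;                                       \
        for (;;) {                                              \
                VAL = READ_ONCE(*__PTR); /* re-read the word  */\
                if (cond_expr)           /* VAL usable here   */\
                        break;                                  \
                cpu_relax();                                    \
        }                                                       \
        smp_acquire__after_ctrl_dep();   /* upgrade to acquire */\
        VAL;                                                    \
})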
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4a905bd667e2..83e9eee57a55 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -147,7 +147,7 @@ retry:
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0dcbacfdea4b..0d3e59f56974 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
def_bool y
config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
+ def_bool n
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -186,9 +186,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
-config ARC_HAS_REENTRANT_IRQ_LV2
- def_bool n
-
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
- bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+ bool "Setup Timer IRQ as high Priority"
default n
- # Timer HAS to be high priority, for any other high priority config
- select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
- depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
- bool
-
-config ARC_IRQ5_LV2
- bool
-
-config ARC_IRQ6_LV2
- bool
-
-endif #ARC_COMPACT_IRQ_LEVELS
+ depends on !SMP
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
default y
depends on !ARC_CANT_LLSC
-config ARC_STAR_9000923308
- bool "Workaround for llock/scond livelock"
- default n
- depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
config HIGHMEM
bool "High Memory Support"
- select DISCONTIGMEM
+ select ARCH_DISCONTIGMEM_ENABLE
help
With ARC 2G:2G address split, only upper 2G is directly addressable by
kernel. Enable this to potentially allow access to rest of 2G and PAE
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 02fabef2891c..601ed173080b 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -66,8 +66,6 @@ endif
endif
-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
-
# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
ifeq ($(atleast_gcc48),y)
cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2
@@ -76,9 +74,7 @@ endif
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
# Note: No need to add to cflags-y as that happens anyways
-#
-# Disable the false maybe-uninitialized warings gcc spits out at -O3
-ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
+ARCH_CFLAGS += -O3
endif
# small data is default for elf32 tool-chain. If not usable, disable it
@@ -127,7 +123,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
-#default target for make without any arguements.
+#default target for make without any arguments.
KBUILD_IMAGE := bootpImage
all: $(KBUILD_IMAGE)
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index 3942634f805a..02410b211433 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -23,8 +23,6 @@
/ {
- clock-frequency = <500000000>; /* 500 MHZ */
-
soc100 {
bus-frequency = <166666666>;
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index b0467229a5c4..f9e7686044eb 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -23,8 +23,6 @@
/ {
- clock-frequency = <500000000>; /* 500 MHZ */
-
soc100 {
bus-frequency = <166666666>;
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 3e02f152edcb..6ae2c476ad82 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -15,7 +15,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <750000000>; /* 750 MHZ */
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 378e455a94c4..14df46f141bf 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 64c94b2860ab..3d6cfa32bf51 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts
index b89f6c3eb352..1e0d225791c1 100644
--- a/arch/arc/boot/dts/eznps.dts
+++ b/arch/arc/boot/dts/eznps.dts
@@ -18,7 +18,6 @@
/ {
compatible = "ezchip,arc-nps";
- clock-frequency = <83333333>; /* 83.333333 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 5d5e373e0ebc..63970513e4ae 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -11,7 +11,6 @@
/ {
compatible = "snps,nsim";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index b5b060adce8a..763d66c883da 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci";
- clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 325e73090a18..4eb97c584b18 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci_hs";
- clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index ee03d7126581..853f897eb2a3 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci_hs";
- clock-frequency = <5000000>; /* 5 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 3a10cc633e2b..65808fe0a290 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -13,7 +13,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 71fd308a9298..2dfe8037dfbb 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -8,7 +8,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index d1cb25a66989..4c11079f3565 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -8,7 +8,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index ad4ee43bd2ac..0fd6ba985b16 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index a3cb6263c581..82214cd7ba0c 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -15,7 +15,6 @@
/ {
compatible = "snps,arc";
- clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5f3dcbbc0cc9..4e3c1b6b0806 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,50 +25,17 @@
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay = 1, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " bz 4f \n" \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- "4: ; --- success --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
-
-#else /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM \
- " bnz 1b \n" \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
@@ -104,6 +67,33 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return val; \
}
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned int val, orig; \
+ \
+ /* \
+ * Explicit full memory barrier needed before/after as \
+ * LLOCK/SCOND themselves don't provide any such semantics \
+ */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ "1: llock %[orig], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[orig], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " \n" \
+ : [val] "=&r" (val), \
+ [orig] "=&r" (orig) \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
+ : "cc"); \
+ \
+ smp_mb(); \
+ \
+ return orig; \
+}
+
#else /* !CONFIG_ARC_HAS_LLSC */
#ifndef CONFIG_SMP
@@ -166,25 +156,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned long orig; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ orig = v->counter; \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+ \
+ return orig; \
+}
+
#endif /* !CONFIG_ARC_HAS_LLSC */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
#else /* CONFIG_ARC_PLAT_EZNPS */
@@ -245,22 +254,51 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned int temp = i; \
+ \
+ /* Explicit full memory barrier needed before/after */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ " mov %0, r2" \
+ : "+r"(temp) \
+ : "r"(&v->counter), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+ \
+ smp_mb(); \
+ \
+ return temp; \
+}
+
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
-ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
#endif /* CONFIG_ARC_PLAT_EZNPS */
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
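Note: the new atomic_fetch_*() entry points return the counter value from before the operation, while the existing *_return() forms report the value after it; both are fully ordered here thanks to the explicit smp_mb() pairs around the LLOCK/SCOND loop. A minimal usage contrast (hypothetical caller, single writer assumed):

#include <linux/atomic.h>

static void fetch_vs_return_demo(atomic_t *counter)
{
        int before, after;

        before = atomic_fetch_add(2, counter);  /* old value, then +2 */
        after  = atomic_add_return(2, counter); /* +2, then new value */

        /* with no other writers, after == before + 4 at this point */
}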
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index e0e1faf03c50..14c310f2e0b1 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -76,8 +76,8 @@
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whaco (some small value which looks like
* USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467ef658f..b0b87f2447f5 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
local_flush_tlb_all();
/*
- * Above checke for rollover of 8 bit ASID in 32 bit container.
+ * Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 86ed671286df..3749234b7419 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
__get_order_pte());
return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_pg;
struct page *page;
- pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
if (!pte_pg)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 034bbdc0ff61..0f92d97432a2 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* seperate PD0 and PD1, which combined forms a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos
@@ -110,7 +110,7 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
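Note: folding _PAGE_SPECIAL into _PAGE_CHG_MASK matters because pte_modify() keeps only the bits in that mask when a mapping's protection is changed; without it, a special PTE would quietly lose its marker across mprotect(). The helper follows the usual shape, roughly (a sketch, not ARC's exact implementation):

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        /* keep ACCESSED/DIRTY/SPECIAL etc., take the rest from newprot */
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
                     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}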
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index f9048994b22f..16b630fbeb6a 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -78,7 +78,7 @@ struct task_struct;
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 991380438d6b..89fdd1b0a76e 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* (1) These insn were introduced only in 4.10 release. So for older released
* support needed.
*
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* gaurantted by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c430ca5..233d5ffe6ec7 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -15,15 +15,13 @@
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
-#ifdef CONFIG_ARC_HAS_LLSC
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->slock, !VAL);
+}
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
+#ifdef CONFIG_ARC_HAS_LLSC
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
@@ -238,293 +236,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
smp_mb();
}
-#else /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- " \n" \
- "4: ; --- done --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- smp_mb();
-
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
- smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * zero means writer holds the lock exclusively, deny Reader.
- * Otherwise grant lock to first/subseq reader
- *
- * if (rw->counter > 0) {
- * rw->counter--;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
- " sub %[val], %[val], 1 \n" /* reader lock */
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
- " sub %[val], %[val], 1 \n" /* counter-- */
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
- * deny writer. Otherwise if unlocked grant to writer
- * Hence the claim that Linux rwlocks are unfair to writers.
- * (can be starved for an indefinite time by readers).
- *
- * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
- * rw->counter = 0;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n"
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter++;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " add %[val], %[val], 1 \n"
- " scond %[val], [%[rwlock]] \n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter))
- : "memory", "cc");
-
- smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " scond %[UNLOCKED], [%[rwlock]]\n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)
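Note: the ARC_STAR_9000923308 paths deleted above implemented an exponential back-off whenever SCOND failed, so contending cores stopped hammering the coherency unit long enough for one of them to make progress. In C-like pseudocode the removed assembly amounted to roughly this (llsc_trylock() is a stand-in for the LLOCK/SCOND sequence, not a real helper):

unsigned int delay = 1;

while (!llsc_trylock(lock)) {           /* LLOCK/compare/SCOND attempt */
        unsigned int t;

        for (t = delay; t; t--)         /* burn 'delay' iterations ... */
                cpu_relax();
        delay <<= 1;                    /* ... and double it on each fail */
}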
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 3af67455659a..2d79e527fa50 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
* syscall, so all that reamins to be tested is _TIF_WORK_MASK
*/
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index d1da6032b715..a78d5670884f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -32,7 +32,7 @@
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 095599a73195..71f3918b0fc3 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -74,7 +74,7 @@
__tmp ^ __in; \
})
-#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 0cb0abaa0479..98812c1248df 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1)
VECTOR instr_service ; 0x10, Instrn Error (0x2)
; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR handle_interrupt_level2
-#else
-VECTOR handle_interrupt_level1
-#endif
-
-VECTOR handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR handle_interrupt_level2
-#else
-VECTOR handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
-.rept 25
+.rept 28
VECTOR handle_interrupt_level1 ; Other devices
.endr
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index c5cceca36118..ce9deb953ca9 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
{
int level_mask = 0;
- /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+ /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+ level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
/*
* Write to register, even if no LV2 IRQs configured to reset it
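Note: IS_ENABLED() collapses to the constant 1 when the named Kconfig option is built in (or modular) and to 0 otherwise, so the shift above assembles the level mask entirely at compile time. A tiny illustration with a hypothetical option:

int level_mask = 0;

/* CONFIG_EXAMPLE_HIGHPRIO=y :  level_mask |= 1 << 3;
 * option not set            :  level_mask |= 0 << 3;   (compiles away) */
level_mask |= IS_ENABLED(CONFIG_EXAMPLE_HIGHPRIO) << 3;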
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 6fd48021324b..08f03d9b5b3e 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
int64_t delta = new_raw_count - prev_raw_count;
/*
- * We don't afaraid of hwc->prev_count changing beneath our feet
+ * We aren't afraid of hwc->prev_count changing beneath our feet
* because there's no way for us to re-enter this function anytime.
*/
local64_set(&hwc->prev_count, new_raw_count);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index f63b8bfefb0c..a946400a86d0 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/cache.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
- * Appent to embedded DT cmdline.
+ * append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {
@@ -435,12 +435,6 @@ void __init setup_arch(char **cmdline_p)
static int __init customize_machine(void)
{
- /*
- * Traverses flattened DeviceTree - registering platform devices
- * (if any) complete with their resources
- */
- of_platform_default_populate(NULL, NULL, NULL);
-
if (machine_desc->init_machine)
machine_desc->init_machine();
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 004b7f0bc76c..6cb3736b6b83 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -34,7 +34,7 @@
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permssions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
- * This was cauing TLB entries to be overwritten on unrelated indexes
+ * This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index e0efff15a5ae..b9192a653b7e 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
* prelogue is setup (callee regs saved and then fp set and not other
* way around
*/
- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;
#endif
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 4549ab255dd1..f927b8dc6edd 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -116,19 +116,19 @@ static struct clocksource arc_counter_gfrc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init arc_cs_setup_gfrc(struct device_node *node)
+static int __init arc_cs_setup_gfrc(struct device_node *node)
{
int exists = cpuinfo_arc700[0].extn.gfrc;
int ret;
if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
- return;
+ return -ENXIO;
ret = arc_get_timer_clk(node);
if (ret)
- return;
+ return ret;
- clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+ return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
@@ -172,25 +172,25 @@ static struct clocksource arc_counter_rtc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init arc_cs_setup_rtc(struct device_node *node)
+static int __init arc_cs_setup_rtc(struct device_node *node)
{
int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
int ret;
if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
- return;
+ return -ENXIO;
/* Local to CPU hence not usable in SMP */
if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
- return;
+ return -EINVAL;
ret = arc_get_timer_clk(node);
if (ret)
- return;
+ return ret;
write_aux_reg(AUX_RTC_CTRL, 1);
- clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+ return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
@@ -213,23 +213,23 @@ static struct clocksource arc_counter_timer1 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init arc_cs_setup_timer1(struct device_node *node)
+static int __init arc_cs_setup_timer1(struct device_node *node)
{
int ret;
/* Local to CPU hence not usable in SMP */
if (IS_ENABLED(CONFIG_SMP))
- return;
+ return -EINVAL;
ret = arc_get_timer_clk(node);
if (ret)
- return;
+ return ret;
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
- clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+ return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
/********** Clock Event Device *********/
@@ -296,73 +296,76 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int arc_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+
+static int arc_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
evt->cpumask = cpumask_of(smp_processor_id());
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- clockevents_config_and_register(evt, arc_timer_freq,
- 0, ULONG_MAX);
- enable_percpu_irq(arc_timer_irq, 0);
- break;
- case CPU_DYING:
- disable_percpu_irq(arc_timer_irq);
- break;
- }
-
- return NOTIFY_OK;
+ clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
+ enable_percpu_irq(arc_timer_irq, 0);
+ return 0;
}
-static struct notifier_block arc_timer_cpu_nb = {
- .notifier_call = arc_timer_cpu_notify,
-};
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(arc_timer_irq);
+ return 0;
+}
/*
* clockevent setup for boot CPU
*/
-static void __init arc_clockevent_setup(struct device_node *node)
+static int __init arc_clockevent_setup(struct device_node *node)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int ret;
- register_cpu_notifier(&arc_timer_cpu_nb);
-
arc_timer_irq = irq_of_parse_and_map(node, 0);
- if (arc_timer_irq <= 0)
- panic("clockevent: missing irq");
+ if (arc_timer_irq <= 0) {
+ pr_err("clockevent: missing irq");
+ return -EINVAL;
+ }
ret = arc_get_timer_clk(node);
- if (ret)
- panic("clockevent: missing clk");
-
- evt->irq = arc_timer_irq;
- evt->cpumask = cpumask_of(smp_processor_id());
- clockevents_config_and_register(evt, arc_timer_freq,
- 0, ARC_TIMER_MAX);
+ if (ret) {
+ pr_err("clockevent: missing clk");
+ return ret;
+ }
/* Needs apriori irq_set_percpu_devid() done in intc map function */
ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
"Timer0 (per-cpu-tick)", evt);
- if (ret)
- panic("clockevent: unable to request irq\n");
+ if (ret) {
+ pr_err("clockevent: unable to request irq\n");
+ return ret;
+ }
- enable_percpu_irq(arc_timer_irq, 0);
+ ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+ "AP_ARC_TIMER_STARTING",
+ arc_timer_starting_cpu,
+ arc_timer_dying_cpu);
+ if (ret) {
+ pr_err("Failed to setup hotplug state");
+ return ret;
+ }
+ return 0;
}
-static void __init arc_of_timer_init(struct device_node *np)
+static int __init arc_of_timer_init(struct device_node *np)
{
static int init_count = 0;
+ int ret;
if (!init_count) {
init_count = 1;
- arc_clockevent_setup(np);
+ ret = arc_clockevent_setup(np);
} else {
- arc_cs_setup_timer1(np);
+ ret = arc_cs_setup_timer1(np);
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
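The conversion above follows the pattern now expected of CLOCKSOURCE_OF_DECLARE() init callbacks: report failure with a negative errno instead of calling panic(), and move per-CPU clockevent bring-up/teardown into CPU-hotplug callbacks registered via cpuhp_setup_state(). A minimal sketch of that shape follows; the "dummy" names, the compatible string and the use of the dynamic CPUHP_AP_ONLINE_DYN slot are placeholders for illustration, not part of the ARC patch itself.

#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>

static int dummy_timer_irq;

static int dummy_timer_starting_cpu(unsigned int cpu)
{
	/* per-CPU bring-up: clockevent config + per-cpu IRQ enable go here */
	enable_percpu_irq(dummy_timer_irq, 0);
	return 0;
}

static int dummy_timer_dying_cpu(unsigned int cpu)
{
	/* per-CPU teardown */
	disable_percpu_irq(dummy_timer_irq);
	return 0;
}

static int __init dummy_timer_init(struct device_node *np)
{
	dummy_timer_irq = irq_of_parse_and_map(np, 0);
	if (dummy_timer_irq <= 0)
		return -EINVAL;		/* propagate the error, do not panic() */

	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dummy/timer:starting",
				 dummy_timer_starting_cpu,
				 dummy_timer_dying_cpu);
}
CLOCKSOURCE_OF_DECLARE(dummy_timer, "vendor,dummy-timer", dummy_timer_init);

With the int-returning callback, a failed probe is reported by the generic clocksource probing code rather than taking the whole boot down.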
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a6f91e88ce36..934150e7ac48 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
return 0;
}
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9e5eddbb856f..5a294b2c3cb3 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -215,7 +215,7 @@ slc_chk:
* ------------------
* This ver of MMU supports variable page sizes (1k-16k): although Linux will
* only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
* meaning more vaddr bits needed to disambiguate the cache-line-op ;
* the existing scheme of piggybacking won't work for certain configurations.
* Two new registers IC_PTAG and DC_PTAG introduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
/*
* This is technically for MMU v4, using the MMU v3 programming model
- * Special work for HS38 aliasing I-cache configuratino with PAE40
+ * Special work for HS38 aliasing I-cache configuration with PAE40
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
* Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
ic->ver, CONFIG_ARC_MMU_VER);
/*
- * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+ * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 8c8e36fa5659..ab74b5d9186c 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -10,7 +10,7 @@
* DMA Coherent API Notes
*
* I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
* Cache bit off in the TLB entry.
*
* The default DMA address == Phy address which is 0x8000_0000 based.
@@ -92,7 +92,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- struct page *page = virt_to_page(dma_handle);
+ phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+ struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
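The one-line fix above matters because the dma_addr_t handed to the free path is a bus address, which cannot be fed to virt_to_page() directly; it first has to be translated back to a physical address. A hedged sketch of the intended order of translations, trimmed to just the address handling (error paths and the uncached-mapping teardown are omitted):

static void example_dma_free(struct device *dev, size_t size, void *vaddr,
			     dma_addr_t dma_handle)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);	/* bus -> phys */
	struct page *page = virt_to_page(paddr);		/* phys -> page */

	__free_pages(page, get_order(size));
}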
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af63f4a13e60..e94e5aa33985 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -137,7 +137,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
if (unlikely(fatal_signal_pending(current))) {
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 49b8abd1115c..f52b7db67fd3 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(ioremap);
/*
* ioremap with access flags
* Cache semantics wise it is same as ioremap - "forced" uncached.
- * However unline vanilla ioremap which bypasses ARC MMU for addresses in
+ * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
* ARC hardware uncached region, this one still goes thru the MMU as caller
* might need finer access control (R/W/X)
*/
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1bf9a108e99b..9ec5f5588dc5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -355,10 +355,10 @@ config ARM_SINGLE_ARMV7M
config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
select AUTO_ZRELADDR
- select CLKSRC_MMIO
select COMMON_CLK
select CPU_ARM720T
select GENERIC_CLOCKEVENTS
+ select CLPS711X_TIMER
select GPIOLIB
select MFD_SYSCON
select SOC_BUS
@@ -1184,6 +1184,60 @@ config ARM_ERRATA_773022
loop buffer may deliver incorrect instructions. This
workaround disables the loop buffer to avoid the erratum.
+config ARM_ERRATA_818325_852422
+ bool "ARM errata: A12: some seqs of opposed cond code instrs => deadlock or corruption"
+ depends on CPU_V7
+ help
+ This option enables the workaround for:
+ - Cortex-A12 818325: Execution of an UNPREDICTABLE STR or STM
+ instruction might deadlock. Fixed in r0p1.
+ - Cortex-A12 852422: Execution of a sequence of instructions might
+ lead to either a data corruption or a CPU deadlock. Not fixed in
+ any Cortex-A12 cores yet.
+ The workaround for both errata involves setting bit[12] of the
+ Feature Register. This bit disables an optimisation applied to a
+ sequence of 2 instructions that use opposing condition codes.
+
+config ARM_ERRATA_821420
+ bool "ARM errata: A12: sequence of VMOV to core registers might lead to a dead lock"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 821420 Cortex-A12
+ (all revs) erratum. In very rare timing conditions, a sequence
+ of VMOV to Core registers instructions, for which the second
+ one is in the shadow of a branch or abort, can lead to a
+ deadlock when the VMOV instructions are issued out-of-order.
+
+config ARM_ERRATA_825619
+ bool "ARM errata: A12: DMB NSHST/ISHST mixed ... might cause deadlock"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 825619 Cortex-A12
+ (all revs) erratum. Within rare timing constraints, executing a
+ DMB NSHST or DMB ISHST instruction followed by a mix of Cacheable
+ and Device/Strongly-Ordered loads and stores might cause a deadlock.
+
+config ARM_ERRATA_852421
+ bool "ARM errata: A17: DMB ST might fail to create order between stores"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 852421 Cortex-A17
+ (r1p0, r1p1, r1p2) erratum. Under very rare timing conditions,
+ execution of a DMB ST instruction might fail to properly order
+ stores from GroupA and stores from GroupB.
+
+config ARM_ERRATA_852423
+ bool "ARM errata: A17: some seqs of opposed cond code instrs => deadlock or corruption"
+ depends on CPU_V7
+ help
+ This option enables the workaround for:
+ - Cortex-A17 852423: Execution of a sequence of instructions might
+ lead to either a data corruption or a CPU deadlock. Not fixed in
+ any Cortex-A17 cores yet.
+ This is identical to Cortex-A12 erratum 852422. It is a separate
+ config option from the A12 erratum due to the way errata are checked
+ for and handled.
+
endmenu
source "arch/arm/common/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 274e8a6582f1..229afaf2058b 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -327,6 +327,7 @@ zImage: Image
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+ @$(kecho) ' Kernel: $(boot)/$@ is ready'
$(INSTALL_TARGETS):
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 5be33a2d59a9..bdc1d5af03d2 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -31,7 +31,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+ @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)'
$(obj)/Image $(obj)/zImage: FORCE
@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@@ -46,14 +46,12 @@ $(obj)/xipImage: FORCE
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready'
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready'
endif
@@ -78,14 +76,12 @@ fi
$(obj)/uImage: $(obj)/zImage FORCE
@$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
- @$(kecho) ' Image $@ is ready'
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready'
PHONY += initrd install zinstall uinstall
initrd:
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 06b6c2d695bf..414b42710a36 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -741,6 +741,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
sun7i-a20-olimex-som-evb.dtb \
sun7i-a20-olinuxino-lime.dtb \
sun7i-a20-olinuxino-lime2.dtb \
+ sun7i-a20-olinuxino-lime2-emmc.dtb \
sun7i-a20-olinuxino-micro.dtb \
sun7i-a20-orangepi.dtb \
sun7i-a20-orangepi-mini.dtb \
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 52be48bbd2dd..7fa295155543 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -766,7 +766,6 @@
ale_entries = <1024>;
bd_ram_size = <0x2000>;
no_bd_ram = <0>;
- rx_descs = <64>;
mac_control = <0x20>;
slaves = <2>;
active_slave = <0>;
@@ -789,7 +788,7 @@
status = "disabled";
davinci_mdio: mdio@4a101000 {
- compatible = "ti,davinci_mdio";
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "davinci_mdio";
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 12fcde4d4d2e..cd81ecf12731 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -626,7 +626,6 @@
ale_entries = <1024>;
bd_ram_size = <0x2000>;
no_bd_ram = <0>;
- rx_descs = <64>;
mac_control = <0x20>;
slaves = <2>;
active_slave = <0>;
@@ -636,7 +635,7 @@
syscon = <&scm_conf>;
davinci_mdio: mdio@4a101000 {
- compatible = "ti,am4372-mdio","ti,davinci_mdio";
+ compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio";
reg = <0x4a101000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index d82dd6e3f9b1..5687d6b4da60 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -418,7 +418,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
tps@24 {
compatible = "ti,tps65218";
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 81d6c3033b51..c4d04c5293b9 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -86,7 +86,7 @@
led@3 {
label = "beagle-x15:usr3";
gpios = <&gpio7 15 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "ide-disk";
+ linux,default-trigger = "disk-activity";
default-state = "off";
};
};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index b01a5948cdd0..0e63b9dff6e7 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -60,10 +60,26 @@
tps659038_pmic {
compatible = "ti,tps659038-pmic";
+
+ smps12-in-supply = <&vmain>;
+ smps3-in-supply = <&vmain>;
+ smps45-in-supply = <&vmain>;
+ smps6-in-supply = <&vmain>;
+ smps7-in-supply = <&vmain>;
+ smps8-in-supply = <&vmain>;
+ smps9-in-supply = <&vmain>;
+ ldo1-in-supply = <&vmain>;
+ ldo2-in-supply = <&vmain>;
+ ldo3-in-supply = <&vmain>;
+ ldo4-in-supply = <&vmain>;
+ ldo9-in-supply = <&vmain>;
+ ldoln-in-supply = <&vmain>;
+ ldousb-in-supply = <&vmain>;
+ ldortc-in-supply = <&vmain>;
+
regulators {
smps12_reg: smps12 {
/* VDD_MPU */
- vin-supply = <&vmain>;
regulator-name = "smps12";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
@@ -73,7 +89,6 @@
smps3_reg: smps3 {
/* VDD_DDR EMIF1 EMIF2 */
- vin-supply = <&vmain>;
regulator-name = "smps3";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
@@ -84,7 +99,6 @@
smps45_reg: smps45 {
/* VDD_DSPEVE on AM572 */
/* VDD_IVA + VDD_DSP on AM571 */
- vin-supply = <&vmain>;
regulator-name = "smps45";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
@@ -94,7 +108,6 @@
smps6_reg: smps6 {
/* VDD_GPU */
- vin-supply = <&vmain>;
regulator-name = "smps6";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
@@ -104,7 +117,6 @@
smps7_reg: smps7 {
/* VDD_CORE */
- vin-supply = <&vmain>;
regulator-name = "smps7";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1150000>;
@@ -115,13 +127,11 @@
smps8_reg: smps8 {
/* 5728 - VDD_IVAHD */
/* 5718 - N.C. test point */
- vin-supply = <&vmain>;
regulator-name = "smps8";
};
smps9_reg: smps9 {
/* VDD_3_3D */
- vin-supply = <&vmain>;
regulator-name = "smps9";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -132,7 +142,6 @@
ldo1_reg: ldo1 {
/* VDDSHV8 - VSDMMC */
/* NOTE: on rev 1.3a, data supply */
- vin-supply = <&vmain>;
regulator-name = "ldo1";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -142,7 +151,6 @@
ldo2_reg: ldo2 {
/* VDDSH18V */
- vin-supply = <&vmain>;
regulator-name = "ldo2";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -152,7 +160,6 @@
ldo3_reg: ldo3 {
/* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
- vin-supply = <&vmain>;
regulator-name = "ldo3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -162,7 +169,6 @@
ldo4_reg: ldo4 {
/* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
- vin-supply = <&vmain>;
regulator-name = "ldo4";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -174,7 +180,6 @@
ldo9_reg: ldo9 {
/* VDD_RTC */
- vin-supply = <&vmain>;
regulator-name = "ldo9";
regulator-min-microvolt = <840000>;
regulator-max-microvolt = <1160000>;
@@ -184,7 +189,6 @@
ldoln_reg: ldoln {
/* VDDA_1V8_PLL */
- vin-supply = <&vmain>;
regulator-name = "ldoln";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -194,7 +198,6 @@
ldousb_reg: ldousb {
/* VDDA_3V_USB: VDDA_USBHS33 */
- vin-supply = <&vmain>;
regulator-name = "ldousb";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -204,7 +207,6 @@
ldortc_reg: ldortc {
/* VDDA_RTC */
- vin-supply = <&vmain>;
regulator-name = "ldortc";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 8450944b28e6..22f7a13e20b4 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -58,8 +58,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
internal-regs {
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index def9e783b5c6..6a40ed7d0502 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -206,6 +206,11 @@
brcm,nand-has-wp;
};
+ rng: rng@33000 {
+ compatible = "brcm,bcm-nsp-rng";
+ reg = <0x33000 0x14>;
+ };
+
ccbtimer0: timer@34000 {
compatible = "arm,sp804";
reg = <0x34000 0x1000>;
@@ -266,6 +271,48 @@
<0x30028 0x04>,
<0x3f408 0x04>;
};
+
+ sata_phy: sata_phy@40100 {
+ compatible = "brcm,iproc-nsp-sata-phy";
+ reg = <0x40100 0x340>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ sata: ahci@41000 {
+ compatible = "brcm,bcm-nsp-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x41000 0x1000>, <0x40020 0x1c>;
+ interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ phy-names = "sata-phy";
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ phy-names = "sata-phy";
+ };
+ };
};
pcie0: pcie@18012000 {
diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts
index e298450b49b2..2d8422632b2b 100644
--- a/arch/arm/boot/dts/bcm958625k.dts
+++ b/arch/arm/boot/dts/bcm958625k.dts
@@ -68,6 +68,18 @@
status = "okay";
};
+&sata_phy0 {
+ status = "okay";
+};
+
+&sata_phy1 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index cbc17b0794b1..4128fa91823c 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -93,6 +93,10 @@
};
};
+&mmc1 {
+ status = "disabled";
+};
+
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&sd1_pins>;
@@ -101,6 +105,10 @@
cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
};
+&mmc3 {
+ status = "disabled";
+};
+
&pincntl {
sd1_pins: pinmux_sd1_pins {
pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 5d4313fd5a46..3f184863e0c5 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -45,6 +45,14 @@
phy-mode = "rgmii";
};
+&mmc1 {
+ status = "disabled";
+};
+
+&mmc2 {
+ status = "disabled";
+};
+
&mmc3 {
pinctrl-names = "default";
pinctrl-0 = <&sd2_pins>;
@@ -53,6 +61,7 @@
dmas = <&edma_xbar 8 0 1 /* use SDTXEVT1 instead of MCASP0TX */
&edma_xbar 9 0 2>; /* use SDRXEVT1 instead of MCASP0RX */
dma-names = "tx", "rx";
+ non-removable;
};
&pincntl {
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index d4537dc61497..f23cae0c2179 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -509,7 +509,6 @@
ale_entries = <1024>;
bd_ram_size = <0x2000>;
no_bd_ram = <0>;
- rx_descs = <64>;
mac_control = <0x20>;
slaves = <2>;
active_slave = <0>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index e0074014385a..de559f6e4fee 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1451,6 +1451,8 @@
ti,hwmods = "gpmc";
reg = <0x50000000 0x37c>; /* device IO registers */
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma_xbar 4 0>;
+ dma-names = "rxtx";
gpmc,num-cs = <8>;
gpmc,num-waitpins = <2>;
#address-cells = <2>;
@@ -1626,7 +1628,6 @@
ale_entries = <1024>;
bd_ram_size = <0x2000>;
no_bd_ram = <0>;
- rx_descs = <64>;
mac_control = <0x20>;
slaves = <2>;
active_slave = <0>;
@@ -1661,7 +1662,7 @@
status = "disabled";
davinci_mdio: mdio@48485000 {
- compatible = "ti,davinci_mdio";
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "davinci_mdio";
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 4220eeffc65a..5e06020f450b 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -107,8 +107,8 @@
reg = <0x58000000 0x80>,
<0x58004054 0x4>,
<0x58004300 0x20>,
- <0x58005054 0x4>,
- <0x58005300 0x20>;
+ <0x58009054 0x4>,
+ <0x58009300 0x20>;
reg-names = "dss", "pll1_clkctrl", "pll1",
"pll2_clkctrl", "pll2";
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index ddfe1f558c10..fa14f77df563 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -242,7 +242,7 @@
hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
ports {
- port0 {
+ port {
dp_out: endpoint {
remote-endpoint = <&bridge_in>;
};
@@ -485,13 +485,20 @@
edid-emulation = <5>;
ports {
- port0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
- port1 {
+ port@1 {
+ reg = <1>;
+
bridge_in: endpoint {
remote-endpoint = <&dp_out>;
};
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index f9d2e4f1a0e0..1de972d46a87 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -163,7 +163,7 @@
hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
ports {
- port0 {
+ port {
dp_out: endpoint {
remote-endpoint = <&bridge_in>;
};
@@ -631,13 +631,20 @@
use-external-pwm;
ports {
- port0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
- port1 {
+ port@1 {
+ reg = <1>;
+
bridge_in: endpoint {
remote-endpoint = <&dp_out>;
};
diff --git a/arch/arm/boot/dts/imx28-m28.dtsi b/arch/arm/boot/dts/imx28-m28.dtsi
index 6cebaa6b8833..214bb1506b53 100644
--- a/arch/arm/boot/dts/imx28-m28.dtsi
+++ b/arch/arm/boot/dts/imx28-m28.dtsi
@@ -37,7 +37,7 @@
status = "okay";
rtc: rtc@68 {
- compatible = "stm,m41t62";
+ compatible = "st,m41t62";
reg = <0x68>;
};
};
diff --git a/arch/arm/boot/dts/imx51-ts4800.dts b/arch/arm/boot/dts/imx51-ts4800.dts
index 0ff76a1bc0f1..30f44b5565b9 100644
--- a/arch/arm/boot/dts/imx51-ts4800.dts
+++ b/arch/arm/boot/dts/imx51-ts4800.dts
@@ -102,7 +102,7 @@
status = "okay";
rtc: m41t00@68 {
- compatible = "stm,m41t00";
+ compatible = "st,m41t00";
reg = <0x68>;
};
};
diff --git a/arch/arm/boot/dts/imx53-m53.dtsi b/arch/arm/boot/dts/imx53-m53.dtsi
index 87a7fc709c2d..d259f57bfd98 100644
--- a/arch/arm/boot/dts/imx53-m53.dtsi
+++ b/arch/arm/boot/dts/imx53-m53.dtsi
@@ -84,7 +84,7 @@
};
rtc: rtc@68 {
- compatible = "stm,m41t62";
+ compatible = "st,m41t62";
reg = <0x68>;
};
};
diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
index 364578d707a5..905907325f3b 100644
--- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
+++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
@@ -282,7 +282,7 @@
};
rtc: m41t62@68 {
- compatible = "stm,m41t62";
+ compatible = "st,m41t62";
reg = <0x68>;
};
};
diff --git a/arch/arm/boot/dts/kirkwood-ns2lite.dts b/arch/arm/boot/dts/kirkwood-ns2lite.dts
index 1f2ca60d8b3d..2c661add0cc0 100644
--- a/arch/arm/boot/dts/kirkwood-ns2lite.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2lite.dts
@@ -26,7 +26,7 @@
blue-sata {
label = "ns2:blue:sata";
gpios = <&gpio0 30 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "ide-disk";
+ linux,default-trigger = "disk-activity";
};
};
};
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index f5c8c0dd41dc..1e9a72100a45 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -129,7 +129,7 @@
disk {
label = "topkick:yellow:disk";
gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "ide-disk";
+ linux,default-trigger = "disk-activity";
};
system2 {
label = "topkick:red:system";
diff --git a/arch/arm/boot/dts/meson8-minix-neo-x8.dts b/arch/arm/boot/dts/meson8-minix-neo-x8.dts
index 4f536bb1f002..8bceb8d343f6 100644
--- a/arch/arm/boot/dts/meson8-minix-neo-x8.dts
+++ b/arch/arm/boot/dts/meson8-minix-neo-x8.dts
@@ -80,6 +80,7 @@
pmic@32 {
compatible = "ricoh,rn5t618";
reg = <0x32>;
+ system-power-controller;
regulators {
};
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
index 76056ba92ced..ed449827c3d3 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -85,7 +85,7 @@
OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
- OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+ OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
>;
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 41f5d386f21f..f4f2ce46d681 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -188,6 +188,7 @@
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
bus-width = <4>;
+ cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
};
&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index d6f839cab649..b6971060648a 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -194,6 +194,12 @@
OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
>;
};
+
+ mmc1_wp_pins: pinmux_mmc1_cd_pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4) /* etk_d15.gpio_29 */
+ >;
+ };
};
&i2c3 {
@@ -250,3 +256,8 @@
};
};
};
+
+&mmc1 {
+ pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
+ wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */
+};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index d9e2d9c6e999..2b74a81d1de2 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -288,7 +288,7 @@
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
@@ -300,7 +300,7 @@
modem_pins: pinmux_modem {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */
- OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */
+ OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4) /* gpio 72 => ape_rst_rq */
OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */
OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */
OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index a00ca761675d..927b17fc4ed8 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -97,7 +97,7 @@
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */
@@ -110,7 +110,7 @@
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */
OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */
@@ -120,7 +120,7 @@
modem_pins1: pinmux_modem_core1_pins {
pinctrl-single,pins = <
- OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
+ OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */
OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */
>;
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
index f19170bdcc1f..c29b41dc7b95 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -98,7 +98,7 @@
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
- OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+ OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
>;
};
@@ -107,7 +107,7 @@
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
- OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+ OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
>;
};
@@ -125,7 +125,7 @@
pinctrl-single,pins = <
OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
- OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+ OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
>;
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index dc759a3028b7..5d5b620b7d9b 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -14,6 +14,29 @@
display0 = &hdmi0;
};
+ vmain: fixedregulator-vmain {
+ compatible = "regulator-fixed";
+ regulator-name = "vmain";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vsys_cobra: fixedregulator-vsys_cobra {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_cobra";
+ vin-supply = <&vmain>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vdds_1v8_main: fixedregulator-vdds_1v8_main {
+ compatible = "regulator-fixed";
+ regulator-name = "vdds_1v8_main";
+ vin-supply = <&smps7_reg>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
vmmcsd_fixed: fixedregulator-mmcsd {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
@@ -309,7 +332,7 @@
wlcore_irq_pin: pinmux_wlcore_irq_pin {
pinctrl-single,pins = <
- OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
+ OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
>;
};
};
@@ -409,6 +432,26 @@
ti,ldo6-vibrator;
+ smps123-in-supply = <&vsys_cobra>;
+ smps45-in-supply = <&vsys_cobra>;
+ smps6-in-supply = <&vsys_cobra>;
+ smps7-in-supply = <&vsys_cobra>;
+ smps8-in-supply = <&vsys_cobra>;
+ smps9-in-supply = <&vsys_cobra>;
+ smps10_out2-in-supply = <&vsys_cobra>;
+ smps10_out1-in-supply = <&vsys_cobra>;
+ ldo1-in-supply = <&vsys_cobra>;
+ ldo2-in-supply = <&vsys_cobra>;
+ ldo3-in-supply = <&vdds_1v8_main>;
+ ldo4-in-supply = <&vdds_1v8_main>;
+ ldo5-in-supply = <&vsys_cobra>;
+ ldo6-in-supply = <&vdds_1v8_main>;
+ ldo7-in-supply = <&vsys_cobra>;
+ ldo8-in-supply = <&vsys_cobra>;
+ ldo9-in-supply = <&vmmcsd_fixed>;
+ ldoln-in-supply = <&vsys_cobra>;
+ ldousb-in-supply = <&vsys_cobra>;
+
regulators {
smps123_reg: smps123 {
/* VDD_OPP_MPU */
@@ -600,7 +643,8 @@
pinctrl-0 = <&twl6040_pins>;
interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
- ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+
+ /* audpwron gpio defined in the board specific dts */
vio-supply = <&smps7_reg>;
v2v1-supply = <&smps9_reg>;
diff --git a/arch/arm/boot/dts/omap5-igep0050.dts b/arch/arm/boot/dts/omap5-igep0050.dts
index 46ecb1dd3b5c..f75ce02fb398 100644
--- a/arch/arm/boot/dts/omap5-igep0050.dts
+++ b/arch/arm/boot/dts/omap5-igep0050.dts
@@ -35,6 +35,22 @@
};
};
+/* LDO4 is VPP1 - ball AD9 */
+&ldo4_reg {
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+};
+
+/*
+ * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
+ * VDDA_HDMI - ball AN25
+ */
+&ldo7_reg {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+};
+
&omap5_pmx_core {
i2c4_pins: pinmux_i2c4_pins {
pinctrl-single,pins = <
@@ -52,3 +68,13 @@
<&gpio7 3 0>; /* 195, SDA */
};
+&twl6040 {
+ ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>; /* gpio line 144 */
+};
+
+&twl6040_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_144 */
+ OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6) /* perslimbus2_clock.gpio5_145 */
+ >;
+};
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 60b3fbb3bf07..a51e60518eb6 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -51,3 +51,13 @@
<&gpio9 1 GPIO_ACTIVE_HIGH>, /* TCA6424A P00, LS OE */
<&gpio7 1 GPIO_ACTIVE_HIGH>; /* GPIO 193, HPD */
};
+
+&twl6040 {
+ ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+};
+
+&twl6040_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */
+ >;
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index df96ccdc9bb4..e318d04319a0 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -247,7 +247,8 @@
};
timer@200a000 {
- compatible = "qcom,kpss-timer", "qcom,msm-timer";
+ compatible = "qcom,kpss-timer",
+ "qcom,kpss-wdt-apq8064", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
<1 3 0x301>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 5c08d19066c2..e625656a608a 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -252,7 +252,7 @@
};
watchdog@b017000 {
- compatible = "qcom,kpss-standalone";
+ compatible = "qcom,kpss-wdt", "qcom,kpss-wdt-ipq4019";
reg = <0xb017000 0x40>;
clocks = <&sleep_clk>;
timeout-sec = <10>;
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 2601a907947b..2e375576ffd0 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -122,7 +122,8 @@
};
timer@200a000 {
- compatible = "qcom,kpss-timer", "qcom,msm-timer";
+ compatible = "qcom,kpss-timer",
+ "qcom,kpss-wdt-ipq8064", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
<1 3 0x301>,
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index da05e28a81a7..288f56e0ccf5 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -87,7 +87,8 @@
};
timer@200a000 {
- compatible = "qcom,kpss-timer", "qcom,msm-timer";
+ compatible = "qcom,kpss-timer",
+ "qcom,kpss-wdt-msm8960", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
<1 3 0x301>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 3b44ef3cff12..3ebee530f2b0 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -539,8 +539,9 @@
gmac: ethernet@ff290000 {
compatible = "rockchip,rk3288-gmac";
reg = <0xff290000 0x10000>;
- interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "macirq";
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq";
rockchip,grf = <&grf>;
clocks = <&cru SCLK_MAC>,
<&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 2827e7ab5ebc..5dd2734e67ba 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -232,7 +232,7 @@
};
usb1: ohci@00400000 {
- compatible = "atmel,at91rm9200-ohci", "usb-ohci";
+ compatible = "atmel,sama5d2-ohci", "usb-ohci";
reg = <0x00400000 0x100000>;
interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 17e81dc9213e..5820b70c95b3 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -621,6 +621,22 @@
compatible = "altr,socfpga-a10-ocram-ecc";
reg = <0xff8c3000 0x400>;
};
+
+ emac0-rx-ecc@ff8c0800 {
+ compatible = "altr,socfpga-eth-mac-ecc";
+ reg = <0xff8c0800 0x400>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>,
+ <36 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ emac0-tx-ecc@ff8c0c00 {
+ compatible = "altr,socfpga-eth-mac-ecc";
+ reg = <0xff8c0c00 0x400>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>,
+ <37 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
rst: rstmgr@ffd05000 {
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
index e1a61f20873f..d79853775061 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
@@ -52,7 +52,7 @@
status = "okay";
rtc: rtc@68 {
- compatible = "stm,m41t82";
+ compatible = "st,m41t82";
reg = <0x68>;
};
};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index a3601e4c0a2e..b844473601d2 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -136,6 +136,7 @@
&gmac1 {
status = "okay";
phy-mode = "rgmii";
+ phy-handle = <&phy1>;
snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index ad8ba10764a3..d294e82447a2 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -24,18 +24,21 @@
compatible = "shared-dma-pool";
reg = <0x40000000 0x01000000>;
no-map;
+ status = "disabled";
};
gp1_reserved: rproc@41000000 {
compatible = "shared-dma-pool";
reg = <0x41000000 0x01000000>;
no-map;
+ status = "disabled";
};
audio_reserved: rproc@42000000 {
compatible = "shared-dma-pool";
reg = <0x42000000 0x01000000>;
no-map;
+ status = "disabled";
};
dmu_reserved: rproc@43000000 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index a03e56fb5dbc..ca58eb279d55 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -65,8 +65,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&dram_gates 26>;
status = "disabled";
};
@@ -74,8 +75,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&ahb_gates 46>,
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&ahb_gates 46>,
<&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
@@ -84,9 +86,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
- <&ahb_gates 46>, <&dram_gates 25>,
- <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>, <&ahb_gates 46>,
+ <&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
@@ -94,8 +96,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
- clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
- <&ahb_gates 44>, <&ahb_gates 46>,
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+ <&ahb_gates 36>, <&ahb_gates 44>,
+ <&ahb_gates 46>,
<&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index bddd0de88af6..367f33012493 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -65,8 +65,8 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>;
status = "disabled";
};
@@ -74,7 +74,8 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>;
status = "disabled";
};
@@ -82,8 +83,8 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
- <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+ <&ahb_gates 36>, <&ahb_gates 44>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index a8d8b4582397..f694482bdeb6 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -52,7 +52,7 @@
/ {
model = "NextThing C.H.I.P.";
- compatible = "nextthing,chip", "allwinner,sun5i-r8";
+ compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
aliases {
i2c0 = &i2c0;
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 68b479b8772c..73c133f5e79c 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -176,8 +176,6 @@
};
&reg_dc1sw {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
regulator-name = "vcc-lcd";
};
diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
index 360adfb1e9ca..d6ad6196a768 100644
--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
@@ -135,8 +135,6 @@
&reg_dc1sw {
regulator-name = "vcc-lcd-usb2";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
};
&reg_dc5ldo {
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index febdf4c72fb0..2c34bbbb9570 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -67,8 +67,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&dram_gates 26>;
status = "disabled";
};
@@ -76,8 +77,8 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
- <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>, <&dram_gates 26>;
status = "disabled";
};
@@ -85,7 +86,7 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&pll5 1>,
+ clocks = <&pll3>, <&pll5 1>,
<&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
<&dram_gates 5>, <&dram_gates 26>;
status = "disabled";
@@ -231,6 +232,7 @@
pll3x2: pll3x2_clk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
+ clocks = <&pll3>;
clock-div = <1>;
clock-mult = <2>;
clock-output-names = "pll3-2x";
@@ -272,6 +274,7 @@
pll7x2: pll7x2_clk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
+ clocks = <&pll7>;
clock-div = <1>;
clock-mult = <2>;
clock-output-names = "pll7-2x";
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index 4a4926b0b0ed..9871bad34742 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -42,8 +42,10 @@
#include "skeleton.dtsi"
+#include <dt-bindings/clock/sun8i-h3-ccu.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include <dt-bindings/reset/sun8i-h3-ccu.h>
/ {
interrupt-parent = <&gic>;
@@ -104,191 +106,6 @@
clock-output-names = "osc32k";
};
- pll1: clk@01c20000 {
- #clock-cells = <0>;
- compatible = "allwinner,sun8i-a23-pll1-clk";
- reg = <0x01c20000 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "pll1";
- };
-
- /* dummy clock until actually implemented */
- pll5: pll5_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <0>;
- clock-output-names = "pll5";
- };
-
- pll6: clk@01c20028 {
- #clock-cells = <1>;
- compatible = "allwinner,sun6i-a31-pll6-clk";
- reg = <0x01c20028 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "pll6", "pll6x2";
- };
-
- pll6d2: pll6d2_clk {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <2>;
- clock-mult = <1>;
- clocks = <&pll6 0>;
- clock-output-names = "pll6d2";
- };
-
- /* dummy clock until pll6 can be reused */
- pll8: pll8_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <1>;
- clock-output-names = "pll8";
- };
-
- cpu: cpu_clk@01c20050 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-cpu-clk";
- reg = <0x01c20050 0x4>;
- clocks = <&osc32k>, <&osc24M>, <&pll1>, <&pll1>;
- clock-output-names = "cpu";
- };
-
- axi: axi_clk@01c20050 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-axi-clk";
- reg = <0x01c20050 0x4>;
- clocks = <&cpu>;
- clock-output-names = "axi";
- };
-
- ahb1: ahb1_clk@01c20054 {
- #clock-cells = <0>;
- compatible = "allwinner,sun6i-a31-ahb1-clk";
- reg = <0x01c20054 0x4>;
- clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
- clock-output-names = "ahb1";
- };
-
- ahb2: ahb2_clk@01c2005c {
- #clock-cells = <0>;
- compatible = "allwinner,sun8i-h3-ahb2-clk";
- reg = <0x01c2005c 0x4>;
- clocks = <&ahb1>, <&pll6d2>;
- clock-output-names = "ahb2";
- };
-
- apb1: apb1_clk@01c20054 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-apb0-clk";
- reg = <0x01c20054 0x4>;
- clocks = <&ahb1>;
- clock-output-names = "apb1";
- };
-
- apb2: apb2_clk@01c20058 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-apb1-clk";
- reg = <0x01c20058 0x4>;
- clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
- clock-output-names = "apb2";
- };
-
- bus_gates: clk@01c20060 {
- #clock-cells = <1>;
- compatible = "allwinner,sun8i-h3-bus-gates-clk";
- reg = <0x01c20060 0x14>;
- clocks = <&ahb1>, <&ahb2>, <&apb1>, <&apb2>;
- clock-names = "ahb1", "ahb2", "apb1", "apb2";
- clock-indices = <5>, <6>, <8>,
- <9>, <10>, <13>,
- <14>, <17>, <18>,
- <19>, <20>,
- <21>, <23>,
- <24>, <25>,
- <26>, <27>,
- <28>, <29>,
- <30>, <31>, <32>,
- <35>, <36>, <37>,
- <40>, <41>, <43>,
- <44>, <52>, <53>,
- <54>, <64>,
- <65>, <69>, <72>,
- <76>, <77>, <78>,
- <96>, <97>, <98>,
- <112>, <113>,
- <114>, <115>,
- <116>, <128>, <135>;
- clock-output-names = "bus_ce", "bus_dma", "bus_mmc0",
- "bus_mmc1", "bus_mmc2", "bus_nand",
- "bus_sdram", "bus_gmac", "bus_ts",
- "bus_hstimer", "bus_spi0",
- "bus_spi1", "bus_otg",
- "bus_otg_ehci0", "bus_ehci1",
- "bus_ehci2", "bus_ehci3",
- "bus_otg_ohci0", "bus_ohci1",
- "bus_ohci2", "bus_ohci3", "bus_ve",
- "bus_lcd0", "bus_lcd1", "bus_deint",
- "bus_csi", "bus_tve", "bus_hdmi",
- "bus_de", "bus_gpu", "bus_msgbox",
- "bus_spinlock", "bus_codec",
- "bus_spdif", "bus_pio", "bus_ths",
- "bus_i2s0", "bus_i2s1", "bus_i2s2",
- "bus_i2c0", "bus_i2c1", "bus_i2c2",
- "bus_uart0", "bus_uart1",
- "bus_uart2", "bus_uart3",
- "bus_scr", "bus_ephy", "bus_dbg";
- };
-
- mmc0_clk: clk@01c20088 {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c20088 0x4>;
- clocks = <&osc24M>, <&pll6 0>, <&pll8>;
- clock-output-names = "mmc0",
- "mmc0_output",
- "mmc0_sample";
- };
-
- mmc1_clk: clk@01c2008c {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c2008c 0x4>;
- clocks = <&osc24M>, <&pll6 0>, <&pll8>;
- clock-output-names = "mmc1",
- "mmc1_output",
- "mmc1_sample";
- };
-
- mmc2_clk: clk@01c20090 {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c20090 0x4>;
- clocks = <&osc24M>, <&pll6 0>, <&pll8>;
- clock-output-names = "mmc2",
- "mmc2_output",
- "mmc2_sample";
- };
-
- usb_clk: clk@01c200cc {
- #clock-cells = <1>;
- #reset-cells = <1>;
- compatible = "allwinner,sun8i-h3-usb-clk";
- reg = <0x01c200cc 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "usb_phy0", "usb_phy1",
- "usb_phy2", "usb_phy3",
- "usb_ohci0", "usb_ohci1",
- "usb_ohci2", "usb_ohci3";
- };
-
- mbus_clk: clk@01c2015c {
- #clock-cells = <0>;
- compatible = "allwinner,sun8i-a23-mbus-clk";
- reg = <0x01c2015c 0x4>;
- clocks = <&osc24M>, <&pll6 1>, <&pll5>;
- clock-output-names = "mbus";
- };
-
apb0: apb0_clk {
compatible = "fixed-factor-clock";
#clock-cells = <0>;
@@ -327,23 +144,23 @@
compatible = "allwinner,sun8i-h3-dma";
reg = <0x01c02000 0x1000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 6>;
- resets = <&ahb_rst 6>;
+ clocks = <&ccu CLK_BUS_DMA>;
+ resets = <&ccu RST_BUS_DMA>;
#dma-cells = <1>;
};
mmc0: mmc@01c0f000 {
compatible = "allwinner,sun5i-a13-mmc";
reg = <0x01c0f000 0x1000>;
- clocks = <&bus_gates 8>,
- <&mmc0_clk 0>,
- <&mmc0_clk 1>,
- <&mmc0_clk 2>;
+ clocks = <&ccu CLK_BUS_MMC0>,
+ <&ccu CLK_MMC0>,
+ <&ccu CLK_MMC0_OUTPUT>,
+ <&ccu CLK_MMC0_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb_rst 8>;
+ resets = <&ccu RST_BUS_MMC0>;
reset-names = "ahb";
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -354,15 +171,15 @@
mmc1: mmc@01c10000 {
compatible = "allwinner,sun5i-a13-mmc";
reg = <0x01c10000 0x1000>;
- clocks = <&bus_gates 9>,
- <&mmc1_clk 0>,
- <&mmc1_clk 1>,
- <&mmc1_clk 2>;
+ clocks = <&ccu CLK_BUS_MMC1>,
+ <&ccu CLK_MMC1>,
+ <&ccu CLK_MMC1_OUTPUT>,
+ <&ccu CLK_MMC1_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb_rst 9>;
+ resets = <&ccu RST_BUS_MMC1>;
reset-names = "ahb";
interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -373,15 +190,15 @@
mmc2: mmc@01c11000 {
compatible = "allwinner,sun5i-a13-mmc";
reg = <0x01c11000 0x1000>;
- clocks = <&bus_gates 10>,
- <&mmc2_clk 0>,
- <&mmc2_clk 1>,
- <&mmc2_clk 2>;
+ clocks = <&ccu CLK_BUS_MMC2>,
+ <&ccu CLK_MMC2>,
+ <&ccu CLK_MMC2_OUTPUT>,
+ <&ccu CLK_MMC2_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb_rst 10>;
+ resets = <&ccu RST_BUS_MMC2>;
reset-names = "ahb";
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -401,18 +218,18 @@
"pmu1",
"pmu2",
"pmu3";
- clocks = <&usb_clk 8>,
- <&usb_clk 9>,
- <&usb_clk 10>,
- <&usb_clk 11>;
+ clocks = <&ccu CLK_USB_PHY0>,
+ <&ccu CLK_USB_PHY1>,
+ <&ccu CLK_USB_PHY2>,
+ <&ccu CLK_USB_PHY3>;
clock-names = "usb0_phy",
"usb1_phy",
"usb2_phy",
"usb3_phy";
- resets = <&usb_clk 0>,
- <&usb_clk 1>,
- <&usb_clk 2>,
- <&usb_clk 3>;
+ resets = <&ccu RST_USB_PHY0>,
+ <&ccu RST_USB_PHY1>,
+ <&ccu RST_USB_PHY2>,
+ <&ccu RST_USB_PHY3>;
reset-names = "usb0_reset",
"usb1_reset",
"usb2_reset",
@@ -425,8 +242,8 @@
compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
reg = <0x01c1b000 0x100>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 25>, <&bus_gates 29>;
- resets = <&ahb_rst 25>, <&ahb_rst 29>;
+ clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>;
+ resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
phy-names = "usb";
status = "disabled";
@@ -436,9 +253,9 @@
compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
reg = <0x01c1b400 0x100>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 29>, <&bus_gates 25>,
- <&usb_clk 17>;
- resets = <&ahb_rst 29>, <&ahb_rst 25>;
+ clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>,
+ <&ccu CLK_USB_OHCI1>;
+ resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
phy-names = "usb";
status = "disabled";
@@ -448,8 +265,8 @@
compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
reg = <0x01c1c000 0x100>;
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 26>, <&bus_gates 30>;
- resets = <&ahb_rst 26>, <&ahb_rst 30>;
+ clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>;
+ resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
phy-names = "usb";
status = "disabled";
@@ -459,9 +276,9 @@
compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
reg = <0x01c1c400 0x100>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 30>, <&bus_gates 26>,
- <&usb_clk 18>;
- resets = <&ahb_rst 30>, <&ahb_rst 26>;
+ clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>,
+ <&ccu CLK_USB_OHCI2>;
+ resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
phy-names = "usb";
status = "disabled";
@@ -471,8 +288,8 @@
compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
reg = <0x01c1d000 0x100>;
interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 27>, <&bus_gates 31>;
- resets = <&ahb_rst 27>, <&ahb_rst 31>;
+ clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>;
+ resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
phys = <&usbphy 3>;
phy-names = "usb";
status = "disabled";
@@ -482,20 +299,29 @@
compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
reg = <0x01c1d400 0x100>;
interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 31>, <&bus_gates 27>,
- <&usb_clk 19>;
- resets = <&ahb_rst 31>, <&ahb_rst 27>;
+ clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>,
+ <&ccu CLK_USB_OHCI3>;
+ resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
phys = <&usbphy 3>;
phy-names = "usb";
status = "disabled";
};
+ ccu: clock@01c20000 {
+ compatible = "allwinner,sun8i-h3-ccu";
+ reg = <0x01c20000 0x400>;
+ clocks = <&osc24M>, <&osc32k>;
+ clock-names = "hosc", "losc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
pio: pinctrl@01c20800 {
compatible = "allwinner,sun8i-h3-pinctrl";
reg = <0x01c20800 0x400>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bus_gates 69>;
+ clocks = <&ccu CLK_BUS_PIO>;
gpio-controller;
#gpio-cells = <3>;
interrupt-controller;
@@ -542,24 +368,6 @@
};
};
- ahb_rst: reset@01c202c0 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-ahb1-reset";
- reg = <0x01c202c0 0xc>;
- };
-
- apb1_rst: reset@01c202d0 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- reg = <0x01c202d0 0x4>;
- };
-
- apb2_rst: reset@01c202d8 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- reg = <0x01c202d8 0x4>;
- };
-
timer@01c20c00 {
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0xa0>;
@@ -580,8 +388,8 @@
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&bus_gates 112>;
- resets = <&apb2_rst 16>;
+ clocks = <&ccu CLK_BUS_UART0>;
+ resets = <&ccu RST_BUS_UART0>;
dmas = <&dma 6>, <&dma 6>;
dma-names = "rx", "tx";
status = "disabled";
@@ -593,8 +401,8 @@
interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&bus_gates 113>;
- resets = <&apb2_rst 17>;
+ clocks = <&ccu CLK_BUS_UART1>;
+ resets = <&ccu RST_BUS_UART1>;
dmas = <&dma 7>, <&dma 7>;
dma-names = "rx", "tx";
status = "disabled";
@@ -606,8 +414,8 @@
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&bus_gates 114>;
- resets = <&apb2_rst 18>;
+ clocks = <&ccu CLK_BUS_UART2>;
+ resets = <&ccu RST_BUS_UART2>;
dmas = <&dma 8>, <&dma 8>;
dma-names = "rx", "tx";
status = "disabled";
@@ -619,8 +427,8 @@
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&bus_gates 115>;
- resets = <&apb2_rst 19>;
+ clocks = <&ccu CLK_BUS_UART3>;
+ resets = <&ccu RST_BUS_UART3>;
dmas = <&dma 9>, <&dma 9>;
dma-names = "rx", "tx";
status = "disabled";
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index 1eca3b28ac64..b6da15d823a6 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -1843,7 +1843,7 @@
ldo5_reg: ldo5 {
regulator-name = "vddio_sdmmc,avdd_vdac";
- regulator-min-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
@@ -1914,6 +1914,7 @@
sdhci@78000000 {
status = "okay";
+ vqmmc-supply = <&ldo5_reg>;
cd-gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio TEGRA_GPIO(T, 3) GPIO_ACTIVE_HIGH>;
power-gpios = <&gpio TEGRA_GPIO(D, 7) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 6c60b7f91104..5c1fcab4a6f7 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -85,187 +85,199 @@
reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
+
+ switch0: switch0@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ dsa,member = <0 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ switch0port5: port@5 {
+ reg = <5>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch1port6
+ &switch2port9>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
mdio_mux_2: mdio@2 {
reg = <2>;
#address-cells = <1>;
#size-cells = <0>;
- };
-
- mdio_mux_4: mdio@4 {
- reg = <4>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mdio_mux_8: mdio@8 {
- reg = <8>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
- };
-
- dsa {
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
- dsa,ethernet = <&fec1>;
- dsa,mii-bus = <&mdio_mux_1>;
-
- /* 6352 - Primary - 7 ports */
- switch0: switch@0-0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x00 0>;
- eeprom-length = <512>;
- port@0 {
+ switch1: switch1@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0>;
- label = "lan0";
- };
-
- port@1 {
- reg = <1>;
- label = "lan1";
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- };
-
- switch0port5: port@5 {
- reg = <5>;
- label = "dsa";
- phy-mode = "rgmii-txid";
- link = <&switch1port6
- &switch2port9>;
-
- fixed-link {
- speed = <1000>;
- full-duplex;
+ dsa,member = <0 1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan3";
+ phy-handle = <&switch1phy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan4";
+ phy-handle = <&switch1phy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan5";
+ phy-handle = <&switch1phy2>;
+ };
+
+ switch1port5: port@5 {
+ reg = <5>;
+ label = "dsa";
+ link = <&switch2port9>;
+ phy-mode = "rgmii-txid";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ switch1port6: port@6 {
+ reg = <6>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch0port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
};
- };
-
- port@6 {
- reg = <6>;
- label = "cpu";
-
- fixed-link {
- speed = <100>;
- full-duplex;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ };
+ switch1phy1: switch1phy0@1 {
+ reg = <1>;
+ };
+ switch1phy2: switch1phy0@2 {
+ reg = <2>;
+ };
};
};
-
};
- /* 6352 - Secondary - 7 ports */
- switch1: switch@0-1 {
+ mdio_mux_4: mdio@4 {
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x00 1>;
- eeprom-length = <512>;
- mii-bus = <&mdio_mux_2>;
+ reg = <4>;
- port@0 {
+ switch2: switch2@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0>;
- label = "lan3";
- };
-
- port@1 {
- reg = <1>;
- label = "lan4";
- };
-
- port@2 {
- reg = <2>;
- label = "lan5";
- };
-
- switch1port5: port@5 {
- reg = <5>;
- label = "dsa";
- link = <&switch2port9>;
- phy-mode = "rgmii-txid";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
-
- switch1port6: port@6 {
- reg = <6>;
- label = "dsa";
- phy-mode = "rgmii-txid";
- link = <&switch0port5>;
-
- fixed-link {
- speed = <1000>;
- full-duplex;
+ dsa,member = <0 2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan6";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan7";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan8";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "optical3";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ link-gpios = <&gpio6 2
+ GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "optical4";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ link-gpios = <&gpio6 3
+ GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ switch2port9: port@9 {
+ reg = <9>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch1port5
+ &switch0port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
};
};
};
- /* 6185 - 10 ports */
- switch2: switch@0-2 {
+ mdio_mux_8: mdio@8 {
+ reg = <8>;
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x00 2>;
- mii-bus = <&mdio_mux_4>;
-
- port@0 {
- reg = <0>;
- label = "lan6";
- };
-
- port@1 {
- reg = <1>;
- label = "lan7";
- };
-
- port@2 {
- reg = <2>;
- label = "lan8";
- };
-
- port@3 {
- reg = <3>;
- label = "optical3";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- link-gpios = <&gpio6 2
- GPIO_ACTIVE_HIGH>;
- };
- };
-
- port@4 {
- reg = <4>;
- label = "optical4";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- link-gpios = <&gpio6 3
- GPIO_ACTIVE_HIGH>;
- };
- };
-
- switch2port9: port@9 {
- reg = <9>;
- label = "dsa";
- phy-mode = "rgmii-txid";
- link = <&switch1port5
- &switch0port5>;
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
};
};
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 6c56ad086c7c..52dbad5619e2 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -76,7 +76,7 @@ CONFIG_LEDS_CLASS=y
CONFIG_LEDS_LOCOMO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 10f49ab5328e..47195e8690b4 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -82,6 +82,7 @@ CONFIG_TOUCHSCREEN_MMS114=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MAX77693_HAPTIC=y
CONFIG_INPUT_MAX8997_HAPTIC=y
+CONFIG_KEYBOARD_SAMSUNG=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 24636cfdf6df..cf4918a2c51f 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -180,7 +180,7 @@ CONFIG_LEDS_FSG=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_ISL1208=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 8f857564657f..8a5fff1b7f6f 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -264,6 +264,7 @@ CONFIG_KEYBOARD_TEGRA=y
CONFIG_KEYBOARD_SPEAR=y
CONFIG_KEYBOARD_ST_KEYSCAN=y
CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_CYAPA=m
CONFIG_MOUSE_ELAN_I2C=y
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 03a39fe29246..1568cb5cd870 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -154,30 +154,23 @@ static int ghash_async_init(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
- if (!may_use_simd()) {
- memcpy(cryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
- return crypto_ahash_init(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
- desc->tfm = child;
- desc->flags = req->base.flags;
- return crypto_shash_init(desc);
- }
+ desc->tfm = child;
+ desc->flags = req->base.flags;
+ return crypto_shash_init(desc);
}
static int ghash_async_update(struct ahash_request *req)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd()) {
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_update(cryptd_req);
@@ -190,12 +183,12 @@ static int ghash_async_update(struct ahash_request *req)
static int ghash_async_final(struct ahash_request *req)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd()) {
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_final(cryptd_req);
@@ -212,7 +205,8 @@ static int ghash_async_digest(struct ahash_request *req)
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd()) {
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_digest(cryptd_req);
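The four async entry points above now share one fallback rule: defer to the cryptd queue when NEON/SIMD cannot be used in the current context, or when running atomically while cryptd already has requests queued, so this request is not reordered ahead of them. A minimal C sketch of that predicate, using only the helpers visible in the hunk (illustrative, not part of the patch):

        static bool ghash_must_defer_to_cryptd(struct cryptd_ahash *cryptd_tfm)
        {
                /* SIMD unusable here: must go through the cryptd worker. */
                if (!may_use_simd())
                        return true;

                /* Atomic context with work already queued on cryptd: defer as
                 * well, so ordering against the queued requests is preserved. */
                return in_atomic() && cryptd_ahash_queued(cryptd_tfm);
        }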
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index b2bc8e11471d..4eaea2173bf8 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -480,13 +480,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
mrc p15, 0, \tmp, c3, c0, 0
- str \tmp, [sp, #S_FRAME_SIZE]
+ str \tmp, [sp, #SVC_DACR]
#endif
.endm
.macro uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r0, [sp, #S_FRAME_SIZE]
+ ldr r0, [sp, #SVC_DACR]
mcr p15, 0, r0, c3, c0, 0
#endif
.endm
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9e10c4567eb4..66d0e215a773 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -77,8 +77,36 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
return result; \
}
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result, val; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic_fetch_" #op "\n" \
+"1: ldrex %0, [%4]\n" \
+" " #asm_op " %1, %0, %5\n" \
+" strex %2, %1, [%4]\n" \
+" teq %2, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
@@ -159,6 +187,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return val; \
}
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int val; \
+ \
+ raw_local_irq_save(flags); \
+ val = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ \
+ return val; \
+}
+
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -187,19 +229,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or, |=, orr)
-ATOMIC_OP(xor, ^=, eor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, orr)
+ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -317,24 +366,61 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
return result; \
}
+#define ATOMIC64_FETCH_OP(op, op1, op2) \
+static inline long long \
+atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
+{ \
+ long long result, val; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
+"1: ldrexd %0, %H0, [%4]\n" \
+" " #op1 " %Q1, %Q0, %Q5\n" \
+" " #op2 " %R1, %R0, %R5\n" \
+" strexd %2, %1, %H1, [%4]\n" \
+" teq %2, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
- ATOMIC64_OP_RETURN(op, op1, op2)
+ ATOMIC64_OP_RETURN(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
#define atomic64_andnot atomic64_andnot
-ATOMIC64_OP(and, and, and)
-ATOMIC64_OP(andnot, bic, bic)
-ATOMIC64_OP(or, orr, orr)
-ATOMIC64_OP(xor, eor, eor)
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, orr, orr)
+ATOMIC64_OPS(xor, eor, eor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
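The new ATOMIC_FETCH_OP()/ATOMIC64_FETCH_OP() generators above produce atomic_fetch_<op>() variants that return the value the counter held before the operation. A minimal sketch of what ATOMIC_FETCH_OP(add, +=, add) expands to in the non-SMP (IRQ-disable) flavour, following the macro body in the hunk (shown only for illustration):

        static inline int atomic_fetch_add(int i, atomic_t *v)
        {
                unsigned long flags;
                int val;

                raw_local_irq_save(flags);
                val = v->counter;       /* old value is what gets returned */
                v->counter += i;
                raw_local_irq_restore(flags);

                return val;
        }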
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 112cc1a5d47f..f5d698182d50 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -44,9 +44,7 @@ extern void arm_heavy_mb(void);
#define __arm_heavy_mb(x...) dsb(x)
#endif
-#ifdef CONFIG_ARCH_HAS_BARRIERS
-#include <mach/barriers.h>
-#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() __arm_heavy_mb()
#define rmb() dsb()
#define wmb() __arm_heavy_mb(st)
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index dff714d886d5..b7a428154355 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -10,8 +10,8 @@
#include <asm/param.h> /* HZ */
#define MAX_UDELAY_MS 2
-#define UDELAY_MULT ((UL(2199023) * HZ) >> 11)
-#define UDELAY_SHIFT 30
+#define UDELAY_MULT UL(2047 * HZ + 483648 * HZ / 1000000)
+#define UDELAY_SHIFT 31
#ifndef __ASSEMBLY__
@@ -34,7 +34,7 @@ extern struct arm_delay_ops {
* it, it means that you're calling udelay() with an out of range value.
*
* With currently imposed limits, this means that we support a max delay
- * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ * of 2000us. Further limits: HZ<=1000
*/
extern void __bad_udelay(void);
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index a708fa1f0905..766bf9b78160 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -28,10 +28,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
-#define arch_efi_call_virt(f, args...) \
+#define arch_efi_call_virt(p, f, args...) \
({ \
efi_##f##_t *__f; \
- __f = efi.systab->runtime->f; \
+ __f = p->f; \
__f(args); \
})
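With the change above, arch_efi_call_virt() no longer reaches through efi.systab itself; the caller passes the runtime-services table explicitly. A minimal usage sketch, assuming the caller already holds the efi_runtime_services_t pointer handed down by the generic EFI code (the function name here is illustrative only):

        static efi_status_t sketch_get_time(efi_runtime_services_t *rt,
                                            efi_time_t *tm, efi_time_cap_t *tc)
        {
                /* Expands to: efi_get_time_t *__f = rt->get_time; __f(tm, tc); */
                return arch_efi_call_virt(rt, get_time, tm, tc);
        }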
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index f4882553fbb0..85a34cc8316a 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -17,7 +17,7 @@
#define fd_outb(val,port) \
do { \
- if ((port) == FD_DOR) \
+ if ((port) == (u32)FD_DOR) \
fd_setdor((val)); \
else \
outb((val),(port)); \
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 781ef5fe235d..021692c64de3 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -282,7 +282,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
* These perform PCI memory accesses via an ioremap region. They don't
* take an address as such, but a cookie.
*
- * Again, this are defined to perform little endian accesses. See the
+ * Again, these are defined to perform little endian accesses. See the
* IO port primitives for more information.
*/
#ifndef readl
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 19cfab526d13..b2902a5cd780 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -29,7 +29,7 @@
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ return (pmd_t *)get_zeroed_page(GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
static inline void clean_pte_table(pte_t *pte)
{
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index aeddd28b3595..92fd2c8a9af0 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_present(pmd) (pmd_val(pmd))
#define copy_pmd(pmdpd,pmdps) \
do { \
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index fa70db7c714b..2a029bceaf2f 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
+#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
- return __pmd(0);
+ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 348caabb7625..d62204060cbe 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 51622ba7c4a6..e9c9a117bd25 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -13,10 +13,20 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
struct pt_regs {
unsigned long uregs[18];
};
+struct svc_pt_regs {
+ struct pt_regs regs;
+ u32 dacr;
+ u32 addr_limit;
+};
+
+#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
+
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 0fa418463f49..4bec45442072 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -6,6 +6,8 @@
#endif
#include <linux/prefetch.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
@@ -50,8 +52,21 @@ static inline void dsb_sev(void)
* memory.
*/
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ u16 owner = READ_ONCE(lock->tickets.owner);
+
+ for (;;) {
+ arch_spinlock_t tmp = READ_ONCE(*lock);
+
+ if (tmp.tickets.owner == tmp.tickets.next ||
+ tmp.tickets.owner != owner)
+ break;
+
+ wfe();
+ }
+ smp_acquire__after_ctrl_dep();
+}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3cadb726ec88..1e25cd80589e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -209,17 +209,38 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
tlb_flush(tlb);
}
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
+ if (tlb->nr == tlb->max)
+ return true;
tlb->pages[tlb->nr++] = page;
- VM_BUG_ON(tlb->nr > tlb->max);
- return tlb->max - tlb->nr;
+ return false;
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (!__tlb_remove_page(tlb, page))
+ if (__tlb_remove_page(tlb, page)) {
tlb_flush_mmu(tlb);
+ __tlb_remove_page(tlb, page);
+ }
+}
+
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return tlb_remove_page(tlb, page);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 35c9db857ebe..62a6f65029e6 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -104,14 +104,6 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a) == (b))
-#define __addr_ok(addr) ({ \
- unsigned long flag; \
- __asm__("cmp %2, %0; movlo %0, #0" \
- : "=&r" (flag) \
- : "0" (current_thread_info()->addr_limit), "r" (addr) \
- : "cc"); \
- (flag == 0); })
-
/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
unsigned long flag, roksum; \
@@ -238,49 +230,23 @@ extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2, __p, __e, __l, __s) \
- __asm__ __volatile__ ( \
- __asmeq("%0", "r0") __asmeq("%2", "r2") \
- __asmeq("%3", "r1") \
- "bl __put_user_" #__s \
- : "=&r" (__e) \
- : "0" (__p), "r" (__r2), "r" (__l) \
- : "ip", "lr", "cc")
-
-#define __put_user_check(x, p) \
+#define __put_user_check(__pu_val, __ptr, __err, __s) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
- const typeof(*(p)) __user *__tmp_p = (p); \
- register const typeof(*(p)) __r2 asm("r2") = (x); \
- register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
+ register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
+ register const void __user *__p asm("r0") = __ptr; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
- unsigned int __ua_flags = uaccess_save_and_enable(); \
- switch (sizeof(*(__p))) { \
- case 1: \
- __put_user_x(__r2, __p, __e, __l, 1); \
- break; \
- case 2: \
- __put_user_x(__r2, __p, __e, __l, 2); \
- break; \
- case 4: \
- __put_user_x(__r2, __p, __e, __l, 4); \
- break; \
- case 8: \
- __put_user_x(__r2, __p, __e, __l, 8); \
- break; \
- default: __e = __put_user_bad(); break; \
- } \
- uaccess_restore(__ua_flags); \
- __e; \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
+ : "ip", "lr", "cc"); \
+ __err = __e; \
})
-#define put_user(x, p) \
- ({ \
- might_fault(); \
- __put_user_check(x, p); \
- })
-
#else /* CONFIG_MMU */
/*
@@ -298,7 +264,7 @@ static inline void set_fs(mm_segment_t fs)
}
#define get_user(x, p) __get_user(x, p)
-#define put_user(x, p) __put_user(x, p)
+#define __put_user_check __put_user_nocheck
#endif /* CONFIG_MMU */
@@ -389,36 +355,54 @@ do { \
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
+
+#define __put_user_switch(x, ptr, __err, __fn) \
+ do { \
+ const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ unsigned int __ua_flags; \
+ might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
+ case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
+ case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
+ case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ uaccess_restore(__ua_flags); \
+ } while (0)
+
+#define put_user(x, ptr) \
+({ \
+ int __pu_err = 0; \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_check); \
+ __pu_err; \
+})
+
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
- __put_user_err((x), (ptr), __pu_err); \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
__pu_err; \
})
#define __put_user_error(x, ptr, err) \
({ \
- __put_user_err((x), (ptr), err); \
+ __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
(void) 0; \
})
-#define __put_user_err(x, ptr, err) \
-do { \
- unsigned long __pu_addr = (unsigned long)(ptr); \
- unsigned int __ua_flags; \
- __typeof__(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
- might_fault(); \
- __ua_flags = uaccess_save_and_enable(); \
- switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
- case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
- case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
- case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
- default: __put_user_bad(); \
- } \
- uaccess_restore(__ua_flags); \
-} while (0)
+#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
+ do { \
+ unsigned long __pu_addr = (unsigned long)__pu_ptr; \
+ __put_user_nocheck_##__size(x, __pu_addr, __err); \
+ } while (0)
+
+#define __put_user_nocheck_1 __put_user_asm_byte
+#define __put_user_nocheck_2 __put_user_asm_half
+#define __put_user_nocheck_4 __put_user_asm_word
+#define __put_user_nocheck_8 __put_user_asm_dword
#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index b6b962d70db9..9d874db13c0e 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -52,6 +52,7 @@ int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
int HYPERVISOR_physdev_op(int cmd, void *arg);
int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
int HYPERVISOR_platform_op_raw(void *arg);
static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
{
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
new file mode 100644
index 000000000000..ec154e719b11
--- /dev/null
+++ b/arch/arm/include/asm/xen/xen-ops.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_XEN_OPS_H
+#define _ASM_XEN_OPS_H
+
+void xen_efi_runtime_setup(void);
+
+#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index df3f60cb1168..a2b3eb313a25 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -139,8 +139,8 @@ struct kvm_arch_memory_slot {
#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
-#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
+#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
/* Normal registers are mapped as coprocessor 16. */
#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 27d05813ff09..608008229c7d 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -107,7 +107,10 @@ int main(void)
DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc));
DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr));
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
- DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
+ DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit));
+ DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
BLANK();
#ifdef CONFIG_CACHE_L2X0
DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index a44b268e12e1..7dccc964d75f 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -47,18 +47,13 @@ int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
* This function calls the underlying arch specific low level PM code as
* registered at the init time.
*
- * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
- * callback otherwise.
+ * Returns the result of the suspend callback.
*/
int arm_cpuidle_suspend(int index)
{
- int ret = -EOPNOTSUPP;
int cpu = smp_processor_id();
- if (cpuidle_ops[cpu].suspend)
- ret = cpuidle_ops[cpu].suspend(index);
-
- return ret;
+ return cpuidle_ops[cpu].suspend(index);
}
/**
@@ -92,7 +87,8 @@ static const struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
* process.
*
 * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
- * no cpuidle_ops is registered for the 'enable-method'.
+ * no cpuidle_ops is registered for the 'enable-method', or if either init or
+ * suspend callback isn't defined.
*/
static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
{
@@ -110,6 +106,12 @@ static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
return -EOPNOTSUPP;
}
+ if (!ops->init || !ops->suspend) {
+ pr_warn("cpuidle_ops '%s': no init or suspend callback\n",
+ enable_method);
+ return -EOPNOTSUPP;
+ }
+
cpuidle_ops[cpu] = *ops; /* structure copy */
pr_notice("cpuidle: enable-method property '%s'"
@@ -129,7 +131,8 @@ static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
* Returns:
* 0 on success,
* -ENODEV if it fails to find the cpu node in the device tree,
- * -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ * -EOPNOTSUPP if it does not find a registered and valid cpuidle_ops for
+ * this cpu,
* -ENOENT if it fails to find an 'enable-method' property,
* -ENXIO if the HW reports a failure or a misconfiguration,
 * -ENOMEM if the HW reports a memory allocation failure
@@ -143,7 +146,7 @@ int __init arm_cpuidle_init(int cpu)
return -ENODEV;
ret = arm_cpuidle_read_ops(cpu_node, cpu);
- if (!ret && cpuidle_ops[cpu].init)
+ if (!ret)
ret = cpuidle_ops[cpu].init(cpu_node, cpu);
of_node_put(cpu_node);
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 2e26016a91a5..40ecd5f514a2 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -23,6 +23,7 @@
#include <asm/cputype.h>
#include <asm/setup.h>
#include <asm/page.h>
+#include <asm/prom.h>
#include <asm/smp_plat.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -213,6 +214,8 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
#if defined(CONFIG_ARCH_MULTIPLATFORM) || defined(CONFIG_ARM_SINGLE_ARMV7M)
DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
+ .l2c_aux_val = 0x0,
+ .l2c_aux_mask = ~0x0,
MACHINE_END
mdesc_best = &__mach_desc_GENERIC_DT;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e2550500486d..bc5f50799d75 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -92,7 +92,7 @@
* Invalid mode handlers
*/
.macro inv_entry, reason
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
ARM( stmib sp, {r1 - lr} )
THUMB( stmia sp, {r0 - r12} )
THUMB( str sp, [sp, #S_SP] )
@@ -152,7 +152,7 @@ ENDPROC(__und_invalid)
.macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
+ sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
- add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
+ add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
@@ -185,6 +185,12 @@ ENDPROC(__und_invalid)
@
stmia r7, {r2 - r6}
+ get_thread_info tsk
+ ldr r0, [tsk, #TI_ADDR_LIMIT]
+ mov r1, #TASK_SIZE
+ str r1, [tsk, #TI_ADDR_LIMIT]
+ str r0, [sp, #SVC_ADDR_LIMIT]
+
uaccess_save r0
.if \uaccess
uaccess_disable r0
@@ -213,7 +219,6 @@ __irq_svc:
irq_handler
#ifdef CONFIG_PREEMPT
- get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
@@ -366,17 +371,17 @@ ENDPROC(__fiq_abt)
/*
* User mode handlers
*
- * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
+ * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
*/
-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
.macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 30a7228eaceb..10c3283d6c19 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -145,7 +145,7 @@ ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
v7m_exception_entry
#else
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
ARM( add r8, sp, #S_PC )
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 0d22ad206d52..6391728c8f03 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -90,7 +90,7 @@
@ Linux expects to have irqs off. Do it here before taking stack space
cpsid i
- sub sp, #S_FRAME_SIZE-S_IP
+ sub sp, #PT_REGS_SIZE-S_IP
stmdb sp!, {r0-r11}
@ load saved r12, lr, return address and xPSR.
@@ -160,7 +160,7 @@
ldmia sp!, {r0-r11}
@ restore main sp
- add sp, sp, #S_FRAME_SIZE-S_IP
+ add sp, sp, #PT_REGS_SIZE-S_IP
cpsie i
bx lr
@@ -215,7 +215,9 @@
blne trace_hardirqs_off
#endif
.endif
+ ldr r1, [sp, #SVC_ADDR_LIMIT]
uaccess_restore
+ str r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
@@ -259,7 +261,9 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
+ ldr r1, [sp, #SVC_ADDR_LIMIT]
uaccess_restore
+ str r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
@@ -307,7 +311,7 @@
.endif
mov r0, r0 @ ARMv5T and earlier require a nop
@ after ldm {}^
- add sp, sp, #\offset + S_FRAME_SIZE
+ add sp, sp, #\offset + PT_REGS_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
@@ -334,7 +338,7 @@
.else
ldmdb sp, {r0 - r12} @ get calling r0 - r12
.endif
- add sp, sp, #S_FRAME_SIZE - S_SP
+ add sp, sp, #PT_REGS_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
#endif /* !CONFIG_THUMB2_KERNEL */
.endm
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 907534f97053..abcf47848525 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -73,7 +73,7 @@ __irq_entry:
@ correctness they don't need to be restored. So only r8-r11 must be
@ restored here. The easiest way to do so is to restore r0-r7, too.
ldmia sp!, {r0-r11}
- add sp, #S_FRAME_SIZE-S_IP
+ add sp, #PT_REGS_SIZE-S_IP
cpsie i
bx lr
ENDPROC(__irq_entry)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 4a803c5a1ff7..612eb530f33f 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -96,19 +96,23 @@ void __show_regs(struct pt_regs *regs)
unsigned long flags;
char buf[64];
#ifndef CONFIG_CPU_V7M
- unsigned int domain;
+ unsigned int domain, fs;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Get the domain register for the parent context. In user
 * mode, we don't save the DACR, so let's use what it should
* be. For other modes, we place it after the pt_regs struct.
*/
- if (user_mode(regs))
+ if (user_mode(regs)) {
domain = DACR_UACCESS_ENABLE;
- else
- domain = *(unsigned int *)(regs + 1);
+ fs = get_fs();
+ } else {
+ domain = to_svc_pt_regs(regs)->dacr;
+ fs = to_svc_pt_regs(regs)->addr_limit;
+ }
#else
domain = get_domain();
+ fs = get_fs();
#endif
#endif
@@ -144,7 +148,7 @@ void __show_regs(struct pt_regs *regs)
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
- else if (get_fs() == get_ds())
+ else if (fs == get_ds())
segment = "kernel";
else
segment = "user";
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ef9119f7462e..ce131ed5939d 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;
- vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
+ vfp_flush_hwstate(thread);
return 0;
}
@@ -932,18 +932,19 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
current_thread_info()->syscall = scno;
- /* Do the secure computing check first; failures should be fast. */
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+ /* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
- if (secure_computing() == -1)
+ if (secure_computing(NULL) == -1)
return -1;
#else
/* XXX: remove this once OABI gets fixed */
- secure_computing_strict(scno);
+ secure_computing_strict(current_thread_info()->syscall);
#endif
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
-
+ /* Tracer or seccomp may have changed syscall. */
scno = current_thread_info()->syscall;
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7b5350060612..da2f6c360f6b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -19,7 +19,6 @@
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
-#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
@@ -844,7 +843,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
struct resource *res;
kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
@@ -903,14 +902,9 @@ static int __init customize_machine(void)
* machine from the device tree, if no callback is provided,
* otherwise we would always need an init_machine callback.
*/
- of_iommu_init();
if (machine_desc->init_machine)
machine_desc->init_machine();
-#ifdef CONFIG_OF
- else
- of_platform_populate(NULL, of_default_bus_match_table,
- NULL, NULL);
-#endif
+
return 0;
}
arch_initcall(customize_machine);
@@ -1064,6 +1058,7 @@ void __init setup_arch(char **cmdline_p)
early_paging_init(mdesc);
#endif
setup_dma_zone(mdesc);
+ xen_early_init();
efi_init();
sanity_check_meminfo();
arm_memblock_init(mdesc);
@@ -1080,7 +1075,6 @@ void __init setup_arch(char **cmdline_p)
arm_dt_init_cpu_maps();
psci_dt_init();
- xen_early_init();
#ifdef CONFIG_SMP
if (is_smp()) {
if (!mdesc->smp_init || !mdesc->smp_init()) {
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index df90bc59bfce..861521606c6d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -486,7 +486,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise(target, ipi_types[ipinr]);
+ trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 2e72be4f623e..22313cb53362 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -93,17 +93,53 @@ void erratum_a15_798181_init(void)
unsigned int revidr = read_cpuid(CPUID_REVIDR);
/* Brahma-B15 r0p0..r0p2 affected
- * Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
- if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2)
+ * Cortex-A15 r0p0..r3p3 w/o ECO fix affected
+ * Fixes applied to A15 with respect to the revision and revidr are:
+ *
+ * r0p0-r2p1: No fixes applied
+ * r2p2,r2p3:
+ * REVIDR[4]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * REVIDR[9]: Not defined
+ * r2p4,r3p0,r3p1,r3p2:
+ * REVIDR[4]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * REVIDR[9]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * - This is an update to a previously released ECO.
+ * r3p3:
+ * REVIDR[4]: Reserved
+ * REVIDR[9]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * - This is an update to a previously released ECO.
+ *
+ * Handling:
+ * REVIDR[9] set -> No WA
+ * REVIDR[4] set, REVIDR[9] cleared -> Partial WA
+ * Both cleared -> Full WA
+ */
+ if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) {
erratum_a15_798181_handler = erratum_a15_798181_broadcast;
- else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 &&
- (revidr & 0x210) != 0x210) {
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) {
+ erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) {
if (revidr & 0x10)
erratum_a15_798181_handler =
erratum_a15_798181_partial;
else
erratum_a15_798181_handler =
erratum_a15_798181_broadcast;
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) {
+ if ((revidr & 0x210) == 0)
+ erratum_a15_798181_handler =
+ erratum_a15_798181_broadcast;
+ else if (revidr & 0x10)
+ erratum_a15_798181_handler =
+ erratum_a15_798181_partial;
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) {
+ if ((revidr & 0x200) == 0)
+ erratum_a15_798181_handler =
+ erratum_a15_798181_partial;
}
}
#endif
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 1bfa7a7f5533..02d5e5e8d44c 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -310,24 +310,17 @@ static void twd_timer_setup(void)
enable_percpu_irq(clk->irq, 0);
}
-static int twd_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int twd_timer_starting_cpu(unsigned int cpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- twd_timer_setup();
- break;
- case CPU_DYING:
- twd_timer_stop();
- break;
- }
-
- return NOTIFY_OK;
+ twd_timer_setup();
+ return 0;
}
-static struct notifier_block twd_timer_cpu_nb = {
- .notifier_call = twd_timer_cpu_notify,
-};
+static int twd_timer_dying_cpu(unsigned int cpu)
+{
+ twd_timer_stop();
+ return 0;
+}
static int __init twd_local_timer_common_register(struct device_node *np)
{
@@ -345,9 +338,9 @@ static int __init twd_local_timer_common_register(struct device_node *np)
goto out_free;
}
- err = register_cpu_notifier(&twd_timer_cpu_nb);
- if (err)
- goto out_irq;
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
+ "AP_ARM_TWD_STARTING",
+ twd_timer_starting_cpu, twd_timer_dying_cpu);
twd_get_clock(np);
if (!of_property_read_bool(np, "always-on"))
@@ -365,8 +358,6 @@ static int __init twd_local_timer_common_register(struct device_node *np)
return 0;
-out_irq:
- free_percpu_irq(twd_ppi, twd_evt);
out_free:
iounmap(twd_base);
twd_base = NULL;
@@ -390,7 +381,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
}
#ifdef CONFIG_OF
-static void __init twd_local_timer_of_register(struct device_node *np)
+static int __init twd_local_timer_of_register(struct device_node *np)
{
int err;
@@ -410,6 +401,7 @@ static void __init twd_local_timer_of_register(struct device_node *np)
out:
WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
+ return err;
}
CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index e2c6da096cef..99420fc1f066 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -125,6 +125,8 @@ SECTIONS
#ifdef CONFIG_DEBUG_ALIGN_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
+ _etext = .; /* End of text section */
+
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
@@ -155,8 +157,6 @@ SECTIONS
NOTES
- _etext = .; /* End of text and rodata section */
-
#ifdef CONFIG_DEBUG_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#else
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 893941ec98dc..f1bde7c4e736 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvm_timer_vcpu_terminate(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
+ kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index d8a780799506..27f4d96258a2 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -29,7 +29,10 @@ else
lib-y += io-readsw-armv4.o io-writesw-armv4.o
endif
-lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
+ifeq ($(CONFIG_ARCH_RPC),y)
+ lib-y += ecard.o io-acorn.o floppydma.o
+ AFLAGS_delay-loop.o += -march=armv4
+endif
$(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 518bf6e93f78..792c59d885bc 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/delay.h>
+
.text
.LC0: .word loops_per_jiffy
@@ -17,7 +18,6 @@
/*
* r0 <= 2000
- * lpj <= 0x01ffffff (max. 3355 bogomips)
* HZ <= 1000
*/
@@ -25,16 +25,11 @@ ENTRY(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0
ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
- mov r1, #-1
ldr r2, .LC0
- ldr r2, [r2] @ max = 0x01ffffff
- add r0, r0, r1, lsr #32-14
- mov r0, r0, lsr #14 @ max = 0x0001ffff
- add r2, r2, r1, lsr #32-10
- mov r2, r2, lsr #10 @ max = 0x00007fff
- mul r0, r2, r0 @ max = 2^32-1
- add r0, r0, r1, lsr #32-6
- movs r0, r0, lsr #6
+ ldr r2, [r2]
+ umull r1, r0, r2, r0
+ adds r1, r1, #0xffffffff
+ adcs r0, r0, r0
reteq lr
/*
diff --git a/arch/arm/mach-artpec/board-artpec6.c b/arch/arm/mach-artpec/board-artpec6.c
index 71513df3374e..a0b1979c2c2c 100644
--- a/arch/arm/mach-artpec/board-artpec6.c
+++ b/arch/arm/mach-artpec/board-artpec6.c
@@ -13,7 +13,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
@@ -44,8 +43,6 @@ static void __init artpec6_init_machine(void)
regmap_write(regmap, ARTPEC6_DMACFG_REGNUM,
ARTPEC6_DMACFG_UARTS_BURST);
};
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static void artpec6_l2c310_write_sec(unsigned long val, unsigned reg)
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 63b4fa25b48a..d068ec3cd1f6 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -30,7 +30,7 @@ static void __init at91rm9200_dt_device_init(void)
if (soc != NULL)
soc_dev = soc_device_to_device(soc);
- of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+ of_platform_default_populate(NULL, NULL, soc_dev);
at91rm9200_pm_init();
}
diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
index cada2a6412b3..ba28e9cc584d 100644
--- a/arch/arm/mach-at91/at91sam9.c
+++ b/arch/arm/mach-at91/at91sam9.c
@@ -61,7 +61,7 @@ static void __init at91sam9_common_init(void)
if (soc != NULL)
soc_dev = soc_device_to_device(soc);
- of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+ of_platform_default_populate(NULL, NULL, soc_dev);
}
static void __init at91sam9_dt_device_init(void)
diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
index 922b85f07cd2..b272c45b400f 100644
--- a/arch/arm/mach-at91/sama5.c
+++ b/arch/arm/mach-at91/sama5.c
@@ -68,7 +68,7 @@ static void __init sama5_dt_device_init(void)
if (soc != NULL)
soc_dev = soc_device_to_device(soc);
- of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+ of_platform_default_populate(NULL, NULL, soc_dev);
sama5_pm_init();
}
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 8e2ccd2ccf54..5f63f513d9c9 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -89,6 +89,7 @@ config ARCH_BCM_MOBILE
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select ARCH_BCM_MOBILE_SMP if SMP
+ select BCM_KONA_TIMER
help
This enables support for systems based on Broadcom mobile SoCs.
@@ -143,6 +144,7 @@ config ARCH_BCM2835
select ARM_TIMER_SP804
select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
select CLKSRC_OF
+ select BCM2835_TIMER
select PINCTRL
select PINCTRL_BCM2835
help
diff --git a/arch/arm/mach-bcm/board_bcm21664.c b/arch/arm/mach-bcm/board_bcm21664.c
index 82ad5687771f..0d7034c57334 100644
--- a/arch/arm/mach-bcm/board_bcm21664.c
+++ b/arch/arm/mach-bcm/board_bcm21664.c
@@ -12,7 +12,6 @@
*/
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
@@ -60,7 +59,6 @@ static void bcm21664_restart(enum reboot_mode mode, const char *cmd)
static void __init bcm21664_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
kona_l2_cache_init();
}
diff --git a/arch/arm/mach-bcm/board_bcm281xx.c b/arch/arm/mach-bcm/board_bcm281xx.c
index 2e367bd7c600..b81bb386951d 100644
--- a/arch/arm/mach-bcm/board_bcm281xx.c
+++ b/arch/arm/mach-bcm/board_bcm281xx.c
@@ -13,7 +13,6 @@
#include <linux/clocksource.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <asm/mach/arch.h>
@@ -58,7 +57,6 @@ static void bcm281xx_restart(enum reboot_mode mode, const char *cmd)
static void __init bcm281xx_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
kona_l2_cache_init();
}
diff --git a/arch/arm/mach-bcm/board_bcm2835.c b/arch/arm/mach-bcm/board_bcm2835.c
index 834d67684e20..0c1edfc98696 100644
--- a/arch/arm/mach-bcm/board_bcm2835.c
+++ b/arch/arm/mach-bcm/board_bcm2835.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/clk/bcm2835.h>
#include <asm/mach/arch.h>
@@ -23,16 +22,7 @@
static void __init bcm2835_init(void)
{
- int ret;
-
bcm2835_init_clocks();
-
- ret = of_platform_populate(NULL, of_default_bus_match_table, NULL,
- NULL);
- if (ret) {
- pr_err("of_platform_populate failed: %d\n", ret);
- BUG();
- }
}
static const char * const bcm2835_compat[] = {
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 9b1dc223d8d3..03da3813f1ab 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -395,8 +395,7 @@ static void __init cns3xxx_init(void)
pm_power_off = cns3xxx_power_off;
- of_platform_populate(NULL, of_default_bus_match_table,
- cns3xxx_auxdata, NULL);
+ of_platform_default_populate(NULL, cns3xxx_auxdata, NULL);
}
static const char *const cns3xxx_dt_compat[] __initconst = {
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 68cc09907828..ab47b8eb1b15 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -288,7 +288,7 @@ static struct gpio_led evm_leds[] = {
{ .name = "DS2", .active_low = 1,
.default_trigger = "mmc0", },
{ .name = "DS1", .active_low = 1,
- .default_trigger = "ide-disk", },
+ .default_trigger = "disk-activity", },
};
static const struct gpio_led_platform_data evm_led_data = {
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 7f2d804fbddf..ecf139f31c4c 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -61,7 +61,6 @@ config ARCH_EXYNOS4
select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
select CPU_EXYNOS4210
select GIC_NON_BANKED
- select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
select MIGHT_HAVE_CACHE_L2X0
help
Samsung EXYNOS4 (Cortex-A9) SoC based systems
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 52ccf247e079..dea410adee7e 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/irqchip.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
@@ -217,8 +216,6 @@ static void __init exynos_dt_machine_init(void)
of_machine_is_compatible("samsung,exynos3250") ||
of_machine_is_compatible("samsung,exynos5250"))
platform_device_register(&exynos_cpuidle);
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static char const *const exynos_dt_compat[] __initconst = {
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 6050a14faee6..07f60986dc2c 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -23,7 +23,6 @@
#include <linux/pl320-ipc.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
#include <linux/amba/bus.h>
@@ -163,8 +162,6 @@ static void __init highbank_init(void)
pl320_ipc_register_notifier(&hb_keys_nb);
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
if (psci_ops.cpu_suspend)
platform_device_register(&highbank_cpuidle_device);
}
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index 7296c31849a8..40564c9b1226 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -52,8 +52,6 @@ static void __init imx51_dt_init(void)
{
imx51_ipu_mipi_setup();
imx_src_init();
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static void __init imx51_init_late(void)
diff --git a/arch/arm/mach-imx/mach-imx53.c b/arch/arm/mach-imx/mach-imx53.c
index d251cecd9bd2..07c2e8dca494 100644
--- a/arch/arm/mach-imx/mach-imx53.c
+++ b/arch/arm/mach-imx/mach-imx53.c
@@ -32,8 +32,6 @@ static void __init imx53_dt_init(void)
{
imx_src_init();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
imx_aips_allow_unprivileged_access("fsl,imx53-aipstz");
}
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index b31890f1e4a4..68c16d59b808 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -278,7 +278,7 @@ static void __init imx6q_init_machine(void)
imx6q_enet_phy_init();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+ of_platform_default_populate(NULL, NULL, parent);
imx_anatop_init();
cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
diff --git a/arch/arm/mach-imx/mach-imx6sl.c b/arch/arm/mach-imx/mach-imx6sl.c
index f9a9a362a88a..04084900d810 100644
--- a/arch/arm/mach-imx/mach-imx6sl.c
+++ b/arch/arm/mach-imx/mach-imx6sl.c
@@ -52,7 +52,7 @@ static void __init imx6sl_init_machine(void)
if (parent == NULL)
pr_warn("failed to initialize soc device\n");
- of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+ of_platform_default_populate(NULL, NULL, parent);
imx6sl_fec_init();
imx_anatop_init();
diff --git a/arch/arm/mach-imx/mach-imx6sx.c b/arch/arm/mach-imx/mach-imx6sx.c
index 07a3a340f151..7f52d9b1e8a4 100644
--- a/arch/arm/mach-imx/mach-imx6sx.c
+++ b/arch/arm/mach-imx/mach-imx6sx.c
@@ -72,7 +72,7 @@ static void __init imx6sx_init_machine(void)
if (parent == NULL)
pr_warn("failed to initialize soc device\n");
- of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+ of_platform_default_populate(NULL, NULL, parent);
imx6sx_enet_init();
imx_anatop_init();
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index a38b16b69923..5d9bfab279dd 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
static void __init imx6ul_enet_phy_init(void)
{
if (IS_BUILTIN(CONFIG_PHYLIB))
- phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+ phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
ksz8081_phy_fixup);
}
@@ -64,7 +64,6 @@ static void __init imx6ul_init_machine(void)
if (parent == NULL)
pr_warn("failed to initialize soc device\n");
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
imx6ul_enet_init();
imx_anatop_init();
imx6ul_pm_init();
diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c
index b450f525a670..f388e6bd46ec 100644
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -93,7 +93,6 @@ static void __init imx7d_init_machine(void)
if (parent == NULL)
pr_warn("failed to initialize soc device\n");
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
imx_anatop_init();
imx7d_enet_init();
}
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index bc764a89615e..599f973e10d8 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -20,7 +20,7 @@ if ARCH_INTEGRATOR
config ARCH_INTEGRATOR_AP
bool "Support Integrator/AP and Integrator/PP2 platforms"
- select CLKSRC_MMIO
+ select INTEGRATOR_AP_TIMER
select MIGHT_HAVE_PCI
select SERIAL_AMBA_PL010 if TTY
select SERIAL_AMBA_PL010_CONSOLE if TTY
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 2b118f20c62c..c7bb83205f5b 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -240,8 +240,7 @@ static void __init ap_init_of(void)
if (!ebi_base)
return;
- of_platform_populate(NULL, of_default_bus_match_table,
- ap_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, ap_auxdata_lookup, NULL);
sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
for (i = 0; i < 4; i++) {
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 6f6b051e81e0..825298349bf5 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -231,8 +231,7 @@ static void __init intcp_init_of(void)
if (!intcp_con_base)
return;
- of_platform_populate(NULL, of_default_bus_match_table,
- intcp_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, intcp_auxdata_lookup, NULL);
}
static const char * intcp_dt_board_compat[] = {
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index ea955f6db8b7..bac577badc7e 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -4,7 +4,7 @@ config ARCH_KEYSTONE
depends on ARM_PATCH_PHYS_VIRT
select ARM_GIC
select HAVE_ARM_ARCH_TIMER
- select CLKSRC_MMIO
+ select KEYSTONE_TIMER
select ARM_ERRATA_798181 if SMP
select COMMON_CLK_KEYSTONE
select ARCH_SUPPORTS_BIG_ENDIAN
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index a33a296b00dc..84613abf35a3 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -60,7 +60,6 @@ static void __init keystone_init(void)
bus_register_notifier(&platform_bus_type, &platform_nb);
}
keystone_pm_runtime_init();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static long long __init keystone_pv_fixup(void)
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 81265e80302d..0e4cbbe980eb 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -191,8 +191,7 @@ static void __init lpc3250_machine_init(void)
LPC32XX_CLKPWR_TESTCLK_TESTCLK2_EN,
LPC32XX_CLKPWR_TEST_CLK_SEL);
- of_platform_populate(NULL, of_default_bus_match_table,
- lpc32xx_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, lpc32xx_auxdata_lookup, NULL);
}
static const char *const lpc32xx_dt_compat[] __initconst = {
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index 6eca2363b81a..f69e28b85e88 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -3,7 +3,7 @@ menuconfig ARCH_MOXART
depends on ARCH_MULTI_V4
select CPU_FA526
select ARM_DMA_MEM_BUFFERABLE
- select CLKSRC_MMIO
+ select MOXART_TIMER
select GENERIC_IRQ_CHIP
select GPIOLIB
select PHYLIB if NETDEVICES
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index ecf9e0c3b107..e53c6cfcab51 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -7,9 +7,15 @@ CFLAGS_pmsu.o := -march=armv7-a
obj-$(CONFIG_MACH_MVEBU_ANY) += system-controller.o mvebu-soc-id.o
ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o pm.o pm-board.o
+obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
+
+obj-$(CONFIG_PM) += pm.o pm-board.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
endif
obj-$(CONFIG_MACH_DOVE) += dove.o
-obj-$(CONFIG_MACH_KIRKWOOD) += kirkwood.o kirkwood-pm.o
+
+ifeq ($(CONFIG_MACH_KIRKWOOD),y)
+obj-y += kirkwood.o
+obj-$(CONFIG_PM) += kirkwood-pm.o
+endif
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 1648edd515a2..ccca95173e17 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/clocksource.h>
#include <linux/dma-mapping.h>
@@ -144,8 +143,6 @@ static void __init mvebu_dt_init(void)
{
if (of_machine_is_compatible("marvell,armadaxp"))
i2c_quirk();
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * const armada_370_xp_dt_compat[] __initconst = {
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 7e989d61159c..ae2a018b9305 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -111,20 +111,12 @@ static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
.notifier_call = mvebu_hwcc_notifier,
};
-static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int armada_xp_clear_l2_starting(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- armada_xp_clear_shared_l2();
-
- return NOTIFY_OK;
+ armada_xp_clear_shared_l2();
+ return 0;
}
-static struct notifier_block armada_xp_clear_shared_l2_notifier = {
- .notifier_call = armada_xp_clear_shared_l2_notifier_func,
- .priority = 100,
-};
-
static void __init armada_370_coherency_init(struct device_node *np)
{
struct resource res;
@@ -155,29 +147,24 @@ static void __init armada_370_coherency_init(struct device_node *np)
of_node_put(cpu_config_np);
- register_cpu_notifier(&armada_xp_clear_shared_l2_notifier);
-
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
+ "AP_ARM_MVEBU_COHERENCY",
+ armada_xp_clear_l2_starting, NULL);
exit:
set_cpu_coherent();
}
/*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
*/
static void __iomem *
-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller)
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
- struct resource pcie_mem;
-
- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
- mtype = MT_UNCACHED;
-
+ mtype = MT_UNCACHED;
return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}
@@ -186,7 +173,8 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
struct device_node *cache_dn;
coherency_cpu_base = of_iomap(np, 0);
- arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+ arch_ioremap_caller = armada_wa_ioremap_caller;
+ pci_ioremap_set_mem_type(MT_UNCACHED);
/*
* We should switch the PL310 to I/O coherency mode only if
diff --git a/arch/arm/mach-mvebu/dove.c b/arch/arm/mach-mvebu/dove.c
index 1aebb82e3d7b..d076c5771adc 100644
--- a/arch/arm/mach-mvebu/dove.c
+++ b/arch/arm/mach-mvebu/dove.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/soc/dove/pmu.h>
#include <asm/hardware/cache-tauros2.h>
#include <asm/mach/arch.h>
@@ -26,7 +25,6 @@ static void __init dove_init(void)
#endif
BUG_ON(mvebu_mbus_dt_init(false));
dove_init_pmu();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * const dove_dt_compat[] __initconst = {
diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c
index a3f4029b25d7..7d9f2fd9e450 100644
--- a/arch/arm/mach-mvebu/kirkwood.c
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@ -179,7 +179,7 @@ static void __init kirkwood_dt_init(void)
kirkwood_pm_init();
kirkwood_dt_eth_fixup();
- of_platform_populate(NULL, of_default_bus_match_table, auxdata, NULL);
+ of_platform_default_populate(NULL, auxdata, NULL);
}
static const char * const kirkwood_dt_board_compat[] __initconst = {
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 5d9e5fce7937..cb429bc6dc0d 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -15,8 +15,8 @@ config SOC_IMX28
config ARCH_MXS
bool "Freescale MXS (i.MX23, i.MX28) support"
depends on ARCH_MULTI_V5
- select CLKSRC_MMIO
select GPIOLIB
+ select MXS_TIMER
select PINCTRL
select SOC_BUS
select SOC_IMX23
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index f1ea4700efcf..0b7fe74ff46d 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -498,8 +498,7 @@ static void __init mxs_machine_init(void)
else if (of_machine_is_compatible("msr,m28cu3"))
m28cu3_init();
- of_platform_populate(NULL, of_default_bus_match_table,
- NULL, parent);
+ of_platform_default_populate(NULL, NULL, parent);
mxs_restart_init();
diff --git a/arch/arm/mach-nspire/Kconfig b/arch/arm/mach-nspire/Kconfig
index bc41f26c1a12..d4985305cab2 100644
--- a/arch/arm/mach-nspire/Kconfig
+++ b/arch/arm/mach-nspire/Kconfig
@@ -7,5 +7,6 @@ config ARCH_NSPIRE
select ARM_AMBA
select ARM_VIC
select ARM_TIMER_SP804
+ select NSPIRE_TIMER
help
This enables support for systems using the TI-NSPIRE CPU
diff --git a/arch/arm/mach-nspire/nspire.c b/arch/arm/mach-nspire/nspire.c
index 34c2a1b32e7d..f0808fcc5acc 100644
--- a/arch/arm/mach-nspire/nspire.c
+++ b/arch/arm/mach-nspire/nspire.c
@@ -57,8 +57,7 @@ static struct of_dev_auxdata nspire_auxdata[] __initdata = {
static void __init nspire_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table,
- nspire_auxdata, NULL);
+ of_platform_default_populate(NULL, nspire_auxdata, NULL);
}
static void nspire_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 5d7fb596bf4a..bf608441b357 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -43,8 +43,8 @@
#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
/* IRQ handler register bitmasks */
-#define DEFERRED_FIQ_MASK (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
-#define GPIO_BANK1_MASK (0x1 << INT_GPIO_BANK1)
+#define DEFERRED_FIQ_MASK OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
+#define GPIO_BANK1_MASK OMAP_IRQ_BIT(INT_GPIO_BANK1)
/* Driver buffer byte offsets */
#define BUF_MASK (FIQ_MASK * 4)
@@ -110,7 +110,7 @@ ENTRY(qwerty_fiqin_start)
mov r8, #2 @ reset FIQ agreement
str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
- cmp r10, #INT_GPIO_BANK1 @ is it GPIO bank interrupt?
+ cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY) @ is it GPIO interrupt?
beq gpio @ yes - process it
mov r8, #1
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index d1f12095f315..ec760ae2f917 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -109,7 +109,8 @@ void __init ams_delta_init_fiq(void)
* Since no set_type() method is provided by OMAP irq chip,
* switch to edge triggered interrupt type manually.
*/
- offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+ offset = IRQ_ILR0_REG_OFFSET +
+ ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
@@ -149,7 +150,7 @@ void __init ams_delta_init_fiq(void)
/*
* Redirect GPIO interrupts to FIQ
*/
- offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+ offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
val = omap_readl(OMAP_IH1_BASE + offset) | 1;
omap_writel(val, OMAP_IH1_BASE + offset);
}
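
The ams-delta FIQ changes above follow from OMAP1's sparse-IRQ conversion: the INT_* values are Linux virq numbers sitting NR_IRQS_LEGACY above the hardware lines, so code that indexes interrupt-controller registers or builds per-line masks has to subtract that offset again (the masking with 0x1f keeps the index within a 32-line bank). A small sketch of the arithmetic, with placeholder constants rather than the real OMAP1 ones:

/* placeholder values for illustration only */
#define EX_NR_IRQS_LEGACY	16	/* virq base reserved for legacy IRQs */
#define EX_ILR0_OFFSET		0x1c	/* first Interrupt Level Register */

/* map a Linux virq back to the hardware line it was offset from */
static unsigned int ex_hw_irq(unsigned int virq)
{
	return virq - EX_NR_IRQS_LEGACY;
}

/* each hardware line has one 32-bit ILR, 4 bytes apart */
static unsigned int ex_ilr_offset(unsigned int virq)
{
	return EX_ILR0_OFFSET + ex_hw_irq(virq) * 4;
}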
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 209aecb0df68..4dfb99504810 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -172,7 +172,7 @@ static struct gpio_led tps_leds[] = {
* Also, D9 requires non-battery power.
*/
{ .gpio = OSK_TPS_GPIO_LED_D9, .name = "d9",
- .default_trigger = "ide-disk", },
+ .default_trigger = "disk-activity", },
{ .gpio = OSK_TPS_GPIO_LED_D2, .name = "d2", },
{ .gpio = OSK_TPS_GPIO_LED_D3, .name = "d3", .active_low = 1,
.default_trigger = "heartbeat", },
diff --git a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
index adb5e7649659..6dfc3e1210a3 100644
--- a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
+++ b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
@@ -14,6 +14,8 @@
#ifndef __AMS_DELTA_FIQ_H
#define __AMS_DELTA_FIQ_H
+#include <mach/irqs.h>
+
/*
* Interrupt number used for passing control from FIQ to IRQ.
* IRQ12, described as reserved, has been selected.
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index b5d3c4c75626..5a0b380a8166 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -17,6 +17,7 @@ config ARCH_OMAP3
select PM_OPP if PM
select PM if CPU_IDLE
select SOC_HAS_OMAP2_SDRC
+ select ARM_ERRATA_430973
config ARCH_OMAP4
bool "TI OMAP4"
@@ -36,6 +37,7 @@ config ARCH_OMAP4
select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
+ select OMAP_INTERCONNECT
config SOC_OMAP5
bool "TI OMAP5"
@@ -67,6 +69,8 @@ config SOC_AM43XX
select HAVE_ARM_SCU
select GENERIC_CLOCKEVENTS_BROADCAST
select HAVE_ARM_TWD
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_775420
config SOC_DRA7XX
bool "TI DRA7XX"
@@ -240,4 +244,12 @@ endmenu
endif
+config OMAP5_ERRATA_801819
+ bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
+ depends on SOC_OMAP5 || SOC_DRA7XX
+ help
+ A livelock can occur in the L2 cache arbitration that might prevent
+ a snoop from completing. Under certain conditions this can cause the
+ system to deadlock.
+
endmenu
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index af2851fbcdf0..bae263fba640 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -46,6 +46,7 @@
#define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109
#define OMAP5_MON_AMBA_IF_INDEX 0x108
+#define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107
/* Secure PPA(Primary Protected Application) APIs */
#define OMAP4_PPA_L2_POR_INDEX 0x23
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index c625cc10d9f9..8cd1de914ee4 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -50,6 +50,39 @@ void __iomem *omap4_get_scu_base(void)
return scu_base;
}
+#ifdef CONFIG_OMAP5_ERRATA_801819
+void omap5_erratum_workaround_801819(void)
+{
+ u32 acr, revidr;
+ u32 acr_mask;
+
+ /* REVIDR[3] indicates erratum fix available on silicon */
+ asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
+ if (revidr & (0x1 << 3))
+ return;
+
+ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+ /*
+ * BIT(27) - Disables streaming. All write-allocate lines allocate in
+ * the L1 or L2 cache.
+ * BIT(25) - Disables streaming. All write-allocate lines allocate in
+ * the L1 cache.
+ */
+ acr_mask = (0x3 << 25) | (0x3 << 27);
+ /* do we already have it done.. if yes, skip expensive smc */
+ if ((acr & acr_mask) == acr_mask)
+ return;
+
+ acr |= acr_mask;
+ omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+ pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
+ __func__, smp_processor_id());
+}
+#else
+static inline void omap5_erratum_workaround_801819(void) { }
+#endif
+
static void omap4_secondary_init(unsigned int cpu)
{
/*
@@ -64,12 +97,15 @@ static void omap4_secondary_init(unsigned int cpu)
omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
4, 0, 0, 0, 0, 0);
- /*
- * Configure the CNTFRQ register for the secondary cpu's which
- * indicates the frequency of the cpu local timers.
- */
- if (soc_is_omap54xx() || soc_is_dra7xx())
+ if (soc_is_omap54xx() || soc_is_dra7xx()) {
+ /*
+ * Configure the CNTFRQ register for the secondary cpu's which
+ * indicates the frequency of the cpu local timers.
+ */
set_cntfreq();
+ /* Configure ACR to disable streaming WA for 801819 */
+ omap5_erratum_workaround_801819();
+ }
/*
* Synchronise with the boot thread.
@@ -218,6 +254,8 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (cpu_is_omap446x())
startup_addr = omap4460_secondary_startup;
+ if (soc_is_dra74x() || soc_is_omap54xx())
+ omap5_erratum_workaround_801819();
/*
* Write the address of secondary startup routine into the
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 78af6d8cf2e2..daf2753de7aa 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -186,8 +186,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
trace_state = (PWRDM_TRACE_STATES_FLAG |
((next & OMAP_POWERSTATE_MASK) << 8) |
((prev & OMAP_POWERSTATE_MASK) << 0));
- trace_power_domain_target(pwrdm->name, trace_state,
- smp_processor_id());
+ trace_power_domain_target_rcuidle(pwrdm->name,
+ trace_state,
+ smp_processor_id());
}
break;
default:
@@ -523,8 +524,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
/* Trace the pwrdm desired target state */
- trace_power_domain_target(pwrdm->name, pwrst,
- smp_processor_id());
+ trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+ smp_processor_id());
/* Program the pwrdm desired target state */
ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
}
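
The switch to trace_power_domain_target_rcuidle() above picks the _rcuidle variant that tracepoints provide for call sites where RCU may not be watching; powerdomain transitions are driven from the cpuidle path, which is exactly such a context, and the plain trace_...() form can trigger RCU warnings there. A minimal usage sketch, assuming the power_domain_target tracepoint from include/trace/events/power.h:

#include <trace/events/power.h>

static void ex_trace_pwrdm(const char *name, unsigned int state, unsigned int cpu)
{
	/* safe from idle paths: the _rcuidle wrapper makes RCU watch for the call */
	trace_power_domain_target_rcuidle(name, state, cpu);
}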
diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c
index 0ec2d00f4237..eb350a673133 100644
--- a/arch/arm/mach-omap2/powerdomains7xx_data.c
+++ b/arch/arm/mach-omap2/powerdomains7xx_data.c
@@ -36,14 +36,7 @@ static struct powerdomain iva_7xx_pwrdm = {
.prcm_offs = DRA7XX_PRM_IVA_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 4,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* hwa_mem */
- [1] = PWRSTS_OFF_RET, /* sl2_mem */
- [2] = PWRSTS_OFF_RET, /* tcm1_mem */
- [3] = PWRSTS_OFF_RET, /* tcm2_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* hwa_mem */
[1] = PWRSTS_ON, /* sl2_mem */
@@ -76,12 +69,7 @@ static struct powerdomain ipu_7xx_pwrdm = {
.prcm_offs = DRA7XX_PRM_IPU_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 2,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* aessmem */
- [1] = PWRSTS_OFF_RET, /* periphmem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* aessmem */
[1] = PWRSTS_ON, /* periphmem */
@@ -95,11 +83,7 @@ static struct powerdomain dss_7xx_pwrdm = {
.prcm_offs = DRA7XX_PRM_DSS_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dss_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dss_mem */
},
@@ -111,13 +95,8 @@ static struct powerdomain l4per_7xx_pwrdm = {
.name = "l4per_pwrdm",
.prcm_offs = DRA7XX_PRM_L4PER_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
- .pwrsts = PWRSTS_RET_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
+ .pwrsts = PWRSTS_ON,
.banks = 2,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* nonretained_bank */
- [1] = PWRSTS_OFF_RET, /* retained_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* nonretained_bank */
[1] = PWRSTS_ON, /* retained_bank */
@@ -132,9 +111,6 @@ static struct powerdomain gpu_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* gpu_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gpu_mem */
},
@@ -148,8 +124,6 @@ static struct powerdomain wkupaon_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* wkup_bank */
},
@@ -161,15 +135,7 @@ static struct powerdomain core_7xx_pwrdm = {
.prcm_offs = DRA7XX_PRM_CORE_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
.banks = 5,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* core_nret_bank */
- [1] = PWRSTS_OFF_RET, /* core_ocmram */
- [2] = PWRSTS_OFF_RET, /* core_other_bank */
- [3] = PWRSTS_OFF_RET, /* ipu_l2ram */
- [4] = PWRSTS_OFF_RET, /* ipu_unicache */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* core_nret_bank */
[1] = PWRSTS_ON, /* core_ocmram */
@@ -226,11 +192,7 @@ static struct powerdomain vpe_7xx_pwrdm = {
.prcm_offs = DRA7XX_PRM_VPE_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* vpe_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* vpe_bank */
},
@@ -260,14 +222,8 @@ static struct powerdomain l3init_7xx_pwrdm = {
.name = "l3init_pwrdm",
.prcm_offs = DRA7XX_PRM_L3INIT_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
- .pwrsts = PWRSTS_RET_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
+ .pwrsts = PWRSTS_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* gmac_bank */
- [1] = PWRSTS_OFF_RET, /* l3init_bank1 */
- [2] = PWRSTS_OFF_RET, /* l3init_bank2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gmac_bank */
[1] = PWRSTS_ON, /* l3init_bank1 */
@@ -283,9 +239,6 @@ static struct powerdomain eve3_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve3_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve3_bank */
},
@@ -299,9 +252,6 @@ static struct powerdomain emu_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* emu_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* emu_bank */
},
@@ -314,11 +264,6 @@ static struct powerdomain dsp2_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dsp2_edma */
- [1] = PWRSTS_OFF_RET, /* dsp2_l1 */
- [2] = PWRSTS_OFF_RET, /* dsp2_l2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dsp2_edma */
[1] = PWRSTS_ON, /* dsp2_l1 */
@@ -334,11 +279,6 @@ static struct powerdomain dsp1_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dsp1_edma */
- [1] = PWRSTS_OFF_RET, /* dsp1_l1 */
- [2] = PWRSTS_OFF_RET, /* dsp1_l2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dsp1_edma */
[1] = PWRSTS_ON, /* dsp1_l1 */
@@ -354,9 +294,6 @@ static struct powerdomain cam_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* vip_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* vip_bank */
},
@@ -370,9 +307,6 @@ static struct powerdomain eve4_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve4_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve4_bank */
},
@@ -386,9 +320,6 @@ static struct powerdomain eve2_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve2_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve2_bank */
},
@@ -402,9 +333,6 @@ static struct powerdomain eve1_7xx_pwrdm = {
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve1_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve1_bank */
},
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 5b385bb8aff9..cb9497a20fb3 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -496,8 +496,7 @@ void __init omap_init_time(void)
__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
2, "timer_sys_ck", NULL, false);
- if (of_have_populated_dt())
- clocksource_probe();
+ clocksource_probe();
}
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void)
{
__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
2, "timer_sys_ck", NULL, false);
+
+ clocksource_probe();
}
#endif /* CONFIG_ARCH_OMAP3 */
@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void)
{
__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
1, "timer_sys_ck", "ti,timer-alwon", true);
+
+ clocksource_probe();
}
#endif
diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
index 6f4c2c4ae2a5..3d36f1d95196 100644
--- a/arch/arm/mach-orion5x/board-dt.c
+++ b/arch/arm/mach-orion5x/board-dt.c
@@ -63,8 +63,7 @@ static void __init orion5x_dt_init(void)
if (of_machine_is_compatible("maxtor,shared-storage-2"))
mss2_init();
- of_platform_populate(NULL, of_default_bus_match_table,
- orion5x_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, orion5x_auxdata_lookup, NULL);
}
static const char *orion5x_dt_compat[] = {
diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c
index ec79fea82704..4e3d6d5c82cd 100644
--- a/arch/arm/mach-picoxcell/common.c
+++ b/arch/arm/mach-picoxcell/common.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/reboot.h>
#include <asm/mach/arch.h>
@@ -54,7 +53,6 @@ static void __init picoxcell_map_io(void)
static void __init picoxcell_init_machine(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
picoxcell_setup_restart();
}
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 3c4717c4b16b..85e874a97337 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -28,6 +28,7 @@ config ARCH_ATLAS7
default y
select ARM_GIC
select CPU_V7
+ select ATLAS7_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_SMP
help
@@ -38,6 +39,7 @@ config ARCH_PRIMA2
default y
select SIRF_IRQ
select ZONE_DMA
+ select PRIMA2_TIMER
help
Support for CSR SiRFSoC ARM Cortex A9 Platform
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index bd7cd8b6a286..1080580b1343 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -464,7 +464,7 @@ static struct gpio_led spitz_gpio_leds[] = {
},
{
.name = "spitz:green:hddactivity",
- .default_trigger = "ide-disk",
+ .default_trigger = "disk-activity",
.gpio = SPITZ_GPIO_LED_GREEN,
},
};
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index beb71da5d9c8..a7ab9ec141f8 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -73,7 +73,6 @@ static void __init rockchip_timer_init(void)
static void __init rockchip_dt_init(void)
{
rockchip_suspend_init();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * const rockchip_board_dt_compat[] = {
diff --git a/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c b/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c
index 5f028ff84cfe..c83c076578dd 100644
--- a/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c
+++ b/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c
@@ -17,7 +17,6 @@
#include <linux/clocksource.h>
#include <linux/irqchip.h>
-#include <linux/of_platform.h>
#include <linux/serial_s3c.h>
#include <asm/mach/arch.h>
@@ -35,7 +34,6 @@ static void __init s3c2416_dt_map_io(void)
static void __init s3c2416_dt_machine_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
s3c_pm_init();
}
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index bbf74edd3dd9..5bf9afae752d 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/of_platform.h>
-
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/system_misc.h>
@@ -48,7 +46,6 @@ static void __init s3c64xx_dt_map_io(void)
static void __init s3c64xx_dt_init_machine(void)
{
samsung_wdt_reset_of_init();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static void s3c64xx_dt_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index db6dbfbaf9f1..3849eef0d3a7 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -18,7 +18,6 @@
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
-#include <linux/of_platform.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
@@ -77,8 +76,6 @@ static void __init r8a7740_init_irq_of(void)
static void __init r8a7740_generic_init(void)
{
r8a7740_meram_workaround();
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char *const r8a7740_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 99a2004cac76..a25ff188e403 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/io.h>
@@ -55,7 +54,6 @@ static void __init sh73a0_generic_init(void)
/* Shared attribute override enable, 64K*8way */
l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
#endif
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char *const sh73a0_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/mach-spear/spear1310.c b/arch/arm/mach-spear/spear1310.c
index cd5d375d91f0..a7d4f136836f 100644
--- a/arch/arm/mach-spear/spear1310.c
+++ b/arch/arm/mach-spear/spear1310.c
@@ -14,7 +14,6 @@
#define pr_fmt(fmt) "SPEAr1310: " fmt
#include <linux/amba/pl022.h>
-#include <linux/of_platform.h>
#include <linux/pata_arasan_cf_data.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -27,7 +26,6 @@
static void __init spear1310_dt_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
platform_device_register_simple("spear-cpufreq", -1, NULL, 0);
}
diff --git a/arch/arm/mach-spear/spear1340.c b/arch/arm/mach-spear/spear1340.c
index 94594d5a446c..a212af90c0bc 100644
--- a/arch/arm/mach-spear/spear1340.c
+++ b/arch/arm/mach-spear/spear1340.c
@@ -19,7 +19,6 @@
static void __init spear1340_dt_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
platform_device_register_simple("spear-cpufreq", -1, NULL, 0);
}
diff --git a/arch/arm/mach-spear/spear300.c b/arch/arm/mach-spear/spear300.c
index 5b32edda2276..325b89579be1 100644
--- a/arch/arm/mach-spear/spear300.c
+++ b/arch/arm/mach-spear/spear300.c
@@ -194,8 +194,7 @@ static void __init spear300_dt_init(void)
pl080_plat_data.slave_channels = spear300_dma_info;
pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear300_dma_info);
- of_platform_populate(NULL, of_default_bus_match_table,
- spear300_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, spear300_auxdata_lookup, NULL);
}
static const char * const spear300_dt_board_compat[] = {
diff --git a/arch/arm/mach-spear/spear310.c b/arch/arm/mach-spear/spear310.c
index 86a44ac7ff67..59e173dc85cf 100644
--- a/arch/arm/mach-spear/spear310.c
+++ b/arch/arm/mach-spear/spear310.c
@@ -236,8 +236,7 @@ static void __init spear310_dt_init(void)
pl080_plat_data.slave_channels = spear310_dma_info;
pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear310_dma_info);
- of_platform_populate(NULL, of_default_bus_match_table,
- spear310_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, spear310_auxdata_lookup, NULL);
}
static const char * const spear310_dt_board_compat[] = {
diff --git a/arch/arm/mach-spear/spear320.c b/arch/arm/mach-spear/spear320.c
index d45d751926c5..0958f68a21e2 100644
--- a/arch/arm/mach-spear/spear320.c
+++ b/arch/arm/mach-spear/spear320.c
@@ -240,8 +240,7 @@ static void __init spear320_dt_init(void)
pl080_plat_data.slave_channels = spear320_dma_info;
pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear320_dma_info);
- of_platform_populate(NULL, of_default_bus_match_table,
- spear320_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, spear320_auxdata_lookup, NULL);
}
static const char * const spear320_dt_board_compat[] = {
diff --git a/arch/arm/mach-spear/spear6xx.c b/arch/arm/mach-spear/spear6xx.c
index da26fa5b68d7..ccf3573b831c 100644
--- a/arch/arm/mach-spear/spear6xx.c
+++ b/arch/arm/mach-spear/spear6xx.c
@@ -411,8 +411,7 @@ struct of_dev_auxdata spear6xx_auxdata_lookup[] __initdata = {
static void __init spear600_dt_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table,
- spear6xx_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, spear6xx_auxdata_lookup, NULL);
}
static const char *spear600_dt_board_compat[] = {
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 42d7ee9658fa..e01cbca196b5 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -115,7 +115,7 @@ static void __init tegra_dt_init(void)
* devices
*/
out:
- of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+ of_platform_default_populate(NULL, NULL, parent);
}
static void __init tegra_dt_init_late(void)
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 56e0ef680b79..22dcbf5b76b2 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -3,7 +3,7 @@ menuconfig ARCH_U300
depends on ARCH_MULTI_V5 && MMU
select ARM_AMBA
select ARM_VIC
- select CLKSRC_MMIO
+ select U300_TIMER
select CPU_ARM926T
select GPIOLIB
select HAVE_TCM
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 546338bbacf8..a4910ea6811a 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -391,8 +391,7 @@ static void __init u300_init_machine_dt(void)
pinctrl_register_mappings(u300_pinmux_map,
ARRAY_SIZE(u300_pinmux_map));
- of_platform_populate(NULL, of_default_bus_match_table,
- u300_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, u300_auxdata_lookup, NULL);
/* Enable SEMI self refresh */
val = readw(syscon_base + U300_SYSCON_SMCR) |
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index d643b9210dbd..3c8d39c12909 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -344,8 +344,7 @@ static void __init versatile_dt_init(void)
versatile_dt_pci_init();
- of_platform_populate(NULL, of_default_bus_match_table,
- versatile_auxdata_lookup, NULL);
+ of_platform_default_populate(NULL, versatile_auxdata_lookup, NULL);
}
static const char *const versatile_dt_match[] __initconst = {
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index bc4ea5b241e7..fe488523694c 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
init.name = dev_name(cpu_dev);
init.ops = &clk_spc_ops;
- init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &spc->hw);
diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c
index 3bc0dc9a4d69..773c04fdb746 100644
--- a/arch/arm/mach-vt8500/vt8500.c
+++ b/arch/arm/mach-vt8500/vt8500.c
@@ -30,7 +30,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#define LEGACY_GPIO_BASE 0xD8110000
#define LEGACY_PMC_BASE 0xD8130000
@@ -158,8 +157,6 @@ static void __init vt8500_init(void)
pm_power_off = &vt8500_power_off;
else
pr_err("%s: PMC Hibernation register could not be remapped, not enabling power off!\n", __func__);
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * const vt8500_dt_compat[] = {
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index da876d28ccbc..d12002cd63bc 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -141,7 +141,7 @@ out:
* Finished with the static registrations now; fill in the missing
* devices
*/
- of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+ of_platform_default_populate(NULL, NULL, parent);
platform_device_register(&zynq_cpuidle_device);
}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cb569b65a54d..d15a7fe51618 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1025,12 +1025,6 @@ config ARM_DMA_MEM_BUFFERABLE
You are recommended say 'Y' here and debug any affected drivers.
-config ARCH_HAS_BARRIERS
- bool
- help
- This option allows the use of custom mandatory barriers
- included via the mach/barriers.h file.
-
config ARM_HEAVY_MB
bool
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c61996c256cc..cc12905ae6f8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -597,17 +597,16 @@ static void l2c310_configure(void __iomem *base)
L310_POWER_CTRL);
}
-static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
+static int l2c310_starting_cpu(unsigned int cpu)
{
- switch (act & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
- break;
- case CPU_DYING:
- set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
- break;
- }
- return NOTIFY_OK;
+ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+ return 0;
+}
+
+static int l2c310_dying_cpu(unsigned int cpu)
+{
+ set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+ return 0;
}
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
@@ -678,10 +677,10 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
}
- if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
- set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
- cpu_notifier(l2c310_cpu_enable_flz, 0);
- }
+ if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
+ cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
+ "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
+ l2c310_dying_cpu);
}
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
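
The l2c310 hunks above, like the mvebu coherency and VFP changes elsewhere in this diff, replace raw CPU notifier blocks with the cpuhp state machine: a starting callback runs on a CPU as it comes online and a dying callback as it goes down, and the CPU_*_FROZEN special-casing disappears because suspend/resume passes through the same states. A minimal registration sketch (cpuhp_setup_state() also invokes the startup callback for CPUs that are already online; the _nocalls variant used in coherency.c only installs the callbacks):

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int ex_starting_cpu(unsigned int cpu)
{
	/* runs on the incoming CPU, early, with interrupts disabled */
	return 0;
}

static int ex_dying_cpu(unsigned int cpu)
{
	/* runs on the outgoing CPU before it goes offline */
	return 0;
}

static int __init ex_register(void)
{
	return cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
				 "AP_ARM_L2X0_STARTING",
				 ex_starting_cpu, ex_dying_cpu);
}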
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ff7ed5697d3e..b7eed75960fe 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -49,6 +49,7 @@ struct arm_dma_alloc_args {
pgprot_t prot;
const void *caller;
bool want_vaddr;
+ int coherent_flag;
};
struct arm_dma_free_args {
@@ -59,6 +60,9 @@ struct arm_dma_free_args {
bool want_vaddr;
};
+#define NORMAL 0
+#define COHERENT 1
+
struct arm_dma_allocator {
void *(*alloc)(struct arm_dma_alloc_args *args,
struct page **ret_page);
@@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
return mask;
}
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
/*
* Ensure that the allocated pages are zeroed, and that any data
@@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size)
while (size > 0) {
void *ptr = kmap_atomic(page);
memset(ptr, 0, PAGE_SIZE);
- dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ if (coherent_flag != COHERENT)
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
page++;
size -= PAGE_SIZE;
}
- outer_flush_range(base, end);
+ if (coherent_flag != COHERENT)
+ outer_flush_range(base, end);
} else {
void *ptr = page_address(page);
memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ if (coherent_flag != COHERENT) {
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ }
}
}
@@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
*/
-static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, int coherent_flag)
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
@@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
return page;
}
@@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller, bool want_vaddr);
+ const void *caller, bool want_vaddr,
+ int coherent_flag);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
@@ -405,10 +415,13 @@ static int __init atomic_pool_init(void)
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
if (!atomic_pool)
goto out;
-
+ /*
+ * The atomic pool is only used for non-coherent allocations
+ * so we must pass NORMAL for coherent_flag.
+ */
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
- &page, atomic_pool_init, true);
+ &page, atomic_pool_init, true, NORMAL);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
&page, atomic_pool_init, true);
@@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
{
struct page *page;
void *ptr = NULL;
- page = __dma_alloc_buffer(dev, size, gfp);
+ /*
+ * __alloc_remap_buffer is only called when the device is
+ * non-coherent
+ */
+ page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
if (!page)
return NULL;
if (!want_vaddr)
@@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller, bool want_vaddr)
+ const void *caller, bool want_vaddr,
+ int coherent_flag)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
@@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
if (!page)
return NULL;
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
if (!want_vaddr)
goto out;
@@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
#define __free_from_pool(cpu_addr, size) do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
@@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
struct page **ret_page)
{
struct page *page;
- page = __dma_alloc_buffer(dev, size, gfp);
+ /* __alloc_simple_buffer is only called when the device is coherent */
+ page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
if (!page)
return NULL;
@@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
- args->want_vaddr);
+ args->want_vaddr, args->coherent_flag);
}
static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
.prot = prot,
.caller = caller,
.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+ .coherent_flag = is_coherent ? COHERENT : NORMAL,
};
#ifdef CONFIG_DMA_API_DEBUG
@@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
static const int iommu_order_array[] = { 9, 8, 4, 0 };
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, struct dma_attrs *attrs,
+ int coherent_flag)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
@@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
if (!page)
goto error;
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
for (i = 0; i < count; i++)
pages[i] = page + i;
@@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
pages[i + j] = pages[i] + j;
}
- __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
i += 1 << order;
count -= 1 << order;
}
@@ -1455,13 +1476,16 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
return NULL;
}
-static void *__iommu_alloc_atomic(struct device *dev, size_t size,
- dma_addr_t *handle)
+static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
+ dma_addr_t *handle, int coherent_flag)
{
struct page *page;
void *addr;
- addr = __alloc_from_pool(size, &page);
+ if (coherent_flag == COHERENT)
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else
+ addr = __alloc_from_pool(size, &page);
if (!addr)
return NULL;
@@ -1477,14 +1501,18 @@ err_mapping:
}
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
- dma_addr_t handle, size_t size)
+ dma_addr_t handle, size_t size, int coherent_flag)
{
__iommu_remove_mapping(dev, handle, size);
- __free_from_pool(cpu_addr, size);
+ if (coherent_flag == COHERENT)
+ __dma_free_buffer(virt_to_page(cpu_addr), size);
+ else
+ __free_from_pool(cpu_addr, size);
}
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+ int coherent_flag)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;
@@ -1493,8 +1521,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- if (!gfpflags_allow_blocking(gfp))
- return __iommu_alloc_atomic(dev, size, handle);
+ if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
+ return __iommu_alloc_simple(dev, size, gfp, handle,
+ coherent_flag);
/*
* Following is a work-around (a.k.a. hack) to prevent pages
@@ -1505,7 +1534,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*/
gfp &= ~(__GFP_COMP);
- pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
if (!pages)
return NULL;
@@ -1530,7 +1559,19 @@ err_buffer:
return NULL;
}
-static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
+}
+
+static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
+}
+
+static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
@@ -1540,8 +1581,6 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
if (!pages)
return -ENXIO;
@@ -1562,19 +1601,34 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
return 0;
}
+static int arm_iommu_mmap_attrs(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+ return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static int arm_coherent_iommu_mmap_attrs(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
/*
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
-void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
{
struct page **pages;
size = PAGE_ALIGN(size);
- if (__in_atomic_pool(cpu_addr, size)) {
- __iommu_free_atomic(dev, cpu_addr, handle, size);
+ if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
return;
}
@@ -1593,6 +1647,18 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
__iommu_free_buffer(dev, pages, size, attrs);
}
+void arm_iommu_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+ __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+}
+
+void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+ __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
+}
+
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, struct dma_attrs *attrs)
@@ -1997,9 +2063,9 @@ struct dma_map_ops iommu_ops = {
};
struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_iommu_alloc_attrs,
- .free = arm_iommu_free_attrs,
- .mmap = arm_iommu_mmap_attrs,
+ .alloc = arm_coherent_iommu_alloc_attrs,
+ .free = arm_coherent_iommu_free_attrs,
+ .mmap = arm_coherent_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,
.map_page = arm_coherent_iommu_map_page,
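
The coherent_flag plumbing above exists so that buffer zeroing can skip cache maintenance when the device is cache-coherent: the memset() still has to happen, but flushing the zeroes out of the CPU caches to RAM is only needed for non-coherent masters. A conceptual sketch in plain C, with stand-ins for the kernel's dmac_flush_range()/outer_flush_range() helpers:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* hypothetical stand-ins for the architecture cache-maintenance calls */
static void ex_flush_dcache(void *start, size_t len)    { (void)start; (void)len; }
static void ex_flush_outer(unsigned long pa, size_t len) { (void)pa; (void)len; }

static void ex_clear_dma_buffer(void *va, unsigned long pa, size_t size, bool coherent)
{
	memset(va, 0, size);		/* the buffer is zeroed in every case */
	if (!coherent) {
		/* non-coherent device: make the zeroes visible in RAM */
		ex_flush_dcache(va, size);
		ex_flush_outer(pa, size);
	}
}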
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ad5841856007..3a2e678b8d30 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -243,7 +243,7 @@ good_area:
goto out;
}
- return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+ return handle_mm_fault(vma, addr & PAGE_MASK, flags);
check_stack:
/* Don't allow expansion below FIRST_USER_ADDRESS */
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index b8d477321730..c1c1a5c67da1 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -23,7 +23,7 @@
#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd) kfree(pgd)
#else
-#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 2)
+#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)
#endif
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6fcaac8e200f..a7123b4e129d 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -362,6 +362,39 @@ __ca15_errata:
#endif
b __errata_finish
+__ca12_errata:
+#ifdef CONFIG_ARM_ERRATA_818325_852422
+ mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orr r10, r10, #1 << 12 @ set bit #12
+ mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_821420
+ mrc p15, 0, r10, c15, c0, 2 @ read internal feature reg
+ orr r10, r10, #1 << 1 @ set bit #1
+ mcr p15, 0, r10, c15, c0, 2 @ write internal feature reg
+#endif
+#ifdef CONFIG_ARM_ERRATA_825619
+ mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orr r10, r10, #1 << 24 @ set bit #24
+ mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+ b __errata_finish
+
+__ca17_errata:
+#ifdef CONFIG_ARM_ERRATA_852421
+ cmp r6, #0x12 @ only present up to r1p2
+ mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrle r10, r10, #1 << 24 @ set bit #24
+ mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_852423
+ cmp r6, #0x12 @ only present up to r1p2
+ mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrle r10, r10, #1 << 12 @ set bit #12
+ mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+ b __errata_finish
+
__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B
@@ -443,6 +476,16 @@ __v7_setup_cont:
teq r0, r10
beq __ca9_errata
+ /* Cortex-A12 Errata */
+ ldr r10, =0x00000c0d @ Cortex-A12 primary part number
+ teq r0, r10
+ beq __ca12_errata
+
+ /* Cortex-A17 Errata */
+ ldr r10, =0x00000c0e @ Cortex-A17 primary part number
+ teq r0, r10
+ beq __ca17_errata
+
/* Cortex-A15 Errata */
ldr r10, =0x00000c0f @ Cortex-A15 primary part number
teq r0, r10
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 84baa16f4c0b..e93aa6734147 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -68,7 +68,7 @@
#include <linux/platform_data/asoc-s3c.h>
#include <linux/platform_data/spi-s3c64xx.h>
-static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
/* AC97 */
#ifdef CONFIG_CPU_S3C2440
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 73085d3482ed..da0b33deba6d 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -643,19 +643,19 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
* hardware state at every thread switch. We clear our held state when
* a CPU has been killed, indicating that the VFP hardware doesn't contain
* a thread's VFP state. When a CPU starts up, we re-enable access to the
- * VFP hardware.
- *
- * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * VFP hardware. The callbacks below are called on the CPU which
* is being offlined/onlined.
*/
-static int vfp_hotplug(struct notifier_block *b, unsigned long action,
- void *hcpu)
+static int vfp_dying_cpu(unsigned int cpu)
{
- if (action == CPU_DYING || action == CPU_DYING_FROZEN)
- vfp_current_hw_state[(long)hcpu] = NULL;
- else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- vfp_enable(NULL);
- return NOTIFY_OK;
+ vfp_force_reload(cpu, current_thread_info());
+ return 0;
+}
+
+static int vfp_starting_cpu(unsigned int unused)
+{
+ vfp_enable(NULL);
+ return 0;
}
void vfp_kmode_exception(void)
@@ -732,6 +732,10 @@ static int __init vfp_init(void)
unsigned int vfpsid;
unsigned int cpu_arch = cpu_architecture();
+ /*
+	 * Enable access to the VFP on all online CPUs so that the
+ * following test on FPSID will succeed.
+ */
if (cpu_arch >= CPU_ARCH_ARMv6)
on_each_cpu(vfp_enable, NULL, 1);
@@ -794,7 +798,9 @@ static int __init vfp_init(void)
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
}
- hotcpu_notifier(vfp_hotplug, 0);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
+ "AP_ARM_VFP_STARTING", vfp_starting_cpu,
+ vfp_dying_cpu);
vfp_vector = vfp_support_entry;
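The hunk above drops the raw CPU notifier in favour of the hotplug state machine: the starting callback runs on each CPU as it comes online and the dying callback runs on each CPU as it goes down, both executing on the affected CPU itself. A minimal sketch of the same pattern follows; the state constant choice, the "arm/example:starting" name and the per-cpu flag are illustrative assumptions, not part of this patch.

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_ready);

static int example_starting_cpu(unsigned int cpu)
{
	per_cpu(example_ready, cpu) = 1;	/* runs on the CPU that is coming up */
	return 0;
}

static int example_dying_cpu(unsigned int cpu)
{
	per_cpu(example_ready, cpu) = 0;	/* runs on the CPU that is going down */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* _nocalls: do not invoke the callbacks for CPUs that are already online */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/example:starting",
					example_starting_cpu, example_dying_cpu);
	return ret < 0 ? ret : 0;
}
early_initcall(example_init);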
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..227952103b0b 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1,2 @@
obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
new file mode 100644
index 000000000000..16db419f9e90
--- /dev/null
+++ b/arch/arm/xen/efi.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015, Linaro Limited, Shannon Zhao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/efi.h>
+#include <xen/xen-ops.h>
+#include <asm/xen/xen-ops.h>
+
+/* Set the Xen EFI runtime services function pointers. Other fields of
+ * struct efi, e.g. efi.systab, are set up as for native EFI.
+ */
+void __init xen_efi_runtime_setup(void)
+{
+ efi.get_time = xen_efi_get_time;
+ efi.set_time = xen_efi_set_time;
+ efi.get_wakeup_time = xen_efi_get_wakeup_time;
+ efi.set_wakeup_time = xen_efi_set_wakeup_time;
+ efi.get_variable = xen_efi_get_variable;
+ efi.get_next_variable = xen_efi_get_next_variable;
+ efi.set_variable = xen_efi_set_variable;
+ efi.query_variable_info = xen_efi_query_variable_info;
+ efi.update_capsule = xen_efi_update_capsule;
+ efi.query_capsule_caps = xen_efi_query_capsule_caps;
+ efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+ efi.reset_system = NULL; /* Functionality provided by Xen. */
+}
+EXPORT_SYMBOL_GPL(xen_efi_runtime_setup);
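Since xen_efi_runtime_setup() only swaps the function pointers inside the global struct efi, generic callers keep using the same entry points whether or not Xen is proxying them. A hedged sketch of a hypothetical caller; the helper name and its argument list are illustrative, only the efi.get_variable signature is taken from the EFI core.

#include <linux/efi.h>

/* Hypothetical helper, for illustration only: after xen_efi_runtime_setup()
 * this call lands in xen_efi_get_variable(); on bare metal it goes straight
 * to the firmware's GetVariable service.
 */
static efi_status_t example_read_variable(efi_char16_t *name, efi_guid_t *vendor,
					  unsigned long *size, void *data)
{
	u32 attr;

	return efi.get_variable(name, vendor, &attr, size, data);
}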
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 75cd7345c654..b0b82f5ea338 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -12,14 +12,16 @@
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
-#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include <asm/xen/xen-ops.h>
#include <asm/system_misc.h>
+#include <asm/efi.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuidle.h>
@@ -30,6 +32,7 @@
#include <linux/time64.h>
#include <linux/timekeeping.h>
#include <linux/timekeeper_internal.h>
+#include <linux/acpi.h>
#include <linux/mm.h>
@@ -46,14 +49,16 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;
+/* Linux <-> Xen vCPU id mapping */
+DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
+
/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
static __read_mostly unsigned int xen_events_irq;
-static __initdata struct device_node *xen_node;
-
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
@@ -84,19 +89,6 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
-static unsigned long long xen_stolen_accounting(int cpu)
-{
- struct vcpu_runstate_info state;
-
- BUG_ON(cpu != smp_processor_id());
-
- xen_get_runstate_snapshot(&state);
-
- WARN_ON(state.state != RUNSTATE_running);
-
- return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
-}
-
static void xen_read_wallclock(struct timespec64 *ts)
{
u32 version;
@@ -161,12 +153,11 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
.notifier_call = xen_pvclock_gtod_notify,
};
-static void xen_percpu_init(void)
+static int xen_starting_cpu(unsigned int cpu)
{
struct vcpu_register_vcpu_info info;
struct vcpu_info *vcpup;
int err;
- int cpu = get_cpu();
/*
* VCPUOP_register_vcpu_info cannot be called twice for the same
@@ -179,10 +170,14 @@ static void xen_percpu_init(void)
pr_info("Xen: initializing cpu%d\n", cpu);
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
+ /* Direct vCPU id mapping for ARM guests. */
+ per_cpu(xen_vcpu_id, cpu) = cpu;
+
info.mfn = virt_to_gfn(vcpup);
info.offset = xen_offset_in_page(vcpup);
- err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
+ &info);
BUG_ON(err);
per_cpu(xen_vcpu, cpu) = vcpup;
@@ -190,7 +185,13 @@ static void xen_percpu_init(void)
after_register_vcpu_info:
enable_percpu_irq(xen_events_irq, 0);
- put_cpu();
+ return 0;
+}
+
+static int xen_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(xen_events_irq);
+ return 0;
}
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
@@ -209,32 +210,50 @@ static void xen_power_off(void)
BUG_ON(rc);
}
-static int xen_cpu_notification(struct notifier_block *self,
- unsigned long action,
- void *hcpu)
+static irqreturn_t xen_arm_callback(int irq, void *arg)
{
- switch (action) {
- case CPU_STARTING:
- xen_percpu_init();
- break;
- case CPU_DYING:
- disable_percpu_irq(xen_events_irq);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
+ xen_hvm_evtchn_do_upcall();
+ return IRQ_HANDLED;
}
-static struct notifier_block xen_cpu_notifier = {
- .notifier_call = xen_cpu_notification,
-};
+static __initdata struct {
+ const char *compat;
+ const char *prefix;
+ const char *version;
+ bool found;
+} hyper_node = {"xen,xen", "xen,xen-", NULL, false};
-static irqreturn_t xen_arm_callback(int irq, void *arg)
+static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
+ int depth, void *data)
{
- xen_hvm_evtchn_do_upcall();
- return IRQ_HANDLED;
+ const void *s = NULL;
+ int len;
+
+ if (depth != 1 || strcmp(uname, "hypervisor") != 0)
+ return 0;
+
+ if (of_flat_dt_is_compatible(node, hyper_node.compat))
+ hyper_node.found = true;
+
+ s = of_get_flat_dt_prop(node, "compatible", &len);
+ if (strlen(hyper_node.prefix) + 3 < len &&
+ !strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix)))
+ hyper_node.version = s + strlen(hyper_node.prefix);
+
+ /*
+	 * Check whether Xen supports EFI by looking for the "/hypervisor/uefi"
+	 * node in the DT. If it is present, runtime services are available
+	 * through proxy functions (e.g. in the Xen dom0 EFI implementation they
+	 * issue a special hypercall which executes the relevant EFI functions),
+	 * which is why they are always enabled here.
+ */
+ if (IS_ENABLED(CONFIG_XEN_EFI)) {
+ if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
+ !efi_runtime_disabled())
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ }
+
+ return 0;
}
/*
@@ -244,26 +263,18 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
#define GRANT_TABLE_PHYSADDR 0
void __init xen_early_init(void)
{
- int len;
- const char *s = NULL;
- const char *version = NULL;
- const char *xen_prefix = "xen,xen-";
-
- xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
- if (!xen_node) {
+ of_scan_flat_dt(fdt_find_hyper_node, NULL);
+ if (!hyper_node.found) {
pr_debug("No Xen support\n");
return;
}
- s = of_get_property(xen_node, "compatible", &len);
- if (strlen(xen_prefix) + 3 < len &&
- !strncmp(xen_prefix, s, strlen(xen_prefix)))
- version = s + strlen(xen_prefix);
- if (version == NULL) {
+
+ if (hyper_node.version == NULL) {
pr_debug("Xen version not found\n");
return;
}
- pr_info("Xen %s support found\n", version);
+ pr_info("Xen %s support found\n", hyper_node.version);
xen_domain_type = XEN_HVM_DOMAIN;
@@ -278,28 +289,68 @@ void __init xen_early_init(void)
add_preferred_console("hvc", 0, NULL);
}
+static void __init xen_acpi_guest_init(void)
+{
+#ifdef CONFIG_ACPI
+ struct xen_hvm_param a;
+ int interrupt, trigger, polarity;
+
+ a.domid = DOMID_SELF;
+ a.index = HVM_PARAM_CALLBACK_IRQ;
+
+ if (HYPERVISOR_hvm_op(HVMOP_get_param, &a)
+ || (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) {
+ xen_events_irq = 0;
+ return;
+ }
+
+ interrupt = a.value & 0xff;
+ trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE
+ : ACPI_LEVEL_SENSITIVE;
+ polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW
+ : ACPI_ACTIVE_HIGH;
+ xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity);
+#endif
+}
+
+static void __init xen_dt_guest_init(void)
+{
+ struct device_node *xen_node;
+
+ xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
+ if (!xen_node) {
+ pr_err("Xen support was detected before, but it has disappeared\n");
+ return;
+ }
+
+ xen_events_irq = irq_of_parse_and_map(xen_node, 0);
+}
+
static int __init xen_guest_init(void)
{
struct xen_add_to_physmap xatp;
struct shared_info *shared_info_page = NULL;
- struct resource res;
- phys_addr_t grant_frames;
if (!xen_domain())
return 0;
- if (of_address_to_resource(xen_node, GRANT_TABLE_PHYSADDR, &res)) {
- pr_err("Xen grant table base address not found\n");
- return -ENODEV;
- }
- grant_frames = res.start;
+ if (!acpi_disabled)
+ xen_acpi_guest_init();
+ else
+ xen_dt_guest_init();
- xen_events_irq = irq_of_parse_and_map(xen_node, 0);
if (!xen_events_irq) {
pr_err("Xen event channel interrupt not found\n");
return -ENODEV;
}
+ /*
+	 * The FDT parsing code has set EFI_RUNTIME_SERVICES if Xen EFI
+	 * parameters were found. Force-enable the runtime services.
+ */
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ xen_efi_runtime_setup();
+
shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);
if (!shared_info_page) {
@@ -328,7 +379,13 @@ static int __init xen_guest_init(void)
if (xen_vcpu_info == NULL)
return -ENOMEM;
- if (gnttab_setup_auto_xlat_frames(grant_frames)) {
+ /* Direct vCPU id mapping for ARM guests. */
+ per_cpu(xen_vcpu_id, 0) = 0;
+
+ xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
+ if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
+ &xen_auto_xlat_grant_frames.vaddr,
+ xen_auto_xlat_grant_frames.count)) {
free_percpu(xen_vcpu_info);
return -ENOMEM;
}
@@ -351,16 +408,14 @@ static int __init xen_guest_init(void)
return -EINVAL;
}
- xen_percpu_init();
+ xen_time_setup_guest();
- register_cpu_notifier(&xen_cpu_notifier);
-
- pv_time_ops.steal_clock = xen_stolen_accounting;
- static_key_slow_inc(&paravirt_steal_enabled);
if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
- return 0;
+ return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
+ "AP_ARM_XEN_STARTING", xen_starting_cpu,
+ xen_dying_cpu);
}
early_initcall(xen_guest_init);
@@ -403,4 +458,5 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
+EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
EXPORT_SYMBOL_GPL(privcmd_call);
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index 9a36f4f49c10..a648dfc3be30 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -91,6 +91,7 @@ HYPERCALL3(vcpu_op);
HYPERCALL1(tmem_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
+HYPERCALL2(vm_assist);
ENTRY(privcmd_call)
stmdb sp!, {r4}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76747d92bc72..9f8b99e20557 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,9 +4,11 @@ config ARM64
select ACPI_GENERIC_GSI if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_KCOV
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
@@ -85,8 +87,11 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES if HAVE_KPROBES
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
@@ -113,6 +118,18 @@ config ARCH_PHYS_ADDR_T_64BIT
config MMU
def_bool y
+config ARM64_PAGE_SHIFT
+ int
+ default 16 if ARM64_64K_PAGES
+ default 14 if ARM64_16K_PAGES
+ default 12
+
+config ARM64_CONT_SHIFT
+ int
+ default 5 if ARM64_64K_PAGES
+ default 7 if ARM64_16K_PAGES
+ default 4
+
config ARCH_MMAP_RND_BITS_MIN
default 14 if ARM64_64K_PAGES
default 16 if ARM64_16K_PAGES
@@ -426,6 +443,15 @@ config CAVIUM_ERRATUM_22375
If unsure, say Y.
+config CAVIUM_ERRATUM_23144
+ bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+ depends on NUMA
+ default y
+ help
+	  The ITS SYNC command can hang for cross-node I/O and collections/CPU mapping.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_23154
bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
default y
@@ -643,6 +669,16 @@ config PARAVIRT_TIME_ACCOUNTING
If in doubt, say N here.
+config KEXEC
+ depends on PM_SLEEP_SMP
+ select KEXEC_CORE
+ bool "kexec system call"
+ ---help---
+	  kexec is a system call that implements the ability to shut down your
+	  current kernel and start another kernel. It is like a reboot,
+	  but it is independent of the system firmware. And like a reboot,
+	  you can start any kernel with it, not just Linux.
+
config XEN_DOM0
def_bool y
depends on XEN
@@ -851,7 +887,7 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
- select ARM64_MODULE_PLTS
+ select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
help
Randomizes the virtual address at which the kernel image is
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 710fde4ad0f0..0cc758cdd0dc 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -12,7 +12,8 @@ config ARM64_PTDUMP
who are working in architecture specific areas of the kernel.
It is probably not a good idea to enable this feature in a production
kernel.
- If in doubt, say "N"
+
+ If in doubt, say N.
config PID_IN_CONTEXTIDR
bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
value.
config DEBUG_SET_MODULE_RONX
- bool "Set loadable kernel module data as NX and text as RO"
- depends on MODULES
- help
- This option helps catch unintended modifications to loadable
- kernel module's text and read-only data. It also prevents execution
- of module data. Such protection may interfere with run-time code
- patching and dynamic kernel tracing - and they might also protect
- against certain classes of kernel exploits.
- If in doubt, say "N".
+ bool "Set loadable kernel module data as NX and text as RO"
+ depends on MODULES
+ default y
+ help
+	  If this is set, kernel module text and rodata will be made read-only.
+ This is to help catch accidental or malicious attempts to change the
+ kernel's executable code.
+
+ If in doubt, say Y.
config DEBUG_RODATA
bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@ config DEBUG_RODATA
is to help catch accidental or malicious attempts to change the
kernel's executable code.
- If in doubt, say Y
+ If in doubt, say Y.
config DEBUG_ALIGN_RODATA
depends on DEBUG_RODATA
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
alignment and potentially wasted space. Turn on this option if
performance is more important than memory pressure.
- If in doubt, say N
+ If in doubt, say N.
source "drivers/hwtracing/coresight/Kconfig"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 354d75402ace..d59b6908a21a 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -12,7 +12,6 @@
LDFLAGS_vmlinux :=-p --no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
-OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
ifneq ($(CONFIG_RELOCATABLE),)
@@ -60,7 +59,9 @@ head-y := arch/arm64/kernel/head.o
# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+ int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+ rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
@@ -93,7 +94,7 @@ boot := arch/arm64/boot
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-Image.%: vmlinux
+Image.%: Image
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zinstall install:
@@ -119,6 +120,16 @@ archclean:
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=$(boot)/dts
+# We need to generate vdso-offsets.h before compiling certain files in kernel/.
+# In order to do that, we should use the archprepare target, but we can't since
+# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
+# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
+# Therefore we need to generate the header after prepare0 has been made, hence
+# this hack.
+prepare: vdso_prepare
+vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
+
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 305c552b5ec1..1f012c506434 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -14,6 +14,8 @@
# Based on the ia64 boot/Makefile.
#
+OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
targets := Image Image.gz
$(obj)/Image: vmlinux FORCE
diff --git a/arch/arm64/boot/dts/apm/apm-merlin.dts b/arch/arm64/boot/dts/apm/apm-merlin.dts
index 387c6a8d0da9..b0f64414c1b0 100644
--- a/arch/arm64/boot/dts/apm/apm-merlin.dts
+++ b/arch/arm64/boot/dts/apm/apm-merlin.dts
@@ -83,3 +83,9 @@
status = "ok";
};
};
+
+&mdio {
+ sgenet0phy: phy@0 {
+ reg = <0x0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 44db32ec5e9c..b7fb5d9295c2 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -79,3 +79,15 @@
&mmc0 {
status = "ok";
};
+
+&mdio {
+ menet0phy: phy@3 {
+ reg = <0x3>;
+ };
+ sgenet0phy: phy@4 {
+ reg = <0x4>;
+ };
+ sgenet1phy: phy@5 {
+ reg = <0x5>;
+ };
+};
diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
index c569f761d090..2e1e5daa1dc7 100644
--- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
@@ -625,10 +625,18 @@
apm,irq-start = <8>;
};
+ mdio: mdio@1f610000 {
+ compatible = "apm,xgene-mdio-xfi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x1f610000 0x0 0xd100>;
+ clocks = <&xge0clk 0>;
+ };
+
sgenet0: ethernet@1f610000 {
compatible = "apm,xgene2-sgenet";
status = "disabled";
- reg = <0x0 0x1f610000 0x0 0x10000>,
+ reg = <0x0 0x1f610000 0x0 0xd100>,
<0x0 0x1f600000 0x0 0Xd100>,
<0x0 0x20000000 0x0 0X20000>;
interrupts = <0 96 4>,
@@ -637,6 +645,7 @@
clocks = <&xge0clk 0>;
local-mac-address = [00 01 73 00 00 01];
phy-connection-type = "sgmii";
+ phy-handle = <&sgenet0phy>;
};
xgenet1: ethernet@1f620000 {
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 5147d7698924..6bf7cbe2e72d 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -237,20 +237,11 @@
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f21c000 0x0 0x1000>;
reg-names = "csr-reg";
- csr-mask = <0x3>;
+ csr-mask = <0xa>;
+ enable-mask = <0xf>;
clock-output-names = "sge0clk";
};
- sge1clk: sge1clk@1f21c000 {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&socplldiv2 0>;
- reg = <0x0 0x1f21c000 0x0 0x1000>;
- reg-names = "csr-reg";
- csr-mask = <0xc>;
- clock-output-names = "sge1clk";
- };
-
xge0clk: xge0clk@1f61c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
@@ -921,6 +912,14 @@
clocks = <&rtcclk 0>;
};
+ mdio: mdio@17020000 {
+ compatible = "apm,xgene-mdio-rgmii";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x17020000 0x0 0xd100>;
+ clocks = <&menetclk 0>;
+ };
+
menet: ethernet@17020000 {
compatible = "apm,xgene-enet";
status = "disabled";
@@ -934,7 +933,7 @@
/* mac address will be overwritten by the bootloader */
local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "rgmii";
- phy-handle = <&menetphy>;
+ phy-handle = <&menet0phy>,<&menetphy>;
mdio {
compatible = "apm,xgene-mdio";
#address-cells = <1>;
@@ -960,6 +959,7 @@
clocks = <&sge0clk 0>;
local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "sgmii";
+ phy-handle = <&sgenet0phy>;
};
sgenet1: ethernet@1f210030 {
@@ -973,9 +973,9 @@
<0x0 0xAD 0x4>;
port-id = <1>;
dma-coherent;
- clocks = <&sge1clk 0>;
local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "sgmii";
+ phy-handle = <&sgenet1phy>;
};
xgenet: ethernet@1f610000 {
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index 54ca40c9f711..ea5603fd106a 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -52,6 +52,14 @@
};
};
+&pci_phy0 {
+ status = "ok";
+};
+
+&pci_phy1 {
+ status = "ok";
+};
+
&pcie0 {
status = "ok";
};
@@ -132,3 +140,11 @@
#size-cells = <1>;
};
};
+
+&mdio_mux_iproc {
+ mdio@10 {
+ gphy0: eth-phy@10 {
+ reg = <0x10>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index ec68ec1a80c8..46b78fa89f4c 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -263,6 +263,45 @@
IRQ_TYPE_LEVEL_HIGH)>;
};
+ mdio_mux_iproc: mdio-mux@6602023c {
+ compatible = "brcm,mdio-mux-iproc";
+ reg = <0x6602023c 0x14>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pci_phy0: pci-phy@0 {
+ compatible = "brcm,ns2-pcie-phy";
+ reg = <0x0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ mdio@7 {
+ reg = <0x7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pci_phy1: pci-phy@0 {
+ compatible = "brcm,ns2-pcie-phy";
+ reg = <0x0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ mdio@10 {
+ reg = <0x10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
timer0: timer@66030000 {
compatible = "arm,sp804", "arm,primecell";
reg = <0x66030000 0x1000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index f895fc02ab06..40846319be69 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -49,6 +49,10 @@
/ {
model = "LS1043A RDB Board";
+
+ aliases {
+ crypto = &crypto;
+ };
};
&i2c0 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index de0323b48b1e..6bd46c133010 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -159,6 +159,49 @@
big-endian;
};
+ crypto: crypto@1700000 {
+ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
+ "fsl,sec-v4.0";
+ fsl,sec-era = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0x1700000 0x100000>;
+ reg = <0x00 0x1700000 0x0 0x100000>;
+ interrupts = <0 75 0x4>;
+
+ sec_jr0: jr@10000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x10000 0x10000>;
+ interrupts = <0 71 0x4>;
+ };
+
+ sec_jr1: jr@20000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x20000 0x10000>;
+ interrupts = <0 72 0x4>;
+ };
+
+ sec_jr2: jr@30000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x30000 0x10000>;
+ interrupts = <0 73 0x4>;
+ };
+
+ sec_jr3: jr@40000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x40000 0x10000>;
+ interrupts = <0 74 0x4>;
+ };
+ };
+
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1043a-dcfg", "syscon";
reg = <0x0 0x1ee0000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index 3a4e9a2ab313..fbafa24cd533 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -125,7 +125,7 @@
#size-cells = <1>;
#interrupts-cells = <3>;
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
interrupt-parent = <&gic>;
ranges;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 05f89c4a5413..77b8c4e388ca 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -168,6 +168,18 @@
};
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ vpu_dma_reserved: vpu_dma_mem_region {
+ compatible = "shared-dma-pool";
+ reg = <0 0xb7000000 0 0x500000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
@@ -312,6 +324,17 @@
clock-names = "spi", "wrap";
};
+ vpu: vpu@10020000 {
+ compatible = "mediatek,mt8173-vpu";
+ reg = <0 0x10020000 0 0x30000>,
+ <0 0x10050000 0 0x100>;
+ reg-names = "tcm", "cfg_reg";
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_SCP_SEL>;
+ clock-names = "main";
+ memory-region = <&vpu_dma_reserved>;
+ };
+
sysirq: intpol-controller@10200620 {
compatible = "mediatek,mt8173-sysirq",
"mediatek,mt6577-sysirq";
@@ -754,6 +777,45 @@
clock-names = "apb", "smi";
};
+ vcodec_enc: vcodec@18002000 {
+ compatible = "mediatek,mt8173-vcodec-enc";
+ reg = <0 0x18002000 0 0x1000>, /* VENC_SYS */
+ <0 0x19002000 0 0x1000>; /* VENC_LT_SYS */
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_LOW>;
+ mediatek,larb = <&larb3>,
+ <&larb5>;
+ iommus = <&iommu M4U_PORT_VENC_RCPU>,
+ <&iommu M4U_PORT_VENC_REC>,
+ <&iommu M4U_PORT_VENC_BSDMA>,
+ <&iommu M4U_PORT_VENC_SV_COMV>,
+ <&iommu M4U_PORT_VENC_RD_COMV>,
+ <&iommu M4U_PORT_VENC_CUR_LUMA>,
+ <&iommu M4U_PORT_VENC_CUR_CHROMA>,
+ <&iommu M4U_PORT_VENC_REF_LUMA>,
+ <&iommu M4U_PORT_VENC_REF_CHROMA>,
+ <&iommu M4U_PORT_VENC_NBM_RDMA>,
+ <&iommu M4U_PORT_VENC_NBM_WDMA>,
+ <&iommu M4U_PORT_VENC_RCPU_SET2>,
+ <&iommu M4U_PORT_VENC_REC_FRM_SET2>,
+ <&iommu M4U_PORT_VENC_BSDMA_SET2>,
+ <&iommu M4U_PORT_VENC_SV_COMA_SET2>,
+ <&iommu M4U_PORT_VENC_RD_COMA_SET2>,
+ <&iommu M4U_PORT_VENC_CUR_LUMA_SET2>,
+ <&iommu M4U_PORT_VENC_CUR_CHROMA_SET2>,
+ <&iommu M4U_PORT_VENC_REF_LUMA_SET2>,
+ <&iommu M4U_PORT_VENC_REC_CHROMA_SET2>;
+ mediatek,vpu = <&vpu>;
+ clocks = <&topckgen CLK_TOP_VENCPLL_D2>,
+ <&topckgen CLK_TOP_VENC_SEL>,
+ <&topckgen CLK_TOP_UNIVPLL1_D2>,
+ <&topckgen CLK_TOP_VENC_LT_SEL>;
+ clock-names = "venc_sel_src",
+ "venc_sel",
+ "venc_lt_sel_src",
+ "venc_lt_sel";
+ };
+
vencltsys: clock-controller@19000000 {
compatible = "mediatek,mt8173-vencltsys", "syscon";
reg = <0 0x19000000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 46f325a143b0..188bbeab92b9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -163,7 +163,7 @@
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -492,6 +492,14 @@
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
};
+ rktimer: rktimer@ff850000 {
+ compatible = "rockchip,rk3399-timer";
+ reg = <0x0 0xff850000 0x0 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
+ clock-names = "pclk", "timer";
+ };
+
spdif: spdif@ff870000 {
compatible = "rockchip,rk3399-spdif";
reg = <0x0 0xff870000 0x0 0x1000>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index fd2d74d0491e..4ed4756dfa97 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -70,6 +70,7 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA=y
CONFIG_XEN=y
+CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index cff532a6744e..f43d2c44c765 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,6 +1,5 @@
generic-y += bug.h
generic-y += bugs.h
-generic-y += checksum.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += current.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index aee323b13802..5420cb0fcb3e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -113,4 +113,14 @@ static inline const char *acpi_get_enable_method(int cpu)
pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
#endif
+#ifdef CONFIG_ACPI_NUMA
+int arm64_acpi_numa_init(void);
+int acpi_numa_get_nid(unsigned int cpu, u64 hwid);
+#else
+static inline int arm64_acpi_numa_init(void) { return -ENOSYS; }
+static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; }
+#endif /* CONFIG_ACPI_NUMA */
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
+
#endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index beccbdefa106..8746ff6abd77 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -95,13 +95,11 @@ void apply_alternatives(void *start, size_t length);
* The code that follows this macro will be assembled and linked as
* normal. There are no restrictions on this code.
*/
-.macro alternative_if_not cap, enable = 1
- .if \enable
+.macro alternative_if_not cap
.pushsection .altinstructions, "a"
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
.popsection
661:
- .endif
.endm
/*
@@ -118,27 +116,27 @@ void apply_alternatives(void *start, size_t length);
* alternative sequence it is defined in (branches into an
* alternative sequence are not fixed up).
*/
-.macro alternative_else, enable = 1
- .if \enable
+.macro alternative_else
662: .pushsection .altinstr_replacement, "ax"
663:
- .endif
.endm
/*
* Complete an alternative code sequence.
*/
-.macro alternative_endif, enable = 1
- .if \enable
+.macro alternative_endif
664: .popsection
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
- .endif
.endm
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+.macro user_alt, label, oldinstr, newinstr, cond
+9999: alternative_insn "\oldinstr", "\newinstr", \cond
+ _ASM_EXTABLE 9999b, \label
+.endm
/*
* Generate the assembly for UAO alternatives with exception table entries.
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 10b017c4bdd8..d5025c69ca81 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -24,6 +24,7 @@
#define __ASM_ASSEMBLER_H
#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
@@ -261,7 +262,16 @@ lr .req x30 // link register
add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
bic \kaddr, \kaddr, \tmp2
-9998: dc \op, \kaddr
+9998:
+ .if (\op == cvau || \op == cvac)
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc \op, \kaddr
+alternative_else
+ dc civac, \kaddr
+alternative_endif
+ .else
+ dc \op, \kaddr
+ .endif
add \kaddr, \kaddr, \tmp1
cmp \kaddr, \size
b.lo 9998b
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index f3a3586a421c..c0235e0ff849 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -76,6 +76,36 @@
#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_add_acquire atomic_fetch_add_acquire
+#define atomic_fetch_add_release atomic_fetch_add_release
+#define atomic_fetch_add atomic_fetch_add
+
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#define atomic_fetch_sub_release atomic_fetch_sub_release
+#define atomic_fetch_sub atomic_fetch_sub
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_and_acquire atomic_fetch_and_acquire
+#define atomic_fetch_and_release atomic_fetch_and_release
+#define atomic_fetch_and atomic_fetch_and
+
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#define atomic_fetch_andnot atomic_fetch_andnot
+
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_or_acquire atomic_fetch_or_acquire
+#define atomic_fetch_or_release atomic_fetch_or_release
+#define atomic_fetch_or atomic_fetch_or
+
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#define atomic_fetch_xor_release atomic_fetch_xor_release
+#define atomic_fetch_xor atomic_fetch_xor
+
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
#define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new))
#define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new))
@@ -125,6 +155,36 @@
#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#define atomic64_fetch_add_release atomic64_fetch_add_release
+#define atomic64_fetch_add atomic64_fetch_add
+
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#define atomic64_fetch_sub_release atomic64_fetch_sub_release
+#define atomic64_fetch_sub atomic64_fetch_sub
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#define atomic64_fetch_and_release atomic64_fetch_and_release
+#define atomic64_fetch_and atomic64_fetch_and
+
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#define atomic64_fetch_andnot atomic64_fetch_andnot
+
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#define atomic64_fetch_or_release atomic64_fetch_or_release
+#define atomic64_fetch_or atomic64_fetch_or
+
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#define atomic64_fetch_xor_release atomic64_fetch_xor_release
+#define atomic64_fetch_xor atomic64_fetch_xor
+
#define atomic64_xchg_relaxed atomic_xchg_relaxed
#define atomic64_xchg_acquire atomic_xchg_acquire
#define atomic64_xchg_release atomic_xchg_release
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f61c84f6ba02..f819fdcff1ac 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -77,26 +77,57 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+__LL_SC_INLINE int \
+__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
+{ \
+ unsigned long tmp; \
+ int val, result; \
+ \
+ asm volatile("// atomic_fetch_" #op #name "\n" \
+" prfm pstl1strm, %3\n" \
+"1: ld" #acq "xr %w0, %3\n" \
+" " #asm_op " %w1, %w0, %w4\n" \
+" st" #rel "xr %w2, %w1, %3\n" \
+" cbnz %w2, 1b\n" \
+" " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : cl); \
+ \
+ return result; \
+} \
+__LL_SC_EXPORT(atomic_fetch_##op##name);
+
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
- ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)
-
-#define ATOMIC_OPS_RLX(...) \
- ATOMIC_OPS(__VA_ARGS__) \
+ ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)\
ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\
ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\
- ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)
+ ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)\
+ ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS_RLX(add, add)
-ATOMIC_OPS_RLX(sub, sub)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(...) \
+ ATOMIC_OP(__VA_ARGS__) \
+ ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, orr)
-ATOMIC_OP(xor, eor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, orr)
+ATOMIC_OPS(xor, eor)
-#undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -140,26 +171,57 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);
+#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+__LL_SC_INLINE long \
+__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
+{ \
+ long result, val; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_fetch_" #op #name "\n" \
+" prfm pstl1strm, %3\n" \
+"1: ld" #acq "xr %0, %3\n" \
+" " #asm_op " %1, %0, %4\n" \
+" st" #rel "xr %w2, %1, %3\n" \
+" cbnz %w2, 1b\n" \
+" " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : cl); \
+ \
+ return result; \
+} \
+__LL_SC_EXPORT(atomic64_fetch_##op##name);
+
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
- ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__)
-
-#define ATOMIC64_OPS_RLX(...) \
- ATOMIC64_OPS(__VA_ARGS__) \
+ ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \
ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \
ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \
- ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__)
+ ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS_RLX(add, add)
-ATOMIC64_OPS_RLX(sub, sub)
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(...) \
+ ATOMIC64_OP(__VA_ARGS__) \
+ ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, orr)
-ATOMIC64_OP(xor, eor)
+ATOMIC64_OPS(and, and)
+ATOMIC64_OPS(andnot, bic)
+ATOMIC64_OPS(or, orr)
+ATOMIC64_OPS(xor, eor)
-#undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
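The new *_fetch_* operations differ from the existing *_return ones only in the value handed back: the counter as it was before the operation rather than after it. A non-atomic sketch of the return-value semantics only, with a hypothetical helper name; the generated code is a single LL/SC loop (or one LSE instruction on the LSE side), never two separate steps.

#include <linux/atomic.h>

/* Illustration of return-value semantics only; the real primitive performs
 * the read and the update as one atomic operation.
 */
static inline int fetch_add_semantics(atomic_t *v, int i)
{
	int old = atomic_read(v);	/* what atomic_fetch_add(i, v) returns */

	atomic_add(i, v);		/* atomic_add_return(i, v) would return old + i */
	return old;
}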
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 39c1d340fec5..b5890be8f257 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -26,54 +26,57 @@
#endif
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
-
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
- " stclr %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#define ATOMIC_OP(op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
+" " #asm_op " %w[i], %[v]\n") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS); \
}
-static inline void atomic_or(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
+ATOMIC_OP(andnot, stclr)
+ATOMIC_OP(or, stset)
+ATOMIC_OP(xor, steor)
+ATOMIC_OP(add, stadd)
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
- " stset %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
-}
+#undef ATOMIC_OP
-static inline void atomic_xor(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
- " steor %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
+static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ __LL_SC_ATOMIC(fetch_##op##name), \
+ /* LSE atomics */ \
+" " #asm_op #mb " %w[i], %w[i], %[v]") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return w0; \
}
-static inline void atomic_add(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
+#define ATOMIC_FETCH_OPS(op, asm_op) \
+ ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
+ ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
+ ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
+ ATOMIC_FETCH_OP( , al, op, asm_op, "memory")
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
- " stadd %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
-}
+ATOMIC_FETCH_OPS(andnot, ldclr)
+ATOMIC_FETCH_OPS(or, ldset)
+ATOMIC_FETCH_OPS(xor, ldeor)
+ATOMIC_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
@@ -119,6 +122,33 @@ static inline void atomic_and(int i, atomic_t *v)
: __LL_SC_CLOBBERS);
}
+#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
+static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC(fetch_and##name), \
+ /* LSE atomics */ \
+ " mvn %w[i], %w[i]\n" \
+ " ldclr" #mb " %w[i], %w[i], %[v]") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return w0; \
+}
+
+ATOMIC_FETCH_OP_AND(_relaxed, )
+ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
+ATOMIC_FETCH_OP_AND(_release, l, "memory")
+ATOMIC_FETCH_OP_AND( , al, "memory")
+
+#undef ATOMIC_FETCH_OP_AND
+
static inline void atomic_sub(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
@@ -164,57 +194,87 @@ ATOMIC_OP_SUB_RETURN(_release, l, "memory")
ATOMIC_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC_OP_SUB_RETURN
-#undef __LL_SC_ATOMIC
-
-#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
-
-static inline void atomic64_andnot(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
- " stclr %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
+static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC(fetch_sub##name), \
+ /* LSE atomics */ \
+ " neg %w[i], %w[i]\n" \
+ " ldadd" #mb " %w[i], %w[i], %[v]") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return w0; \
}
-static inline void atomic64_or(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
+ATOMIC_FETCH_OP_SUB(_relaxed, )
+ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
+ATOMIC_FETCH_OP_SUB(_release, l, "memory")
+ATOMIC_FETCH_OP_SUB( , al, "memory")
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
- " stset %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#undef ATOMIC_FETCH_OP_SUB
+#undef __LL_SC_ATOMIC
+
+#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
+#define ATOMIC64_OP(op, asm_op) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("x0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
+" " #asm_op " %[i], %[v]\n") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS); \
}
-static inline void atomic64_xor(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
+ATOMIC64_OP(andnot, stclr)
+ATOMIC64_OP(or, stset)
+ATOMIC64_OP(xor, steor)
+ATOMIC64_OP(add, stadd)
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
- " steor %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#undef ATOMIC64_OP
+
+#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
+static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("x0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ __LL_SC_ATOMIC64(fetch_##op##name), \
+ /* LSE atomics */ \
+" " #asm_op #mb " %[i], %[i], %[v]") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return x0; \
}
-static inline void atomic64_add(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
+#define ATOMIC64_FETCH_OPS(op, asm_op) \
+ ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
+ ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
+ ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
+ ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
- " stadd %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
-}
+ATOMIC64_FETCH_OPS(andnot, ldclr)
+ATOMIC64_FETCH_OPS(or, ldset)
+ATOMIC64_FETCH_OPS(xor, ldeor)
+ATOMIC64_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
@@ -260,6 +320,33 @@ static inline void atomic64_and(long i, atomic64_t *v)
: __LL_SC_CLOBBERS);
}
+#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
+static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("w0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC64(fetch_and##name), \
+ /* LSE atomics */ \
+ " mvn %[i], %[i]\n" \
+ " ldclr" #mb " %[i], %[i], %[v]") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return x0; \
+}
+
+ATOMIC64_FETCH_OP_AND(_relaxed, )
+ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
+ATOMIC64_FETCH_OP_AND(_release, l, "memory")
+ATOMIC64_FETCH_OP_AND( , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_AND
+
static inline void atomic64_sub(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
@@ -306,6 +393,33 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN
+#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
+static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("w0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC64(fetch_sub##name), \
+ /* LSE atomics */ \
+ " neg %[i], %[i]\n" \
+ " ldadd" #mb " %[i], %[i], %[v]") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return x0; \
+}
+
+ATOMIC64_FETCH_OP_SUB(_relaxed, )
+ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
+ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
+ATOMIC64_FETCH_OP_SUB( , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_SUB
+
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index dae5c49618db..4eea7f618dce 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -91,6 +91,19 @@ do { \
__u.__val; \
})
+#define smp_cond_load_acquire(ptr, cond_expr) \
+({ \
+ typeof(ptr) __PTR = (ptr); \
+ typeof(*ptr) VAL; \
+ for (;;) { \
+ VAL = smp_load_acquire(__PTR); \
+ if (cond_expr) \
+ break; \
+ __cmpwait_relaxed(__PTR, VAL); \
+ } \
+ VAL; \
+})
+
#include <asm-generic/barrier.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
new file mode 100644
index 000000000000..09f65339d66d
--- /dev/null
+++ b/arch/arm64/include/asm/checksum.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CHECKSUM_H
+#define __ASM_CHECKSUM_H
+
+#include <linux/types.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum += (sum >> 16) | (sum << 16);
+ return ~(__force __sum16)(sum >> 16);
+}
+#define csum_fold csum_fold
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ __uint128_t tmp;
+ u64 sum;
+
+ tmp = *(const __uint128_t *)iph;
+ iph += 16;
+ ihl -= 4;
+ tmp += ((tmp >> 64) | (tmp << 64));
+ sum = tmp >> 64;
+ do {
+ sum += *(const u32 *)iph;
+ iph += 4;
+ } while (--ihl);
+
+ sum += ((sum >> 32) | (sum << 32));
+ return csum_fold(sum >> 32);
+}
+#define ip_fast_csum ip_fast_csum
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CHECKSUM_H */
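csum_fold() reduces a 32-bit partial sum to the final 16-bit Internet checksum: adding the value to itself rotated by 16 bits performs an end-around-carry addition of the two halves, and the complement of the upper half is the result. Below is a portable sketch of the same arithmetic with one worked value; the function name is illustrative, not part of this header.

#include <stdint.h>

/* For sum = 0x0001f203 the halves add to 0x0001 + 0xf203 = 0xf204 (any carry
 * wraps back in), and the one's complement gives the checksum 0x0dfb.
 */
static uint16_t fold32(uint32_t sum)
{
	sum += (sum >> 16) | (sum << 16);	/* end-around-carry add of the halves */
	return (uint16_t)~(sum >> 16);		/* upper half now holds the folded sum */
}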
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 510c7b404454..bd86a79491bc 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -224,4 +224,55 @@ __CMPXCHG_GEN(_mb)
__ret; \
})
+#define __CMPWAIT_CASE(w, sz, name) \
+static inline void __cmpwait_case_##name(volatile void *ptr, \
+ unsigned long val) \
+{ \
+ unsigned long tmp; \
+ \
+ asm volatile( \
+ " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
+ " cbnz %" #w "[tmp], 1f\n" \
+ " wfe\n" \
+ "1:" \
+ : [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr) \
+ : [val] "r" (val)); \
+}
+
+__CMPWAIT_CASE(w, b, 1);
+__CMPWAIT_CASE(w, h, 2);
+__CMPWAIT_CASE(w, , 4);
+__CMPWAIT_CASE( , , 8);
+
+#undef __CMPWAIT_CASE
+
+#define __CMPWAIT_GEN(sfx) \
+static inline void __cmpwait##sfx(volatile void *ptr, \
+ unsigned long val, \
+ int size) \
+{ \
+ switch (size) { \
+ case 1: \
+ return __cmpwait_case##sfx##_1(ptr, (u8)val); \
+ case 2: \
+ return __cmpwait_case##sfx##_2(ptr, (u16)val); \
+ case 4: \
+ return __cmpwait_case##sfx##_4(ptr, val); \
+ case 8: \
+ return __cmpwait_case##sfx##_8(ptr, val); \
+ default: \
+ BUILD_BUG(); \
+ } \
+ \
+ unreachable(); \
+}
+
+__CMPWAIT_GEN()
+
+#undef __CMPWAIT_GEN
+
+#define __cmpwait_relaxed(ptr, val) \
+ __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
+
#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 13a6103130cd..889226b4c6e1 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -25,10 +25,12 @@
*/
struct cpuinfo_arm64 {
struct cpu cpu;
+ struct kobject kobj;
u32 reg_ctr;
u32 reg_cntfrq;
u32 reg_dczid;
u32 reg_midr;
+ u32 reg_revidr;
u64 reg_id_aa64dfr0;
u64 reg_id_aa64dfr1;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 224efe730e46..49dd1bd3ea50 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -191,7 +191,9 @@ void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info);
+void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_errata(void);
+void __init enable_errata_workarounds(void);
void verify_local_cpu_errata(void);
void verify_local_cpu_capabilities(void);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 87e1985f3be8..9d9fd4b9a72e 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -80,12 +80,14 @@
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define BRCM_CPU_PART_VULCAN 0x516
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 2fcb9b7c876c..4b6b3f72a215 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -66,6 +66,11 @@
#define CACHE_FLUSH_IS_SAFE 1
+/* kprobes BRK opcodes with ESR encoding */
+#define BRK64_ESR_MASK 0xFFFF
+#define BRK64_ESR_KPROBES 0x0004
+#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
#define DBG_ESR_EVT_VECC 0x5
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 622db3c6474e..a9e54aad15ef 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -14,8 +14,7 @@ extern void efi_init(void);
#endif
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
-
-#define efi_set_mapping_permissions efi_create_mapping
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_setup() \
({ \
@@ -23,10 +22,10 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
efi_virtmap_load(); \
})
-#define arch_efi_call_virt(f, args...) \
+#define arch_efi_call_virt(p, f, args...) \
({ \
efi_##f##_t *__f; \
- __f = efi.systab->runtime->f; \
+ __f = p->f; \
__f(args); \
})
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 7a09c48c0475..579b6e654f2d 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
-#ifdef CONFIG_COMPAT
-
#ifdef __AARCH64EB__
#define COMPAT_ELF_PLATFORM ("v8b")
#else
#define COMPAT_ELF_PLATFORM ("v8l")
#endif
+#ifdef CONFIG_COMPAT
+
#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
/* AArch32 registers. */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 77eeb2cc648f..f772e15c4766 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -74,6 +74,7 @@
#define ESR_ELx_EC_SHIFT (26)
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
+#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL (UL(1) << 25)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 30e50eb54a67..1dbaa901d7e5 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -120,6 +120,29 @@ enum aarch64_insn_register {
AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */
};
+enum aarch64_insn_special_register {
+ AARCH64_INSN_SPCLREG_SPSR_EL1 = 0xC200,
+ AARCH64_INSN_SPCLREG_ELR_EL1 = 0xC201,
+ AARCH64_INSN_SPCLREG_SP_EL0 = 0xC208,
+ AARCH64_INSN_SPCLREG_SPSEL = 0xC210,
+ AARCH64_INSN_SPCLREG_CURRENTEL = 0xC212,
+ AARCH64_INSN_SPCLREG_DAIF = 0xDA11,
+ AARCH64_INSN_SPCLREG_NZCV = 0xDA10,
+ AARCH64_INSN_SPCLREG_FPCR = 0xDA20,
+ AARCH64_INSN_SPCLREG_DSPSR_EL0 = 0xDA28,
+ AARCH64_INSN_SPCLREG_DLR_EL0 = 0xDA29,
+ AARCH64_INSN_SPCLREG_SPSR_EL2 = 0xE200,
+ AARCH64_INSN_SPCLREG_ELR_EL2 = 0xE201,
+ AARCH64_INSN_SPCLREG_SP_EL1 = 0xE208,
+ AARCH64_INSN_SPCLREG_SPSR_INQ = 0xE218,
+ AARCH64_INSN_SPCLREG_SPSR_ABT = 0xE219,
+ AARCH64_INSN_SPCLREG_SPSR_UND = 0xE21A,
+ AARCH64_INSN_SPCLREG_SPSR_FIQ = 0xE21B,
+ AARCH64_INSN_SPCLREG_SPSR_EL3 = 0xF200,
+ AARCH64_INSN_SPCLREG_ELR_EL3 = 0xF201,
+ AARCH64_INSN_SPCLREG_SP_EL2 = 0xF210
+};
+
enum aarch64_insn_variant {
AARCH64_INSN_VARIANT_32BIT,
AARCH64_INSN_VARIANT_64BIT
@@ -223,8 +246,15 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
+__AARCH64_INSN_FUNCS(adr_adrp, 0x1F000000, 0x10000000)
+__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
+__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
+__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
+__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000)
+__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
@@ -273,10 +303,15 @@ __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
+__AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
+__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
+__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
+__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
#undef __AARCH64_INSN_FUNCS
@@ -286,6 +321,8 @@ bool aarch64_insn_is_branch_imm(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+bool aarch64_insn_uses_literal(u32 insn);
+bool aarch64_insn_is_branch(u32 insn);
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
@@ -367,9 +404,13 @@ bool aarch32_insn_is_wide(u32 insn);
#define A32_RT_OFFSET 12
#define A32_RT2_OFFSET 0
+u32 aarch64_insn_extract_system_reg(u32 insn);
u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
u32 aarch32_insn_mcr_extract_opc2(u32 insn);
u32 aarch32_insn_mcr_extract_crm(u32 insn);
+
+typedef bool (pstate_check_t)(unsigned long);
+extern pstate_check_t * const aarch32_opcode_cond_checks[16];
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 44be1e03ed65..9b6e408cfa51 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -174,13 +174,15 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define iounmap __iounmap
/*
- * io{read,write}{16,32}be() macros
+ * io{read,write}{16,32,64}be() macros
*/
#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+#define iowrite64be(v,p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
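A minimal, hypothetical use of the new 64-bit big-endian accessors; the device, register offsets and semantics are invented purely for illustration:

static u64 example_read_be_counter(void __iomem *base)
{
        /* Hypothetical layout: a latch command register at +0x08 and a
         * 64-bit big-endian counter value at +0x00. */
        iowrite64be(1ULL, base + 0x08);
        return ioread64be(base + 0x00);
}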
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 11cc941bd107..8c581281fa12 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -110,8 +110,5 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
: : "r" (flags) : "memory"); \
} while (0)
-#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
-#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
-
#endif
#endif
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
new file mode 100644
index 000000000000..04744dc5fb61
--- /dev/null
+++ b/arch/arm64/include/asm/kexec.h
@@ -0,0 +1,48 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARM64_KEXEC_H
+#define _ARM64_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+#define KEXEC_ARCH KEXEC_ARCH_AARCH64
+
+#ifndef __ASSEMBLY__
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ *
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ /* Empty routine needed to avoid build errors. */
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index f69f69c8120c..da84645525b9 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -38,25 +38,54 @@ extern int kgdb_fault_expected;
#endif /* !__ASSEMBLY__ */
/*
- * gdb is expecting the following registers layout.
+ * gdb remote protocol (well most versions of it) expects the following
+ * register layout.
*
* General purpose regs:
* r0-r30: 64 bit
* sp,pc : 64 bit
- * pstate : 64 bit
- * Total: 34
+ * pstate : 32 bit
+ * Total: 33 + 1
* FPU regs:
* f0-f31: 128 bit
- * Total: 32
- * Extra regs
* fpsr & fpcr: 32 bit
- * Total: 2
+ * Total: 32 + 2
*
+ * To expand a little on the "most versions of it"... when the gdb remote
+ * protocol for AArch64 was developed it depended on a statement in the
+ * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register".
+ * and, as a result, allocated only 32-bits for the PSTATE in the remote
+ * protocol. In fact this statement is still present in ARM DDI 0487A.i.
+ *
+ * Unfortunately "is a 32-bit register" has a very special meaning for
+ * system registers. It means that "the upper bits, bits[63:32], are
+ * RES0.". RES0 is heavily used in the ARM architecture documents as a
+ * way to leave space for future architecture changes. So to translate a
+ * little for people who don't spend their spare time reading ARM architecture
+ * manuals, what "is a 32-bit register" actually means in this context is
+ * "is a 64-bit register but one with no meaning allocated to any of the
+ * upper 32-bits... *yet*".
+ *
+ * Perhaps then we should not be surprised that this has led to some
+ * confusion. Specifically a patch, influenced by the above translation,
+ * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
+ * was reverted in gdb-7.8.1 and all later releases, when this was
+ * discovered to be an undocumented protocol change.
+ *
+ * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
+ * here even though the kernel itself allocates 64-bits for the same
+ * state. That is because this bit of code tells the kernel how the gdb
+ * remote protocol (well most versions of it) describes the register state.
+ *
+ * Note that if you are using one of the versions of gdb that supports
+ * the gdb-7.7 version of the protocol you cannot use kgdb directly
+ * without providing a custom register description (gdb can load new
+ * protocol descriptions at runtime).
*/
-#define _GP_REGS 34
+#define _GP_REGS 33
#define _FP_REGS 32
-#define _EXTRA_REGS 2
+#define _EXTRA_REGS 3
/*
* general purpose registers size in bytes.
* pstate is only 4 bytes. subtract 4 bytes
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
new file mode 100644
index 000000000000..61b49150dfa3
--- /dev/null
+++ b/arch/arm64/include/asm/kprobes.h
@@ -0,0 +1,62 @@
+/*
+ * arch/arm64/include/asm/kprobes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KPROBES_H
+#define _ARM_KPROBES_H
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+#define MAX_STACK_SIZE 128
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* Single step context for kprobe */
+struct kprobe_step_ctx {
+ unsigned long ss_pending;
+ unsigned long match_addr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_irqflag;
+ struct prev_kprobe prev_kprobe;
+ struct kprobe_step_ctx ss_ctx;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+};
+
+void arch_remove_kprobe(struct kprobe *);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr);
+int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr);
+void kretprobe_trampoline(void);
+void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
+
+#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 40bc1681b6d5..4cdeae3b17c6 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -210,7 +210,7 @@ static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
+ return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}
static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 72a3025bb583..31b73227b41f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -55,8 +55,9 @@
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
* (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
* TASK_SIZE - the maximum size of a user space task.
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 97b1d8f26b9c..8d9fce037b2f 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -34,7 +34,7 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
- pgprot_t prot);
+ pgprot_t prot, bool allow_block_mappings);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
#endif
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index e9b4f2942335..600887e491fd 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -5,6 +5,8 @@
#ifdef CONFIG_NUMA
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
/* currently, arm64 implements flat NUMA topology */
#define parent_node(node) (node)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 17b45f7d96d3..8472c6def5ef 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -23,16 +23,8 @@
/* PAGE_SHIFT determines the page size */
/* CONT_SHIFT determines the number of pages which can be tracked together */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT 16
-#define CONT_SHIFT 5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT 14
-#define CONT_SHIFT 7
-#else
-#define PAGE_SHIFT 12
-#define CONT_SHIFT 4
-#endif
+#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index ff98585d085a..d25f4f137c2a 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@
#define check_pgt_cache() do { } while (0)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#if CONFIG_PGTABLE_LEVELS > 2
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
new file mode 100644
index 000000000000..5af574d632fa
--- /dev/null
+++ b/arch/arm64/include/asm/probes.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm64/include/asm/probes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _ARM_PROBES_H
+#define _ARM_PROBES_H
+
+#include <asm/opcodes.h>
+
+struct kprobe;
+struct arch_specific_insn;
+
+typedef u32 kprobe_opcode_t;
+typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_specific_insn {
+ kprobe_opcode_t *insn;
+ pstate_check_t *pstate_cc;
+ kprobes_handler_t *handler;
+ /* restore address after step xol */
+ unsigned long restore;
+};
+
+#endif
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index cef1cf398356..ace0a96e7d6e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -192,5 +192,6 @@ static inline void spin_lock_prefetch(const void *ptr)
void cpu_enable_pan(void *__unused);
void cpu_enable_uao(void *__unused);
+void cpu_enable_cache_maint_trap(void *__unused);
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
new file mode 100644
index 000000000000..07b8ed037dee
--- /dev/null
+++ b/arch/arm64/include/asm/ptdump.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PTDUMP_H
+#define __ASM_PTDUMP_H
+
+#ifdef CONFIG_ARM64_PTDUMP
+
+#include <linux/mm_types.h>
+
+struct addr_marker {
+ unsigned long start_address;
+ char *name;
+};
+
+struct ptdump_info {
+ struct mm_struct *mm;
+ const struct addr_marker *markers;
+ unsigned long base_addr;
+ unsigned long max_addr;
+};
+
+int ptdump_register(struct ptdump_info *info, const char *name);
+
+#else
+static inline int ptdump_register(struct ptdump_info *info, const char *name)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM64_PTDUMP */
+
+#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index a307eb6e7fa8..ada08b5b036d 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -46,7 +46,6 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
-#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
@@ -74,6 +73,7 @@
#define COMPAT_PT_DATA_ADDR 0x10004
#define COMPAT_PT_TEXT_END_ADDR 0x10008
#ifndef __ASSEMBLY__
+#include <linux/bug.h>
/* sizeof(struct user) for AArch32 */
#define COMPAT_USER_SZ 296
@@ -117,8 +117,12 @@ struct pt_regs {
};
u64 orig_x0;
u64 syscallno;
+ u64 orig_addr_limit;
+ u64 unused; // maintain 16 byte alignment
};
+#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)
+
#define arch_has_single_step() (1)
#ifdef CONFIG_COMPAT
@@ -144,9 +148,58 @@ struct pt_regs {
#define fast_interrupts_enabled(regs) \
(!((regs)->pstate & PSR_F_BIT))
-#define user_stack_pointer(regs) \
+#define GET_USP(regs) \
(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
+#define SET_USP(ptregs, value) \
+ (!compat_user_mode(regs) ? ((regs)->sp = value) : ((regs)->compat_sp = value))
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset of the register.
+ *
+ * regs_get_register() returns the value of the register at @offset in @regs.
+ * The @offset is the offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+ u64 val = 0;
+
+ WARN_ON(offset & 7);
+
+ offset >>= 3;
+ switch (offset) {
+ case 0 ... 30:
+ val = regs->regs[offset];
+ break;
+ case offsetof(struct pt_regs, sp) >> 3:
+ val = regs->sp;
+ break;
+ case offsetof(struct pt_regs, pc) >> 3:
+ val = regs->pc;
+ break;
+ case offsetof(struct pt_regs, pstate) >> 3:
+ val = regs->pstate;
+ break;
+ default:
+ val = 0;
+ }
+
+ return val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->regs[0];
@@ -156,8 +209,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
-#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
+#define GET_IP(regs) ((unsigned long)(regs)->pc)
+#define SET_IP(regs, value) ((regs)->pc = ((u64) (value)))
+
+#define GET_FP(ptregs) ((unsigned long)(ptregs)->regs[29])
+#define SET_FP(ptregs, value) ((ptregs)->regs[29] = ((u64) (value)))
+
+#include <asm-generic/ptrace.h>
+#undef profile_pc
extern unsigned long profile_pc(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
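The regs_query_register_offset()/regs_get_register() pair declared above is the sort of interface kprobes-based tracing uses to read registers by name. A hedged sketch of a caller; the probe handler, the "x0"/"x1" register names and the message are assumptions made for illustration, not part of this patch:

#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        /* Look up the offsets once per call; < 0 means the name is unknown. */
        int x0_off = regs_query_register_offset("x0");
        int x1_off = regs_query_register_offset("x1");

        if (x0_off >= 0 && x1_off >= 0)
                pr_debug("args: x0=%#llx x1=%#llx\n",
                         regs_get_register(regs, x0_off),
                         regs_get_register(regs, x1_off));
        return 0;
}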
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 433e50405274..022644704a93 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
cpu_park_loop();
}
+/*
+ * If a secondary CPU enters the kernel but fails to come online,
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index fc9682bfe002..e875a5a551d7 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
unsigned int tmp;
arch_spinlock_t lockval;
+ u32 owner;
+
+ /*
+ * Ensure prior spin_lock operations to other locks have completed
+ * on this CPU before we test whether "lock" is locked.
+ */
+ smp_mb();
+ owner = READ_ONCE(lock->owner) << 16;
asm volatile(
" sevl\n"
"1: wfe\n"
"2: ldaxr %w0, %2\n"
+ /* Is the lock free? */
" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 1b\n"
+" cbz %w1, 3f\n"
+ /* Lock taken -- has there been a subsequent unlock->lock transition? */
+" eor %w1, %w3, %w0, lsl #16\n"
+" cbz %w1, 1b\n"
+ /*
+ * The owner has been updated, so there was an unlock->lock
+ * transition that we missed. That means we can rely on the
+ * store-release of the unlock operation paired with the
+ * load-acquire of the lock operation to publish any of our
+ * previous stores to the new lock owner and therefore don't
+ * need to bother with the writeback below.
+ */
+" b 4f\n"
+"3:\n"
+ /*
+ * Serialise against any concurrent lockers by writing back the
+ * unlocked lock value
+ */
ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" stxr %w1, %w0, %2\n"
-" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
- /* LSE atomics */
" nop\n"
-" nop\n")
+" nop\n",
+ /* LSE atomics */
+" mov %w1, %w0\n"
+" cas %w0, %w0, %2\n"
+" eor %w1, %w1, %w0\n")
+ /* Somebody else wrote to the lock, GOTO 10 and reload the value */
+" cbnz %w1, 2b\n"
+"4:"
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- :
+ : "r" (owner)
: "memory");
}
@@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
+ smp_mb(); /* See arch_spin_unlock_wait */
return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
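A C-level paraphrase of the rewritten arch_spin_unlock_wait() fast path, for illustration only; the real implementation has to stay in assembly so that the exclusive monitor, WFE and the final writeback/CAS are used exactly as shown above:

static inline void unlock_wait_sketch(arch_spinlock_t *lock)
{
        u32 owner;
        arch_spinlock_t lockval;

        smp_mb();                               /* order earlier lock ops */
        owner = READ_ONCE(lock->owner);

        for (;;) {
                lockval = READ_ONCE(*lock);
                if (lockval.owner == lockval.next)
                        break;                  /* lock is free */
                if (lockval.owner != owner)
                        return;                 /* missed an unlock->lock handover */
                cpu_relax();                    /* the asm parks in WFE here */
        }
        /* The asm additionally writes the observed-free value back
         * (STXR or CAS) to serialise against concurrent lockers. */
}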
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 751e901c8d37..cc06794b7346 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -98,11 +98,11 @@
SCTLR_ELx_SA | SCTLR_ELx_I)
/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_UCI (1 << 26)
#define SCTLR_EL1_SPAN (1 << 23)
#define SCTLR_EL1_SED (1 << 8)
#define SCTLR_EL1_CP15BEN (1 << 5)
-
/* id_aa64isar0 */
#define ID_AA64ISAR0_RDM_SHIFT 28
#define ID_AA64ISAR0_ATOMICS_SHIFT 20
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 0cc2f29bf9da..9cd03f3e812f 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,6 +34,8 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
+void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74572af..5e834d10b291 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
/*
* User space memory access functions
*/
+#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -81,19 +82,6 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a) == (b))
/*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr) \
-({ \
- unsigned long flag; \
- asm("cmp %1, %0; cset %0, lo" \
- : "=&r" (flag) \
- : "r" (addr), "0" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
-
-/*
* Test whether a block of memory is a valid user space address.
* Returns 1 if the range is valid, 0 otherwise.
*
@@ -269,15 +257,29 @@ do { \
-EFAULT; \
})
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ return __arch_copy_to_user(to, from, n);
+}
+
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ kasan_check_write(to, n);
+
if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
+ n = __arch_copy_from_user(to, from, n);
else /* security hole - plug it */
memset(to, 0, n);
return n;
@@ -285,8 +287,10 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ kasan_check_read(from, n);
+
if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
+ n = __arch_copy_to_user(to, from, n);
return n;
}
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 41e58fe3c041..e78ac26324bd 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 390
+#define __NR_compat_syscalls 394
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5b925b761a2a..b7e8ef16ff0d 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
#define __NR_membarrier 389
__SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index de66199673d7..2b9a63771eda 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -22,6 +22,8 @@
struct vdso_data {
__u64 cs_cycle_last; /* Timebase at clocksource init */
+ __u64 raw_time_sec; /* Raw time */
+ __u64 raw_time_nsec;
__u64 xtime_clock_sec; /* Kernel time */
__u64 xtime_clock_nsec;
__u64 xtime_coarse_sec; /* Coarse time */
@@ -29,8 +31,10 @@ struct vdso_data {
__u64 wtm_clock_sec; /* Wall to monotonic time */
__u64 wtm_clock_nsec;
__u32 tb_seq_count; /* Timebase sequence counter */
- __u32 cs_mult; /* Clocksource multiplier */
- __u32 cs_shift; /* Clocksource shift */
+ /* cs_* members must be adjacent and in this order (ldp accesses) */
+ __u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */
+ __u32 cs_shift; /* Clocksource shift (mono = raw) */
+ __u32 cs_raw_mult; /* Raw clocksource multiplier */
__u32 tz_minuteswest; /* Whacky timezone stuff */
__u32 tz_dsttime;
__u32 use_syscall;
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index dcbcf8dcbefb..bbc6a8cf83f1 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -34,6 +34,11 @@
*/
#define HVC_SET_VECTORS 1
+/*
+ * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
+ */
+#define HVC_SOFT_RESTART 2
+
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
new file mode 100644
index 000000000000..ec154e719b11
--- /dev/null
+++ b/arch/arm64/include/asm/xen/xen-ops.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_XEN_OPS_H
+#define _ASM_XEN_OPS_H
+
+void xen_efi_runtime_setup(void);
+
+#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 2173149d8954..14f7b651c787 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -26,8 +26,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o entry32.o \
- ../../arm/kernel/opcodes.o
+ sys_compat.o entry32.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
@@ -42,16 +41,15 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
arm64-obj-$(CONFIG_ACPI) += acpi.o
+arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o
arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
+arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
+ cpu-reset.o
-obj-y += $(arm64-obj-y) vdso/
+obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o
extra-y += $(head-y) vmlinux.lds
-
-# vDSO - this must be built first to generate the symbol offsets
-$(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
-$(obj)/vdso/vdso-offsets.h: $(obj)/vdso
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
new file mode 100644
index 000000000000..f85149cc7c71
--- /dev/null
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -0,0 +1,112 @@
+/*
+ * ACPI 5.1 based NUMA setup for ARM64
+ * Lots of code was borrowed from arch/x86/mm/srat.c
+ *
+ * Copyright 2004 Andi Kleen, SuSE Labs.
+ * Copyright (C) 2013-2016, Linaro Ltd.
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ *
+ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
+ *
+ * Called from acpi_numa_init while reading the SRAT and SLIT tables.
+ * Assumes all memory regions belonging to a single proximity domain
+ * are in one chunk. Holes between them will be included in the node.
+ */
+
+#define pr_fmt(fmt) "ACPI: NUMA: " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/bootmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/topology.h>
+
+#include <acpi/processor.h>
+#include <asm/numa.h>
+
+static int cpus_in_srat;
+
+struct __node_cpu_hwid {
+ u32 node_id; /* logical node containing this CPU */
+ u64 cpu_hwid; /* MPIDR for this CPU */
+};
+
+static struct __node_cpu_hwid early_node_cpu_hwid[NR_CPUS] = {
+[0 ... NR_CPUS - 1] = {NUMA_NO_NODE, PHYS_CPUID_INVALID} };
+
+int acpi_numa_get_nid(unsigned int cpu, u64 hwid)
+{
+ int i;
+
+ for (i = 0; i < cpus_in_srat; i++) {
+ if (hwid == early_node_cpu_hwid[i].cpu_hwid)
+ return early_node_cpu_hwid[i].node_id;
+ }
+
+ return NUMA_NO_NODE;
+}
+
+/* Callback for Proximity Domain -> ACPI processor UID mapping */
+void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
+{
+ int pxm, node;
+ phys_cpuid_t mpidr;
+
+ if (srat_disabled())
+ return;
+
+ if (pa->header.length < sizeof(struct acpi_srat_gicc_affinity)) {
+ pr_err("SRAT: Invalid SRAT header length: %d\n",
+ pa->header.length);
+ bad_srat();
+ return;
+ }
+
+ if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
+ return;
+
+ if (cpus_in_srat >= NR_CPUS) {
+ pr_warn_once("SRAT: cpu_to_node_map[%d] is too small, may not be able to use all cpus\n",
+ NR_CPUS);
+ return;
+ }
+
+ pxm = pa->proximity_domain;
+ node = acpi_map_pxm_to_node(pxm);
+
+ if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ pr_err("SRAT: Too many proximity domains %d\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ mpidr = acpi_map_madt_entry(pa->acpi_processor_uid);
+ if (mpidr == PHYS_CPUID_INVALID) {
+ pr_err("SRAT: PXM %d with ACPI ID %d has no valid MPIDR in MADT\n",
+ pxm, pa->acpi_processor_uid);
+ bad_srat();
+ return;
+ }
+
+ early_node_cpu_hwid[cpus_in_srat].node_id = node;
+ early_node_cpu_hwid[cpus_in_srat].cpu_hwid = mpidr;
+ node_set(node, numa_nodes_parsed);
+ cpus_in_srat++;
+ pr_info("SRAT: PXM %d -> MPIDR 0x%Lx -> Node %d\n",
+ pxm, mpidr, node);
+}
+
+int __init arm64_acpi_numa_init(void)
+{
+ int ret;
+
+ ret = acpi_numa_init();
+ if (ret)
+ return ret;
+
+ return srat_disabled() ? -EINVAL : 0;
+}
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 678f30b05a45..78f368039c79 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/arm-smccc.h>
+#include <linux/kprobes.h>
#include <asm/checksum.h>
@@ -34,8 +35,8 @@ EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__copy_in_user);
@@ -68,6 +69,7 @@ EXPORT_SYMBOL(test_and_change_bit);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
+NOKPROBE_SYMBOL(_mcount);
#endif
/* arm-smccc */
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c37202c0c838..42ffdb54e162 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -121,7 +121,7 @@ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
* 0 - If all the hooks ran successfully.
* -EINVAL - At least one hook is not supported by the CPU.
*/
-static int run_all_insn_set_hw_mode(unsigned long cpu)
+static int run_all_insn_set_hw_mode(unsigned int cpu)
{
int rc = 0;
unsigned long flags;
@@ -131,7 +131,7 @@ static int run_all_insn_set_hw_mode(unsigned long cpu)
list_for_each_entry(insn, &insn_emulation, node) {
bool enable = (insn->current_mode == INSN_HW);
if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
- pr_warn("CPU[%ld] cannot support the emulation of %s",
+ pr_warn("CPU[%u] cannot support the emulation of %s",
cpu, insn->ops->name);
rc = -EINVAL;
}
@@ -316,28 +316,6 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
*/
#define TYPE_SWPB (1 << 22)
-/*
- * Set up process info to signal segmentation fault - called on access error.
- */
-static void set_segfault(struct pt_regs *regs, unsigned long addr)
-{
- siginfo_t info;
-
- down_read(&current->mm->mmap_sem);
- if (find_vma(current->mm, addr) == NULL)
- info.si_code = SEGV_MAPERR;
- else
- info.si_code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
-
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_addr = (void *) instruction_pointer(regs);
-
- pr_debug("SWP{B} emulation: access caused memory abort!\n");
- arm64_notify_die("Illegal memory access", regs, &info, 0);
-}
-
static int emulate_swpX(unsigned int address, unsigned int *data,
unsigned int type)
{
@@ -366,6 +344,21 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
return res;
}
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
+{
+ u32 cc_bits = opcode >> 28;
+
+ if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+ if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
+ return ARM_OPCODE_CONDTEST_PASS;
+ else
+ return ARM_OPCODE_CONDTEST_FAIL;
+ }
+ return ARM_OPCODE_CONDTEST_UNCOND;
+}
+
/*
* swp_handler logs the id of calling process, dissects the instruction, sanity
* checks the memory location, calls emulate_swpX for the actual operation and
@@ -380,7 +373,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
type = instr & TYPE_SWPB;
- switch (arm_check_condition(instr, regs->pstate)) {
+ switch (aarch32_check_condition(instr, regs->pstate)) {
case ARM_OPCODE_CONDTEST_PASS:
break;
case ARM_OPCODE_CONDTEST_FAIL:
@@ -430,7 +423,8 @@ ret:
return 0;
fault:
- set_segfault(regs, address);
+ pr_debug("SWP{B} emulation: access caused memory abort!\n");
+ arm64_notify_segfault(regs, address);
return 0;
}
@@ -461,7 +455,7 @@ static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
{
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
- switch (arm_check_condition(instr, regs->pstate)) {
+ switch (aarch32_check_condition(instr, regs->pstate)) {
case ARM_OPCODE_CONDTEST_PASS:
break;
case ARM_OPCODE_CONDTEST_FAIL:
@@ -617,20 +611,6 @@ static struct insn_emulation_ops setend_ops = {
.set_hw_mode = setend_set_hw_mode,
};
-static int insn_cpu_hotplug_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
-{
- int rc = 0;
- if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
- rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
-
- return notifier_from_errno(rc);
-}
-
-static struct notifier_block insn_cpu_hotplug_notifier = {
- .notifier_call = insn_cpu_hotplug_notify,
-};
-
/*
* Invoked as late_initcall, since not needed before init spawned.
*/
@@ -649,7 +629,9 @@ static int __init armv8_deprecated_init(void)
pr_info("setend instruction emulation is not supported on the system");
}
- register_cpu_notifier(&insn_cpu_hotplug_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
+ "AP_ARM64_ISNDEP_STARTING",
+ run_all_insn_set_hw_mode, NULL);
register_insn_emulation_sysctl(ctl_abi);
return 0;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index f8e5d47f0880..05070b72fc28 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -51,6 +51,17 @@ int main(void)
DEFINE(S_X5, offsetof(struct pt_regs, regs[5]));
DEFINE(S_X6, offsetof(struct pt_regs, regs[6]));
DEFINE(S_X7, offsetof(struct pt_regs, regs[7]));
+ DEFINE(S_X8, offsetof(struct pt_regs, regs[8]));
+ DEFINE(S_X10, offsetof(struct pt_regs, regs[10]));
+ DEFINE(S_X12, offsetof(struct pt_regs, regs[12]));
+ DEFINE(S_X14, offsetof(struct pt_regs, regs[14]));
+ DEFINE(S_X16, offsetof(struct pt_regs, regs[16]));
+ DEFINE(S_X18, offsetof(struct pt_regs, regs[18]));
+ DEFINE(S_X20, offsetof(struct pt_regs, regs[20]));
+ DEFINE(S_X22, offsetof(struct pt_regs, regs[22]));
+ DEFINE(S_X24, offsetof(struct pt_regs, regs[24]));
+ DEFINE(S_X26, offsetof(struct pt_regs, regs[26]));
+ DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp));
#ifdef CONFIG_COMPAT
@@ -60,6 +71,7 @@ int main(void)
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+ DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
@@ -77,6 +89,7 @@ int main(void)
BLANK();
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
@@ -84,6 +97,8 @@ int main(void)
DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
BLANK();
DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last));
+ DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec));
+ DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec));
DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec));
DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
@@ -91,7 +106,8 @@ int main(void)
DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec));
DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec));
DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count));
- DEFINE(VDSO_CS_MULT, offsetof(struct vdso_data, cs_mult));
+ DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult));
+ DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult));
DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift));
DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
new file mode 100644
index 000000000000..65f42d257414
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -0,0 +1,54 @@
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+.text
+.pushsection .idmap.text, "ax"
+
+/*
+ * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
+ * cpu_soft_restart.
+ *
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
+ * @entry: Location to jump to for soft reset.
+ * arg0: First argument passed to @entry.
+ * arg1: Second argument passed to @entry.
+ * arg2: Third argument passed to @entry.
+ *
+ * Put the CPU into the same state as it would be if it had been reset, and
+ * branch to what would be the reset vector. It must be executed with the
+ * flat identity mapping.
+ */
+ENTRY(__cpu_soft_restart)
+ /* Clear sctlr_el1 flags. */
+ mrs x12, sctlr_el1
+ ldr x13, =SCTLR_ELx_FLAGS
+ bic x12, x12, x13
+ msr sctlr_el1, x12
+ isb
+
+ cbz x0, 1f // el2_switch?
+ mov x0, #HVC_SOFT_RESTART
+ hvc #0 // no return
+
+1: mov x18, x1 // entry
+ mov x0, x2 // arg0
+ mov x1, x3 // arg1
+ mov x2, x4 // arg2
+ br x18
+ENDPROC(__cpu_soft_restart)
+
+.popsection
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
new file mode 100644
index 000000000000..d4e9ecb264f0
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -0,0 +1,34 @@
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARM64_CPU_RESET_H
+#define _ARM64_CPU_RESET_H
+
+#include <asm/virt.h>
+
+void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+ unsigned long arg0, unsigned long arg1, unsigned long arg2);
+
+static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
+ unsigned long entry, unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
+{
+ typeof(__cpu_soft_restart) *restart;
+
+ el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
+ is_hyp_mode_available();
+ restart = (void *)virt_to_phys(__cpu_soft_restart);
+
+ cpu_install_idmap();
+ restart(el2_switch, entry, arg0, arg1, arg2);
+ unreachable();
+}
+
+#endif
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index d42789499f17..82b0fc2e637b 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -46,6 +46,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM errata 826319, 827319, 824069",
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+ .enable = cpu_enable_cache_maint_trap,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
@@ -54,6 +55,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM errata 819472",
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+ .enable = cpu_enable_cache_maint_trap,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
@@ -98,6 +100,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 1),
},
+ {
+ /* Cavium ThunderX, T81 pass 1.0 */
+ .desc = "Cavium erratum 27456",
+ .capability = ARM64_WORKAROUND_CAVIUM_27456,
+ MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+ },
#endif
{
}
@@ -127,3 +135,8 @@ void check_local_cpu_errata(void)
{
update_cpu_capabilities(arm64_errata, "enabling workaround for");
}
+
+void __init enable_errata_workarounds(void)
+{
+ enable_cpu_capabilities(arm64_errata);
+}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 811773d1c1d0..916d27ad79c1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -913,8 +913,7 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void __init
-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
if (caps->enable && cpus_have_cap(caps->capability))
@@ -1036,6 +1035,7 @@ void __init setup_cpu_features(void)
/* Set the CPU feature capabilies */
setup_feature_capabilities();
+ enable_errata_workarounds();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index e11857fce05f..75a0f8acef66 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -9,13 +9,16 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
-int __init arm_cpuidle_init(unsigned int cpu)
+int arm_cpuidle_init(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
@@ -39,3 +42,18 @@ int arm_cpuidle_suspend(int index)
return cpu_ops[cpu]->cpu_suspend(index);
}
+
+#ifdef CONFIG_ACPI
+
+#include <acpi/processor.h>
+
+int acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return arm_cpuidle_init(cpu);
+}
+
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
+}
+#endif
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 3808470486f3..ed1b84fe6925 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -22,6 +22,8 @@
#include <linux/bitops.h>
#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
static int c_show(struct seq_file *m, void *v)
{
int i, j;
+ bool compat = personality(current->personality) == PER_LINUX32;
for_each_online_cpu(i) {
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
* "processor". Give glibc what it expects.
*/
seq_printf(m, "processor\t: %d\n", i);
+ if (compat)
+ seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+ MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
* software which does already (at least for 32-bit).
*/
seq_puts(m, "Features\t:");
- if (personality(current->personality) == PER_LINUX32) {
+ if (compat) {
#ifdef CONFIG_COMPAT
for (j = 0; compat_hwcap_str[j]; j++)
if (compat_elf_hwcap & (1 << j))
@@ -177,6 +183,123 @@ const struct seq_operations cpuinfo_op = {
.show = c_show
};
+
+static struct kobj_type cpuregs_kobj_type = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/*
+ * The ARM ARM uses the phrase "32-bit register" to describe a register
+ * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i), however
+ * no statement is made as to whether the upper 32 bits will or will not
+ * be made use of in future, and between ARM DDI 0487A.c and ARM DDI
+ * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit.
+ *
+ * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
+ * registers, we expose them both as 64 bit values to cater for possible
+ * future expansion without an ABI break.
+ */
+#define kobj_to_cpuinfo(kobj) container_of(kobj, struct cpuinfo_arm64, kobj)
+#define CPUREGS_ATTR_RO(_name, _field) \
+ static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj); \
+ \
+ if (info->reg_midr) \
+ return sprintf(buf, "0x%016x\n", info->reg_##_field); \
+ else \
+ return 0; \
+ } \
+ static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)
+
+CPUREGS_ATTR_RO(midr_el1, midr);
+CPUREGS_ATTR_RO(revidr_el1, revidr);
+
+static struct attribute *cpuregs_id_attrs[] = {
+ &cpuregs_attr_midr_el1.attr,
+ &cpuregs_attr_revidr_el1.attr,
+ NULL
+};
+
+static struct attribute_group cpuregs_attr_group = {
+ .attrs = cpuregs_id_attrs,
+ .name = "identification"
+};
+
+static int cpuid_add_regs(int cpu)
+{
+ int rc;
+ struct device *dev;
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ rc = -ENODEV;
+ goto out;
+ }
+ rc = kobject_add(&info->kobj, &dev->kobj, "regs");
+ if (rc)
+ goto out;
+ rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
+ if (rc)
+ kobject_del(&info->kobj);
+out:
+ return rc;
+}
+
+static int cpuid_remove_regs(int cpu)
+{
+ struct device *dev;
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+ dev = get_cpu_device(cpu);
+ if (!dev)
+ return -ENODEV;
+ if (info->kobj.parent) {
+ sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
+ kobject_del(&info->kobj);
+ }
+
+ return 0;
+}
+
+static int cpuid_callback(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ int rc = 0;
+ unsigned long cpu = (unsigned long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ rc = cpuid_add_regs(cpu);
+ break;
+ case CPU_DEAD:
+ rc = cpuid_remove_regs(cpu);
+ break;
+ }
+
+ return notifier_from_errno(rc);
+}
+
+static int __init cpuinfo_regs_init(void)
+{
+ int cpu;
+
+ cpu_notifier_register_begin();
+
+ for_each_possible_cpu(cpu) {
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+ kobject_init(&info->kobj, &cpuregs_kobj_type);
+ if (cpu_online(cpu))
+ cpuid_add_regs(cpu);
+ }
+ __hotcpu_notifier(cpuid_callback, 0);
+
+ cpu_notifier_register_done();
+ return 0;
+}
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
unsigned int cpu = smp_processor_id();
@@ -206,6 +329,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_ctr = read_cpuid_cachetype();
info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
+ info->reg_revidr = read_cpuid(REVIDR_EL1);
info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
@@ -258,3 +382,5 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
init_cpu_features(&boot_cpu_data);
}
+
+device_initcall(cpuinfo_regs_init);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 4fbf3c54275c..91fff48d0f57 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -23,6 +23,7 @@
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
+#include <linux/kprobes.h>
#include <linux/stat.h>
#include <linux/uaccess.h>
@@ -48,6 +49,7 @@ static void mdscr_write(u32 mdscr)
asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
local_dbg_restore(flags);
}
+NOKPROBE_SYMBOL(mdscr_write);
static u32 mdscr_read(void)
{
@@ -55,6 +57,7 @@ static u32 mdscr_read(void)
asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
return mdscr;
}
+NOKPROBE_SYMBOL(mdscr_read);
/*
* Allow root to disable self-hosted debug from userspace.
@@ -103,6 +106,7 @@ void enable_debug_monitors(enum dbg_active_el el)
mdscr_write(mdscr);
}
}
+NOKPROBE_SYMBOL(enable_debug_monitors);
void disable_debug_monitors(enum dbg_active_el el)
{
@@ -123,6 +127,7 @@ void disable_debug_monitors(enum dbg_active_el el)
mdscr_write(mdscr);
}
}
+NOKPROBE_SYMBOL(disable_debug_monitors);
/*
* OS lock clearing.
@@ -151,7 +156,6 @@ static int debug_monitors_init(void)
/* Clear the OS lock. */
on_each_cpu(clear_os_lock, NULL, 1);
isb();
- local_dbg_enable();
/* Register hotplug handler. */
__register_cpu_notifier(&os_lock_nb);
@@ -166,22 +170,15 @@ postcore_initcall(debug_monitors_init);
*/
static void set_regs_spsr_ss(struct pt_regs *regs)
{
- unsigned long spsr;
-
- spsr = regs->pstate;
- spsr &= ~DBG_SPSR_SS;
- spsr |= DBG_SPSR_SS;
- regs->pstate = spsr;
+ regs->pstate |= DBG_SPSR_SS;
}
+NOKPROBE_SYMBOL(set_regs_spsr_ss);
static void clear_regs_spsr_ss(struct pt_regs *regs)
{
- unsigned long spsr;
-
- spsr = regs->pstate;
- spsr &= ~DBG_SPSR_SS;
- regs->pstate = spsr;
+ regs->pstate &= ~DBG_SPSR_SS;
}
+NOKPROBE_SYMBOL(clear_regs_spsr_ss);
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
@@ -225,6 +222,7 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
return retval;
}
+NOKPROBE_SYMBOL(call_step_hook);
static void send_user_sigtrap(int si_code)
{
@@ -266,6 +264,10 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
*/
user_rewind_single_step(current);
} else {
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+#endif
if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
return 0;
@@ -279,6 +281,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
return 0;
}
+NOKPROBE_SYMBOL(single_step_handler);
/*
* Breakpoint handler is re-entrant as another breakpoint can
@@ -316,19 +319,28 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
+NOKPROBE_SYMBOL(call_break_hook);
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
if (user_mode(regs)) {
send_user_sigtrap(TRAP_BRKPT);
- } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
- pr_warning("Unexpected kernel BRK exception at EL1\n");
+ }
+#ifdef CONFIG_KPROBES
+ else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
+ if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
+ return -EFAULT;
+ }
+#endif
+ else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+ pr_warn("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
}
return 0;
}
+NOKPROBE_SYMBOL(brk_handler);
int aarch32_break_handler(struct pt_regs *regs)
{
@@ -365,6 +377,7 @@ int aarch32_break_handler(struct pt_regs *regs)
send_user_sigtrap(TRAP_BRKPT);
return 0;
}
+NOKPROBE_SYMBOL(aarch32_break_handler);
static int __init debug_traps_init(void)
{
@@ -386,6 +399,7 @@ void user_rewind_single_step(struct task_struct *task)
if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
+NOKPROBE_SYMBOL(user_rewind_single_step);
void user_fastforward_single_step(struct task_struct *task)
{
@@ -401,6 +415,7 @@ void kernel_enable_single_step(struct pt_regs *regs)
mdscr_write(mdscr_read() | DBG_MDSCR_SS);
enable_debug_monitors(DBG_ACTIVE_EL1);
}
+NOKPROBE_SYMBOL(kernel_enable_single_step);
void kernel_disable_single_step(void)
{
@@ -408,12 +423,14 @@ void kernel_disable_single_step(void)
mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
disable_debug_monitors(DBG_ACTIVE_EL1);
}
+NOKPROBE_SYMBOL(kernel_disable_single_step);
int kernel_active_single_step(void)
{
WARN_ON(!irqs_disabled());
return mdscr_read() & DBG_MDSCR_SS;
}
+NOKPROBE_SYMBOL(kernel_active_single_step);
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
@@ -421,8 +438,10 @@ void user_enable_single_step(struct task_struct *task)
set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
set_regs_spsr_ss(task_pt_regs(task));
}
+NOKPROBE_SYMBOL(user_enable_single_step);
void user_disable_single_step(struct task_struct *task)
{
clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}
+NOKPROBE_SYMBOL(user_disable_single_step);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 78f52488f9ff..ba9bee389fd5 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -62,13 +62,61 @@ struct screen_info screen_info __section(.data);
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
pteval_t prot_val = create_mapping_protection(md);
+ bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
+ md->type != EFI_RUNTIME_SERVICES_DATA);
+
+ if (!PAGE_ALIGNED(md->phys_addr) ||
+ !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
+ /*
+ * If the end address of this region is not aligned to page
+ * size, the mapping is rounded up, and may end up sharing a
+ * page frame with the next UEFI memory region. If we create
+ * a block entry now, we may need to split it again when mapping
+ * the next region, and support for that is going to be removed
+ * from the MMU routines. So avoid block mappings altogether in
+ * that case.
+ */
+ allow_block_mappings = false;
+ }
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
- __pgprot(prot_val | PTE_NG));
+ __pgprot(prot_val | PTE_NG), allow_block_mappings);
+ return 0;
+}
+
+static int __init set_permissions(pte_t *ptep, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ efi_memory_desc_t *md = data;
+ pte_t pte = *ptep;
+
+ if (md->attribute & EFI_MEMORY_RO)
+ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+ if (md->attribute & EFI_MEMORY_XP)
+ pte = set_pte_bit(pte, __pgprot(PTE_PXN));
+ set_pte(ptep, pte);
return 0;
}
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+ efi_memory_desc_t *md)
+{
+ BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
+ md->type != EFI_RUNTIME_SERVICES_DATA);
+
+ /*
+ * Calling apply_to_page_range() is only safe on regions that are
+ * guaranteed to be mapped down to pages. Since we are only called
+ * for regions that have been mapped using efi_create_mapping() above
+ * (and this is checked by the generic Memory Attributes table parsing
+ * routines), there is no need to check that again here.
+ */
+ return apply_to_page_range(mm, md->virt_addr,
+ md->num_pages << EFI_PAGE_SHIFT,
+ set_permissions, md);
+}
+
static int __init arm64_dmi_init(void)
{
/*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 12e8d2bcb3f9..96e4a2b64cc1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,6 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
@@ -97,7 +98,14 @@
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
- .endif
+ get_thread_info tsk
+ /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ ldr x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [sp, #S_ORIG_ADDR_LIMIT]
+ mov x20, #TASK_SIZE_64
+ str x20, [tsk, #TI_ADDR_LIMIT]
+ ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+ .endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
@@ -128,6 +136,14 @@
.endm
.macro kernel_exit, el
+ .if \el != 0
+ /* Restore the task's original addr_limit. */
+ ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+ str x20, [tsk, #TI_ADDR_LIMIT]
+
+ /* No need to restore UAO, it will be restored from SPSR_EL1 */
+ .endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
@@ -242,6 +258,7 @@ tsk .req x28 // current thread_info
/*
* Exception vectors.
*/
+ .pushsection ".entry.text", "ax"
.align 11
ENTRY(vectors)
@@ -406,7 +423,6 @@ el1_irq:
bl trace_hardirqs_off
#endif
- get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
@@ -451,7 +467,7 @@ el0_sync:
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
- b.eq el0_undef
+ b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
@@ -532,7 +548,7 @@ el0_ia:
enable_dbg_and_irq
ct_user_exit
mov x0, x26
- orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
+ mov x1, x25
mov x2, sp
bl do_mem_abort
b ret_to_user
@@ -579,6 +595,16 @@ el0_undef:
mov x0, sp
bl do_undefinstr
b ret_to_user
+el0_sys:
+ /*
+ * System instructions, for trapped cache maintenance instructions
+ */
+ enable_dbg_and_irq
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_sysinstr
+ b ret_to_user
el0_dbg:
/*
* Debug exception handling
@@ -774,6 +800,8 @@ __ni_sys_trace:
bl do_ni_syscall
b __sys_trace_return
+ .popsection // .entry.text
+
/*
* Special system call wrappers.
*/
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f8df75d740f4..21ab5df9fa76 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
+#include <asm/smp.h>
#include <asm/suspend.h>
#include <asm/virt.h>
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
unsigned long flags;
struct sleep_stack_data state;
+ if (cpus_are_stuck_in_kernel()) {
+ pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+ return -EBUSY;
+ }
+
local_dbg_save(flags);
if (__cpu_suspend_enter(&state)) {
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index ce21aa88263f..26a6bf77d272 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -24,6 +24,7 @@
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
+#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
@@ -127,6 +128,7 @@ static u64 read_wb_reg(int reg, int n)
return val;
}
+NOKPROBE_SYMBOL(read_wb_reg);
static void write_wb_reg(int reg, int n, u64 val)
{
@@ -140,6 +142,7 @@ static void write_wb_reg(int reg, int n, u64 val)
}
isb();
}
+NOKPROBE_SYMBOL(write_wb_reg);
/*
* Convert a breakpoint privilege level to the corresponding exception
@@ -157,6 +160,7 @@ static enum dbg_active_el debug_exception_level(int privilege)
return -EINVAL;
}
}
+NOKPROBE_SYMBOL(debug_exception_level);
enum hw_breakpoint_ops {
HW_BREAKPOINT_INSTALL,
@@ -575,6 +579,7 @@ static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
write_wb_reg(reg, i, ctrl);
}
}
+NOKPROBE_SYMBOL(toggle_bp_registers);
/*
* Debug exception handlers.
@@ -654,6 +659,7 @@ unlock:
return 0;
}
+NOKPROBE_SYMBOL(breakpoint_handler);
static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
@@ -756,6 +762,7 @@ unlock:
return 0;
}
+NOKPROBE_SYMBOL(watchpoint_handler);
/*
* Handle single-step exception.
@@ -813,6 +820,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
return !handled_exception;
}
+NOKPROBE_SYMBOL(reinstall_suspended_bps);
/*
* Context-switcher for restoring suspended breakpoints.
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 8727f4490772..d3b5f75e652e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -71,8 +71,16 @@ el1_sync:
msr vbar_el2, x1
b 9f
+2: cmp x0, #HVC_SOFT_RESTART
+ b.ne 3f
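+ // x1 holds the restart target; shuffle x2..x4 down into x0..x2 first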
+ mov x0, x2
+ mov x2, x4
+ mov x4, x1
+ mov x1, x3
+ br x4 // no return
+
/* Someone called kvm_call_hyp() against the hyp-stub... */
-2: mov x0, #ARM_EXCEPTION_HYP_GONE
+3: mov x0, #ARM_EXCEPTION_HYP_GONE
9: eret
ENDPROC(el1_sync)
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 368c08290dd8..63f9432d05e8 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -30,6 +30,7 @@
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
+#include <asm/opcodes.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT BIT(31)
@@ -162,6 +163,32 @@ static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
aarch64_insn_is_nop(insn);
}
+bool __kprobes aarch64_insn_uses_literal(u32 insn)
+{
+ /* ldr/ldrsw (literal), prfm */
+
+ return aarch64_insn_is_ldr_lit(insn) ||
+ aarch64_insn_is_ldrsw_lit(insn) ||
+ aarch64_insn_is_adr_adrp(insn) ||
+ aarch64_insn_is_prfm_lit(insn);
+}
+
+bool __kprobes aarch64_insn_is_branch(u32 insn)
+{
+ /* b, bl, cb*, tb*, b.cond, br, blr */
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_ret(insn) ||
+ aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_bcond(insn);
+}
+
/*
* ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
* Section B2.6.5 "Concurrent modification and execution of instructions":
@@ -1175,6 +1202,14 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset)
BUG();
}
+/*
+ * Extract the Op/CR data from a msr/mrs instruction.
+ */
+u32 aarch64_insn_extract_system_reg(u32 insn)
+{
+ return (insn & 0x1FFFE0) >> 5;
+}
+
bool aarch32_insn_is_wide(u32 insn)
{
return insn >= 0xe800;
@@ -1200,3 +1235,101 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
return insn & CRM_MASK;
}
+
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) == 0;
+}
+
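+/*
+ * The helpers below fold one flag into another, relying on the fixed NZCV
+ * bit layout in PSTATE (N = bit 31, Z = bit 30, C = bit 29, V = bit 28):
+ * shifting right by one moves Z onto C, shifting left by three moves V
+ * onto N, and shifting left by one moves Z onto N.
+ */
+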
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+ return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+ __check_eq, __check_ne, __check_cs, __check_cc,
+ __check_mi, __check_pl, __check_vs, __check_vc,
+ __check_hi, __check_ls, __check_ge, __check_lt,
+ __check_gt, __check_le, __check_al, __check_al
+};
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index b67531a13136..8c57f6496e56 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/kprobes.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -58,7 +59,17 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ "x30", 8, offsetof(struct pt_regs, regs[30])},
{ "sp", 8, offsetof(struct pt_regs, sp)},
{ "pc", 8, offsetof(struct pt_regs, pc)},
- { "pstate", 8, offsetof(struct pt_regs, pstate)},
+ /*
+ * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
+ * protocol disagrees. Therefore we must extract only the lower
+ * 32-bits. Look for the big comment in asm/kgdb.h for more
+ * detail.
+ */
+ { "pstate", 4, offsetof(struct pt_regs, pstate)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ + 4
+#endif
+ },
{ "v0", 16, -1 },
{ "v1", 16, -1 },
{ "v2", 16, -1 },
@@ -128,6 +139,8 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
memset((char *)gdb_regs, 0, NUMREGBYTES);
thread_regs = task_pt_regs(task);
memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+ /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
+ dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
@@ -218,6 +231,7 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return 0;
}
+NOKPROBE_SYMBOL(kgdb_brk_fn)
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
{
@@ -226,12 +240,14 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
return 0;
}
+NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
{
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return 0;
}
+NOKPROBE_SYMBOL(kgdb_step_brk_fn);
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 000000000000..bc96c8a7fc79
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,212 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/mmu_context.h>
+
+#include "cpu-reset.h"
+
+/* Global variables for the arm64_relocate_new_kernel routine. */
+extern const unsigned char arm64_relocate_new_kernel[];
+extern const unsigned long arm64_relocate_new_kernel_size;
+
+static unsigned long kimage_start;
+
+/**
+ * kexec_image_info - For debugging output.
+ */
+#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
+static void _kexec_image_info(const char *func, int line,
+ const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("%s:%d:\n", func, line);
+ pr_debug(" kexec kimage info:\n");
+ pr_debug(" type: %d\n", kimage->type);
+ pr_debug(" start: %lx\n", kimage->start);
+ pr_debug(" head: %lx\n", kimage->head);
+ pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+ i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz,
+ kimage->segment[i].memsz,
+ kimage->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+ /* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
+ * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
+ */
+int machine_kexec_prepare(struct kimage *kimage)
+{
+ kimage_start = kimage->start;
+
+ kexec_image_info(kimage);
+
+ if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
+ pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
+ */
+static void kexec_list_flush(struct kimage *kimage)
+{
+ kimage_entry_t *entry;
+
+ for (entry = &kimage->head; ; entry++) {
+ unsigned int flag;
+ void *addr;
+
+ /* flush the list entries. */
+ __flush_dcache_area(entry, sizeof(kimage_entry_t));
+
+ flag = *entry & IND_FLAGS;
+ if (flag == IND_DONE)
+ break;
+
+ addr = phys_to_virt(*entry & PAGE_MASK);
+
+ switch (flag) {
+ case IND_INDIRECTION:
+ /* Point just before the new page so the loop's entry++ lands on its first slot. */
+ entry = (kimage_entry_t *)addr - 1;
+ break;
+ case IND_SOURCE:
+ /* flush the source pages. */
+ __flush_dcache_area(addr, PAGE_SIZE);
+ break;
+ case IND_DESTINATION:
+ break;
+ default:
+ BUG();
+ }
+ }
+}
+
+/**
+ * kexec_segment_flush - Helper to flush the kimage segments to PoC.
+ */
+static void kexec_segment_flush(const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("%s:\n", __func__);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+ i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz,
+ kimage->segment[i].memsz,
+ kimage->segment[i].memsz / PAGE_SIZE);
+
+ __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
+ kimage->segment[i].memsz);
+ }
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *kimage)
+{
+ phys_addr_t reboot_code_buffer_phys;
+ void *reboot_code_buffer;
+
+ /*
+ * New cpus may have become stuck_in_kernel after we loaded the image.
+ */
+ BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));
+
+ reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
+ reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+ kexec_image_info(kimage);
+
+ pr_debug("%s:%d: control_code_page: %p\n", __func__, __LINE__,
+ kimage->control_code_page);
+ pr_debug("%s:%d: reboot_code_buffer_phys: %pa\n", __func__, __LINE__,
+ &reboot_code_buffer_phys);
+ pr_debug("%s:%d: reboot_code_buffer: %p\n", __func__, __LINE__,
+ reboot_code_buffer);
+ pr_debug("%s:%d: relocate_new_kernel: %p\n", __func__, __LINE__,
+ arm64_relocate_new_kernel);
+ pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
+ __func__, __LINE__, arm64_relocate_new_kernel_size,
+ arm64_relocate_new_kernel_size);
+
+ /*
+ * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
+ * after the kernel is shut down.
+ */
+ memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
+ arm64_relocate_new_kernel_size);
+
+ /* Flush the reboot_code_buffer in preparation for its execution. */
+ __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+ flush_icache_range((uintptr_t)reboot_code_buffer,
+ (uintptr_t)reboot_code_buffer +
+ arm64_relocate_new_kernel_size);
+
+ /* Flush the kimage list and its buffers. */
+ kexec_list_flush(kimage);
+
+ /* Flush the new image if already in place. */
+ if (kimage->head & IND_DONE)
+ kexec_segment_flush(kimage);
+
+ pr_info("Bye!\n");
+
+ /* Disable all DAIF exceptions. */
+ asm volatile ("msr daifset, #0xf" : : : "memory");
+
+ /*
+ * cpu_soft_restart will shutdown the MMU, disable data caches, then
+ * transfer control to the reboot_code_buffer which contains a copy of
+ * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
+ * uses physical addressing to relocate the new image to its final
+ * position and transfers control to the image entry point when the
+ * relocation is complete.
+ */
+
+ cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
+ kimage_start, 0);
+
+ BUG(); /* Should never get here. */
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ /* Empty routine needed to avoid build errors. */
+}
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
new file mode 100644
index 000000000000..ce06312e3d34
--- /dev/null
+++ b/arch/arm64/kernel/probes/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
+ kprobes_trampoline.o \
+ simulate-insn.o
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
new file mode 100644
index 000000000000..37e47a9d617e
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -0,0 +1,174 @@
+/*
+ * arch/arm64/kernel/probes/decode-insn.c
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <asm/kprobes.h>
+#include <asm/insn.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+static bool __kprobes aarch64_insn_is_steppable(u32 insn)
+{
+ /*
+ * Branch instructions will write a new value into the PC which is
+ * likely to be relative to the XOL address and therefore invalid.
+ * Deliberate generation of an exception during stepping is also not
+ * currently safe. Lastly, MSR instructions can do any number of nasty
+ * things we can't handle during single-stepping.
+ */
+ if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
+ if (aarch64_insn_is_branch(insn) ||
+ aarch64_insn_is_msr_imm(insn) ||
+ aarch64_insn_is_msr_reg(insn) ||
+ aarch64_insn_is_exception(insn) ||
+ aarch64_insn_is_eret(insn))
+ return false;
+
+ /*
+ * The MRS instruction may not return a correct value when
+ * executing in the single-stepping environment. We do make one
+ * exception, for reading the DAIF bits.
+ */
+ if (aarch64_insn_is_mrs(insn))
+ return aarch64_insn_extract_system_reg(insn)
+ != AARCH64_INSN_SPCLREG_DAIF;
+
+ /*
+ * The HINT instruction is problematic when single-stepping,
+ * except for the NOP case.
+ */
+ if (aarch64_insn_is_hint(insn))
+ return aarch64_insn_is_nop(insn);
+
+ return true;
+ }
+
+ /*
+ * Instructions which load PC relative literals are not going to work
+ * when executed from an XOL slot. Instructions doing an exclusive
+ * load/store are not going to complete successfully when single-step
+ * exception handling happens in the middle of the sequence.
+ */
+ if (aarch64_insn_uses_literal(insn) ||
+ aarch64_insn_is_exclusive(insn))
+ return false;
+
+ return true;
+}
+
+/* Return:
+ * INSN_REJECTED If the instruction is not allowed to be kprobed,
+ * INSN_GOOD If the instruction is supported and uses its instruction slot,
+ * INSN_GOOD_NO_SLOT If the instruction is supported but doesn't use its slot.
+ */
+static enum kprobe_insn __kprobes
+arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /*
+ * Instructions reading or modifying the PC won't work from the XOL
+ * slot.
+ */
+ if (aarch64_insn_is_steppable(insn))
+ return INSN_GOOD;
+
+ if (aarch64_insn_is_bcond(insn)) {
+ asi->handler = simulate_b_cond;
+ } else if (aarch64_insn_is_cbz(insn) ||
+ aarch64_insn_is_cbnz(insn)) {
+ asi->handler = simulate_cbz_cbnz;
+ } else if (aarch64_insn_is_tbz(insn) ||
+ aarch64_insn_is_tbnz(insn)) {
+ asi->handler = simulate_tbz_tbnz;
+ } else if (aarch64_insn_is_adr_adrp(insn)) {
+ asi->handler = simulate_adr_adrp;
+ } else if (aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn)) {
+ asi->handler = simulate_b_bl;
+ } else if (aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_ret(insn)) {
+ asi->handler = simulate_br_blr_ret;
+ } else if (aarch64_insn_is_ldr_lit(insn)) {
+ asi->handler = simulate_ldr_literal;
+ } else if (aarch64_insn_is_ldrsw_lit(insn)) {
+ asi->handler = simulate_ldrsw_literal;
+ } else {
+ /*
+ * Instruction cannot be stepped out-of-line and we don't
+ * (yet) simulate it.
+ */
+ return INSN_REJECTED;
+ }
+
+ return INSN_GOOD_NO_SLOT;
+}
+
+static bool __kprobes
+is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
+{
+ while (scan_start > scan_end) {
+ /*
+ * atomic region starts from exclusive load and ends with
+ * exclusive store.
+ */
+ if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
+ return false;
+ else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
+ return true;
+ scan_start--;
+ }
+
+ return false;
+}
+
+enum kprobe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+{
+ enum kprobe_insn decoded;
+ kprobe_opcode_t insn = le32_to_cpu(*addr);
+ kprobe_opcode_t *scan_start = addr - 1;
+ kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
+#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+ struct module *mod;
+#endif
+
+ if (addr >= (kprobe_opcode_t *)_text &&
+ scan_end < (kprobe_opcode_t *)_text)
+ scan_end = (kprobe_opcode_t *)_text;
+#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+ else {
+ preempt_disable();
+ mod = __module_address((unsigned long)addr);
+ if (mod && within_module_init((unsigned long)addr, mod) &&
+ !within_module_init((unsigned long)scan_end, mod))
+ scan_end = (kprobe_opcode_t *)mod->init_layout.base;
+ else if (mod && within_module_core((unsigned long)addr, mod) &&
+ !within_module_core((unsigned long)scan_end, mod))
+ scan_end = (kprobe_opcode_t *)mod->core_layout.base;
+ preempt_enable();
+ }
+#endif
+ decoded = arm_probe_decode_insn(insn, asi);
+
+ if (decoded == INSN_REJECTED ||
+ is_probed_address_atomic(scan_start, scan_end))
+ return INSN_REJECTED;
+
+ return decoded;
+}
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
new file mode 100644
index 000000000000..d438289646a6
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm64/kernel/probes/decode-insn.h
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_ARM64_H
+#define _ARM_KERNEL_KPROBES_ARM64_H
+
+/*
+ * ARM strongly recommends a limit of 128 bytes between LoadExcl and
+ * StoreExcl instructions in a single thread of execution, so keep the
+ * max atomic context size at 32 instructions (128 bytes / 4-byte opcodes).
+ */
+#define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t))
+
+enum kprobe_insn {
+ INSN_REJECTED,
+ INSN_GOOD_NO_SLOT,
+ INSN_GOOD,
+};
+
+enum kprobe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+
+#endif /* _ARM_KERNEL_KPROBES_ARM64_H */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
new file mode 100644
index 000000000000..bf9768588288
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -0,0 +1,686 @@
+/*
+ * arch/arm64/kernel/probes/kprobes.c
+ *
+ * Kprobes support for ARM64
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/stringify.h>
+#include <asm/traps.h>
+#include <asm/ptrace.h>
+#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
+#include <asm/system_misc.h>
+#include <asm/insn.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm-generic/sections.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
+static inline unsigned long min_stack_size(unsigned long addr)
+{
+ unsigned long size;
+
+ if (on_irq_stack(addr, raw_smp_processor_id()))
+ size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
+ else
+ size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
+
+ return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
+}
+
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+ /* prepare insn slot */
+ p->ainsn.insn[0] = cpu_to_le32(p->opcode);
+
+ flush_icache_range((uintptr_t) (p->ainsn.insn),
+ (uintptr_t) (p->ainsn.insn) +
+ MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+
+ /*
+ * The PC needs restoring to the next instruction after stepping xol.
+ */
+ p->ainsn.restore = (unsigned long) p->addr +
+ sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+ /* This instruction is not executed xol. No need to adjust the PC */
+ p->ainsn.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (p->ainsn.handler)
+ p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
+
+ /* single step simulated, now go for post processing */
+ post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long probe_addr = (unsigned long)p->addr;
+ extern char __start_rodata[];
+ extern char __end_rodata[];
+
+ if (probe_addr & 0x3)
+ return -EINVAL;
+
+ /* copy instruction */
+ p->opcode = le32_to_cpu(*p->addr);
+
+ if (in_exception_text(probe_addr))
+ return -EINVAL;
+ if (probe_addr >= (unsigned long) __start_rodata &&
+ probe_addr <= (unsigned long) __end_rodata)
+ return -EINVAL;
+
+ /* decode instruction */
+ switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
+ case INSN_REJECTED: /* insn not supported */
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT: /* insn need simulation */
+ p->ainsn.insn = NULL;
+ break;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+ break;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+ void *addrs[1];
+ u32 insns[1];
+
+ addrs[0] = (void *)addr;
+ insns[0] = (u32)opcode;
+
+ return aarch64_insn_patch_text(addrs, insns, 1);
+}
+
+/* arm kprobe: install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, BRK64_OPCODE_KPROBES);
+}
+
+/* disarm kprobe: remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * The D-flag (Debug mask) is set (masked) upon debug exception entry.
+ * Kprobes needs to clear (unmask) the D-flag -ONLY- in the case of a
+ * recursive probe, i.e. when a probe is hit from kprobe handler context
+ * while executing the pre/post handlers. In this case we return with the
+ * D-flag clear so that single-stepping can be carried out.
+ *
+ * Leave D-flag set in all other cases.
+ */
+static void __kprobes
+spsr_set_debug_flag(struct pt_regs *regs, int mask)
+{
+ unsigned long spsr = regs->pstate;
+
+ if (mask)
+ spsr |= PSR_D_BIT;
+ else
+ spsr &= ~PSR_D_BIT;
+
+ regs->pstate = spsr;
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without disabling interrupts on the local CPU, an interrupt could
+ * occur between the exception return and the start of the out-of-line
+ * single step, which would result in wrongly single-stepping into the
+ * interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_irqflag = regs->pstate;
+ regs->pstate |= PSR_I_BIT;
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ if (kcb->saved_irqflag & PSR_I_BIT)
+ regs->pstate |= PSR_I_BIT;
+ else
+ regs->pstate &= ~PSR_I_BIT;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.insn;
+
+ set_ss_context(kcb, slot); /* mark pending ss */
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 0);
+ else
+ WARN_ON(regs->pstate & PSR_D_BIT);
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+ kernel_enable_single_step(regs);
+ instruction_pointer_set(regs, slot);
+ } else {
+ /* insn simulation */
+ arch_simulate_insn(p, regs);
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.restore != 0)
+ instruction_pointer_set(regs, cur->ainsn.restore);
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler) {
+ /*
+ * post_handler can hit a breakpoint and single step
+ * again, so we enable the D-flag for the recursive exception.
+ */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe, point the ip back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ instruction_pointer_set(regs, (unsigned long) cur->addr);
+ if (!instruction_pointer(regs))
+ BUG();
+
+ kernel_disable_single_step();
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 1);
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting;
+ * the npre/npostfault counts could also be used to account
+ * for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault; this could happen
+ * if the handler tries to access user space via
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+ }
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+static void __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur_kprobe;
+ struct kprobe_ctlblk *kcb;
+ unsigned long addr = instruction_pointer(regs);
+
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe((kprobe_opcode_t *) addr);
+
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it prepped
+ * for calling the break_handler below on re-entry,
+ * so get out doing nothing more here.
+ *
+ * pre_handler can hit a breakpoint and can step thru
+ * before return, keep PSTATE D-flag enabled until
+ * pre_handler return back.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs, kcb, 0);
+ return;
+ }
+ }
+ } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
+ BRK64_OPCODE_KPROBES) && cur_kprobe) {
+ /* We probably hit a jprobe. Call its break handler. */
+ if (cur_kprobe->break_handler &&
+ cur_kprobe->break_handler(cur_kprobe, regs)) {
+ setup_singlestep(cur_kprobe, regs, kcb, 0);
+ return;
+ }
+ }
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Return to the original instruction and continue.
+ */
+}
+
+static int __kprobes
+kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ if ((kcb->ss_ctx.ss_pending)
+ && (kcb->ss_ctx.match_addr == addr)) {
+ clear_ss_context(kcb); /* clear pending ss */
+ return DBG_HOOK_HANDLED;
+ }
+ /* not ours, kprobes should ignore it */
+ return DBG_HOOK_ERROR;
+}
+
+int __kprobes
+kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ int retval;
+
+ /* return error if this is not our step */
+ retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
+
+ if (retval == DBG_HOOK_HANDLED) {
+ kprobes_restore_local_irqflag(kcb, regs);
+ kernel_disable_single_step();
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 1);
+
+ post_kprobe_handler(kcb, regs);
+ }
+
+ return retval;
+}
+
+int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
+{
+ kprobe_handler(regs);
+ return DBG_HOOK_HANDLED;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ long stack_ptr = kernel_stack_pointer(regs);
+
+ kcb->jprobe_saved_regs = *regs;
+ /*
+ * As Linus pointed out, gcc assumes that the callee
+ * owns the argument space and could overwrite it, e.g.
+ * tailcall optimization. So, to be absolutely safe
+ * we also save and restore enough stack bytes to cover
+ * the argument area.
+ */
+ kasan_disable_current();
+ memcpy(kcb->jprobes_stack, (void *)stack_ptr,
+ min_stack_size(stack_ptr));
+ kasan_enable_current();
+
+ instruction_pointer_set(regs, (unsigned long) jp->entry);
+ preempt_disable();
+ pause_graph_tracing();
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ /*
+ * Jprobe handlers return by raising a break exception that is
+ * encoded the same way as a kprobe breakpoint, but with the
+ * following differences:
+ * - a special PC that distinguishes it from other kprobes, and
+ * - the stack pointer restored to the originally saved pt_regs value.
+ */
+ asm volatile(" mov sp, %0 \n"
+ "jprobe_return_break: brk %1 \n"
+ :
+ : "r" (kcb->jprobe_saved_regs.sp),
+ "I" (BRK64_ESR_KPROBES)
+ : "memory");
+
+ unreachable();
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ long stack_addr = kcb->jprobe_saved_regs.sp;
+ long orig_sp = kernel_stack_pointer(regs);
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ extern const char jprobe_return_break[];
+
+ if (instruction_pointer(regs) != (u64) jprobe_return_break)
+ return 0;
+
+ if (orig_sp != stack_addr) {
+ struct pt_regs *saved_regs =
+ (struct pt_regs *)kcb->jprobe_saved_regs.sp;
+ pr_err("current sp %lx does not match saved sp %lx\n",
+ orig_sp, stack_addr);
+ pr_err("Saved registers for jprobe %p\n", jp);
+ show_regs(saved_regs);
+ pr_err("Current registers\n");
+ show_regs(regs);
+ BUG();
+ }
+ unpause_graph_tracing();
+ *regs = kcb->jprobe_saved_regs;
+ kasan_disable_current();
+ memcpy((void *)stack_addr, kcb->jprobes_stack,
+ min_stack_size(stack_addr));
+ kasan_enable_current();
+ preempt_enable_no_resched();
+ return 1;
+}
+
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+ extern char __idmap_text_start[], __idmap_text_end[];
+ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+ if ((addr >= (unsigned long)__kprobes_text_start &&
+ addr < (unsigned long)__kprobes_text_end) ||
+ (addr >= (unsigned long)__entry_text_start &&
+ addr < (unsigned long)__entry_text_end) ||
+ (addr >= (unsigned long)__idmap_text_start &&
+ addr < (unsigned long)__idmap_text_end) ||
+ !!search_exception_tables(addr))
+ return true;
+
+ if (!is_kernel_in_hyp_mode()) {
+ if ((addr >= (unsigned long)__hyp_text_start &&
+ addr < (unsigned long)__hyp_text_end) ||
+ (addr >= (unsigned long)__hyp_idmap_text_start &&
+ addr < (unsigned long)__hyp_idmap_text_end))
+ return true;
+ }
+
+ return false;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address =
+ (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path have
+ * return probes installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always pushed into the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the (chronologically) first instance's ret_addr
+ * will be the real return address, and all the rest will
+ * point to kretprobe_trampoline.
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (ri->rp && ri->rp->handler) {
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_hash_unlock(current, &flags);
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ return (void *)orig_ret_address;
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
+
+ /* replace return addr (x30) with trampoline */
+ regs->regs[30] = (long)&kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000000..5d6e7f14638c
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,81 @@
+/*
+ * trampoline entry and return code for kretprobes.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .macro save_all_base_regs
+ stp x0, x1, [sp, #S_X0]
+ stp x2, x3, [sp, #S_X2]
+ stp x4, x5, [sp, #S_X4]
+ stp x6, x7, [sp, #S_X6]
+ stp x8, x9, [sp, #S_X8]
+ stp x10, x11, [sp, #S_X10]
+ stp x12, x13, [sp, #S_X12]
+ stp x14, x15, [sp, #S_X14]
+ stp x16, x17, [sp, #S_X16]
+ stp x18, x19, [sp, #S_X18]
+ stp x20, x21, [sp, #S_X20]
+ stp x22, x23, [sp, #S_X22]
+ stp x24, x25, [sp, #S_X24]
+ stp x26, x27, [sp, #S_X26]
+ stp x28, x29, [sp, #S_X28]
+ add x0, sp, #S_FRAME_SIZE
+ stp lr, x0, [sp, #S_LR]
+ /*
+ * Construct a useful saved PSTATE
+ */
+ mrs x0, nzcv
+ mrs x1, daif
+ orr x0, x0, x1
+ mrs x1, CurrentEL
+ orr x0, x0, x1
+ mrs x1, SPSel
+ orr x0, x0, x1
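+ /* xzr fills the saved PC slot; x0 holds the PSTATE synthesized above */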
+ stp xzr, x0, [sp, #S_PC]
+ .endm
+
+ .macro restore_all_base_regs
+ ldr x0, [sp, #S_PSTATE]
+ and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT)
+ msr nzcv, x0
+ ldp x0, x1, [sp, #S_X0]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+ ldp x8, x9, [sp, #S_X8]
+ ldp x10, x11, [sp, #S_X10]
+ ldp x12, x13, [sp, #S_X12]
+ ldp x14, x15, [sp, #S_X14]
+ ldp x16, x17, [sp, #S_X16]
+ ldp x18, x19, [sp, #S_X18]
+ ldp x20, x21, [sp, #S_X20]
+ ldp x22, x23, [sp, #S_X22]
+ ldp x24, x25, [sp, #S_X24]
+ ldp x26, x27, [sp, #S_X26]
+ ldp x28, x29, [sp, #S_X28]
+ .endm
+
+ENTRY(kretprobe_trampoline)
+ sub sp, sp, #S_FRAME_SIZE
+
+ save_all_base_regs
+
+ mov x0, sp
+ bl trampoline_probe_handler
+ /*
+ * Replace trampoline address in lr with actual orig_ret_addr return
+ * address.
+ */
+ mov lr, x0
+
+ restore_all_base_regs
+
+ add sp, sp, #S_FRAME_SIZE
+ ret
+
+ENDPROC(kretprobe_trampoline)
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000000..8977ce9d009d
--- /dev/null
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -0,0 +1,217 @@
+/*
+ * arch/arm64/kernel/probes/simulate-insn.c
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "simulate-insn.h"
+
+#define sign_extend(x, signbit) \
+ ((x) | (0 - ((x) & (1 << (signbit)))))
+
+#define bbl_displacement(insn) \
+ sign_extend(((insn) & 0x3ffffff) << 2, 27)
+
+#define bcond_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+
+#define cbz_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+
+#define tbz_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x3fff) << 2, 15)
+
+#define ldr_displacement(insn) \
+ sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
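+
+/*
+ * Worked example (illustrative only): a forward branch of 8 bytes encodes
+ * imm26 = 2 (opcode 0x14000002), so bbl_displacement() computes 2 << 2 = 8
+ * with the sign bit (bit 27 after the shift) clear; negative offsets set
+ * that bit and sign_extend() propagates it through the upper bits.
+ */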
+
+static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
+{
+ if (reg < 31)
+ regs->regs[reg] = val;
+}
+
+static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val)
+{
+ if (reg < 31)
+ regs->regs[reg] = lower_32_bits(val);
+}
+
+static inline u64 get_x_reg(struct pt_regs *regs, int reg)
+{
+ if (reg < 31)
+ return regs->regs[reg];
+ else
+ return 0;
+}
+
+static inline u32 get_w_reg(struct pt_regs *regs, int reg)
+{
+ if (reg < 31)
+ return lower_32_bits(regs->regs[reg]);
+ else
+ return 0;
+}
+
+static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+
+ return (opcode & (1 << 31)) ?
+ (get_x_reg(regs, xn) == 0) : (get_w_reg(regs, xn) == 0);
+}
+
+static bool __kprobes check_cbnz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+
+ return (opcode & (1 << 31)) ?
+ (get_x_reg(regs, xn) != 0) : (get_w_reg(regs, xn) != 0);
+}
+
+static bool __kprobes check_tbz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+ int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);
+
+ return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) == 0;
+}
+
+static bool __kprobes check_tbnz(u32 opcode, struct pt_regs *regs)
+{
+ int xn = opcode & 0x1f;
+ int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);
+
+ return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) != 0;
+}
+
+/*
+ * instruction simulation functions
+ */
+void __kprobes
+simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs)
+{
+ long imm, xn, val;
+
+ xn = opcode & 0x1f;
+ imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3);
+ imm = sign_extend(imm, 20);
+ if (opcode & 0x80000000)
+ val = (imm<<12) + (addr & 0xfffffffffffff000);
+ else
+ val = imm + addr;
+
+ set_x_reg(regs, xn, val);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
+
+void __kprobes
+simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = bbl_displacement(opcode);
+
+ /* Link register is x30 */
+ if (opcode & (1 << 31))
+ set_x_reg(regs, 30, addr + 4);
+
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (aarch32_opcode_cond_checks[opcode & 0xf](regs->pstate & 0xffffffff))
+ disp = bcond_displacement(opcode);
+
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int xn = (opcode >> 5) & 0x1f;
+
+ /* update pc first in case we're doing a "blr lr" */
+ instruction_pointer_set(regs, get_x_reg(regs, xn));
+
+ /* Link register is x30 */
+ if (((opcode >> 21) & 0x3) == 1)
+ set_x_reg(regs, 30, addr + 4);
+}
+
+void __kprobes
+simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (opcode & (1 << 24)) {
+ if (check_cbnz(opcode, regs))
+ disp = cbz_displacement(opcode);
+ } else {
+ if (check_cbz(opcode, regs))
+ disp = cbz_displacement(opcode);
+ }
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
+{
+ int disp = 4;
+
+ if (opcode & (1 << 24)) {
+ if (check_tbnz(opcode, regs))
+ disp = tbz_displacement(opcode);
+ } else {
+ if (check_tbz(opcode, regs))
+ disp = tbz_displacement(opcode);
+ }
+ instruction_pointer_set(regs, addr + disp);
+}
+
+void __kprobes
+simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
+{
+ u64 *load_addr;
+ int xn = opcode & 0x1f;
+ int disp;
+
+ disp = ldr_displacement(opcode);
+ load_addr = (u64 *) (addr + disp);
+
+ if (opcode & (1 << 30)) /* x0-x30 */
+ set_x_reg(regs, xn, *load_addr);
+ else /* w0-w30 */
+ set_w_reg(regs, xn, *load_addr);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
+
+void __kprobes
+simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
+{
+ s32 *load_addr;
+ int xn = opcode & 0x1f;
+ int disp;
+
+ disp = ldr_displacement(opcode);
+ load_addr = (s32 *) (addr + disp);
+
+ set_x_reg(regs, xn, *load_addr);
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+}
diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000000..050bde683c2d
--- /dev/null
+++ b/arch/arm64/kernel/probes/simulate-insn.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm64/kernel/probes/simulate-insn.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
+#define _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
+
+void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs);
+
+#endif /* _ARM_KERNEL_KPROBES_SIMULATE_INSN_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 3f6cd5c5234f..e0c81da60f76 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -48,6 +48,107 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+#define GPR_OFFSET_NAME(r) \
+ {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ GPR_OFFSET_NAME(0),
+ GPR_OFFSET_NAME(1),
+ GPR_OFFSET_NAME(2),
+ GPR_OFFSET_NAME(3),
+ GPR_OFFSET_NAME(4),
+ GPR_OFFSET_NAME(5),
+ GPR_OFFSET_NAME(6),
+ GPR_OFFSET_NAME(7),
+ GPR_OFFSET_NAME(8),
+ GPR_OFFSET_NAME(9),
+ GPR_OFFSET_NAME(10),
+ GPR_OFFSET_NAME(11),
+ GPR_OFFSET_NAME(12),
+ GPR_OFFSET_NAME(13),
+ GPR_OFFSET_NAME(14),
+ GPR_OFFSET_NAME(15),
+ GPR_OFFSET_NAME(16),
+ GPR_OFFSET_NAME(17),
+ GPR_OFFSET_NAME(18),
+ GPR_OFFSET_NAME(19),
+ GPR_OFFSET_NAME(20),
+ GPR_OFFSET_NAME(21),
+ GPR_OFFSET_NAME(22),
+ GPR_OFFSET_NAME(23),
+ GPR_OFFSET_NAME(24),
+ GPR_OFFSET_NAME(25),
+ GPR_OFFSET_NAME(26),
+ GPR_OFFSET_NAME(27),
+ GPR_OFFSET_NAME(28),
+ GPR_OFFSET_NAME(29),
+ GPR_OFFSET_NAME(30),
+ {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
+ REG_OFFSET_NAME(sp),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(pstate),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
+ on_irq_stack(addr, raw_smp_processor_id());
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack which
+ * is specified by @regs. If the @n-th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
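
These helpers exist so that probe-based tracers can fetch arguments by register name or by kernel-stack slot. A minimal usage sketch, assuming the regs_get_register() accessor from <asm/ptrace.h> added by this series (error handling elided):

#include <linux/kernel.h>
#include <asm/ptrace.h>

static unsigned long fetch_named_reg(struct pt_regs *regs, const char *name)
{
	int offs = regs_query_register_offset(name);	/* e.g. "x2", "lr", "sp" */

	return (offs < 0) ? 0 : regs_get_register(regs, offs);
}

static unsigned long fetch_stack_slot(struct pt_regs *regs, unsigned int n)
{
	/* n-th word on the kernel stack, or 0 if it is out of bounds */
	return regs_get_kernel_stack_nth(regs, n);
}
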
+
/*
* TODO: does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
@@ -1246,13 +1347,13 @@ static void tracehook_report_syscall(struct pt_regs *regs,
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
- /* Do the secure computing check first; failures should be fast. */
- if (secure_computing() == -1)
- return -1;
-
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+ /* Do the secure computing after ptrace; failures should be fast. */
+ if (secure_computing(NULL) == -1)
+ return -1;
+
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->syscallno);
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..51b73cdde287
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,130 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/kexec.h>
+#include <asm/page.h>
+#include <asm/sysreg.h>
+
+/*
+ * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
+ *
+ * The memory that the old kernel occupies may be overwritten when copying the
+ * new image to its final location. To ensure that the
+ * arm64_relocate_new_kernel routine, which does that copy, is not overwritten,
+ * all code and data needed by arm64_relocate_new_kernel must be between the
+ * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
+ * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
+ * control_code_page, a special page which has been set up to be preserved
+ * during the copy operation.
+ */
+ENTRY(arm64_relocate_new_kernel)
+
+ /* Setup the list loop variables. */
+ mov x17, x1 /* x17 = kimage_start */
+ mov x16, x0 /* x16 = kimage_head */
+ dcache_line_size x15, x0 /* x15 = dcache line size */
+ mov x14, xzr /* x14 = entry ptr */
+ mov x13, xzr /* x13 = copy dest */
+
+ /* Clear the sctlr_el2 flags. */
+ mrs x0, CurrentEL
+ cmp x0, #CurrentEL_EL2
+ b.ne 1f
+ mrs x0, sctlr_el2
+ ldr x1, =SCTLR_ELx_FLAGS
+ bic x0, x0, x1
+ msr sctlr_el2, x0
+ isb
+1:
+
+ /* Check if the new image needs relocation. */
+ tbnz x16, IND_DONE_BIT, .Ldone
+
+.Lloop:
+ and x12, x16, PAGE_MASK /* x12 = addr */
+
+ /* Test the entry flags. */
+.Ltest_source:
+ tbz x16, IND_SOURCE_BIT, .Ltest_indirection
+
+ /* Invalidate dest page to PoC. */
+ mov x0, x13
+ add x20, x0, #PAGE_SIZE
+ sub x1, x15, #1
+ bic x0, x0, x1
+2: dc ivac, x0
+ add x0, x0, x15
+ cmp x0, x20
+ b.lo 2b
+ dsb sy
+
+ mov x20, x13
+ mov x21, x12
+ copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7
+
+ /* dest += PAGE_SIZE */
+ add x13, x13, PAGE_SIZE
+ b .Lnext
+
+.Ltest_indirection:
+ tbz x16, IND_INDIRECTION_BIT, .Ltest_destination
+
+ /* ptr = addr */
+ mov x14, x12
+ b .Lnext
+
+.Ltest_destination:
+ tbz x16, IND_DESTINATION_BIT, .Lnext
+
+ /* dest = addr */
+ mov x13, x12
+
+.Lnext:
+ /* entry = *ptr++ */
+ ldr x16, [x14], #8
+
+ /* while (!(entry & DONE)) */
+ tbz x16, IND_DONE_BIT, .Lloop
+
+.Ldone:
+ /* wait for writes from copy_page to finish */
+ dsb nsh
+ ic iallu
+ dsb nsh
+ isb
+
+ /* Start new image. */
+ mov x0, xzr
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ br x17
+
+ENDPROC(arm64_relocate_new_kernel)
+
+.ltorg
+
+.align 3 /* To keep the 64-bit values below naturally aligned. */
+
+.Lcopy_end:
+.org KEXEC_CONTROL_PAGE_SIZE
+
+/*
+ * arm64_relocate_new_kernel_size - Number of bytes to copy to the
+ * control_code_page.
+ */
+.globl arm64_relocate_new_kernel_size
+arm64_relocate_new_kernel_size:
+ .quad .Lcopy_end - arm64_relocate_new_kernel
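
For readers who prefer C, the list walk that the assembly performs is roughly the following (illustrative only; the real routine must run from the preserved control page with the cache maintenance shown above). The IND_* flags live in the low bits of each entry, as defined in <linux/kexec.h>:

#include <linux/kexec.h>
#include <linux/string.h>

static void relocate_new_kernel_sketch(unsigned long head, void *dest)
{
	unsigned long entry = head;
	unsigned long *ptr = NULL;

	while (!(entry & IND_DONE)) {
		void *addr = (void *)(entry & PAGE_MASK);

		if (entry & IND_SOURCE) {
			memcpy(dest, addr, PAGE_SIZE);	/* place one page */
			dest += PAGE_SIZE;
		} else if (entry & IND_INDIRECTION) {
			ptr = addr;			/* next page of entries */
		} else if (entry & IND_DESTINATION) {
			dest = addr;			/* new copy target */
		}
		entry = *ptr++;				/* entry = *ptr++ */
	}
}
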
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3279defabaa2..536dce22fe76 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -39,9 +39,7 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
-#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/psci.h>
@@ -202,7 +200,7 @@ static void __init request_standard_resources(void)
struct resource *res;
kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
@@ -257,14 +255,17 @@ void __init setup_arch(char **cmdline_p)
*/
cpu_uninstall_idmap();
+ xen_early_init();
efi_init();
arm64_memblock_init();
+ paging_init();
+
+ acpi_table_upgrade();
+
/* Parse the ACPI tables for possible boot-time configuration */
acpi_boot_table_init();
- paging_init();
-
if (acpi_disabled)
unflatten_device_tree();
@@ -281,8 +282,6 @@ void __init setup_arch(char **cmdline_p)
else
psci_acpi_init();
- xen_early_init();
-
cpu_read_bootcpu_ops();
smp_init_cpus();
smp_build_mpidr_hash();
@@ -302,19 +301,6 @@ void __init setup_arch(char **cmdline_p)
}
}
-static int __init arm64_device_init(void)
-{
- if (of_have_populated_dt()) {
- of_iommu_init();
- of_platform_populate(NULL, of_default_bus_match_table,
- NULL, NULL);
- } else if (acpi_disabled) {
- pr_crit("Device tree not populated\n");
- }
- return 0;
-}
-arch_initcall_sync(arm64_device_init);
-
static int __init topology_init(void)
{
int i;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 678e0842cb3b..76a6d9263908 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -267,7 +267,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- local_dbg_enable();
local_irq_enable();
local_async_enable();
@@ -437,9 +436,9 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
save_boot_cpu_run_el();
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
@@ -560,6 +559,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
*/
acpi_set_mailbox_entry(cpu_count, processor);
+ early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));
+
cpu_count++;
}
@@ -694,6 +695,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(smp_processor_id());
/*
+ * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
+ * secondary CPUs present.
+ */
+ if (max_cpus == 0)
+ return;
+
+ /*
* Initialise the present map (which describes the set of CPUs
* actually populated at the present time) and release the
* secondaries from the bootloader.
@@ -909,3 +917,21 @@ int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int any_cpu = raw_smp_processor_id();
+
+ if (cpu_ops[any_cpu]->cpu_die)
+ return true;
+#endif
+ return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+ bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+ return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c5392081b49b..e04f83873af7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -41,6 +41,7 @@
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
+#include <asm/sysreg.h>
static const char *handler[]= {
"Synchronous Abort",
@@ -52,20 +53,18 @@ static const char *handler[]= {
int show_unhandled_signals = 1;
/*
- * Dump out the contents of some memory nicely...
+ * Dump out the contents of some kernel memory nicely...
*/
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top, bool compat)
+ unsigned long top)
{
unsigned long first;
mm_segment_t fs;
int i;
- unsigned int width = compat ? 4 : 8;
/*
* We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * to safely read from kernel space.
*/
fs = get_fs();
set_fs(KERNEL_DS);
@@ -79,22 +78,15 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
memset(str, ' ', sizeof(str));
str[sizeof(str) - 1] = '\0';
- for (p = first, i = 0; i < (32 / width)
- && p < top; i++, p += width) {
+ for (p = first, i = 0; i < (32 / 8)
+ && p < top; i++, p += 8) {
if (p >= bottom && p < top) {
unsigned long val;
- if (width == 8) {
- if (__get_user(val, (unsigned long *)p) == 0)
- sprintf(str + i * 17, " %016lx", val);
- else
- sprintf(str + i * 17, " ????????????????");
- } else {
- if (__get_user(val, (unsigned int *)p) == 0)
- sprintf(str + i * 9, " %08lx", val);
- else
- sprintf(str + i * 9, " ????????");
- }
+ if (__get_user(val, (unsigned long *)p) == 0)
+ sprintf(str + i * 17, " %016lx", val);
+ else
+ sprintf(str + i * 17, " ????????????????");
}
}
printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
@@ -111,21 +103,12 @@ static void dump_backtrace_entry(unsigned long where)
print_ip_sym(where);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
for (i = -4; i < 1; i++) {
unsigned int val, bad;
@@ -139,8 +122,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ if (!user_mode(regs)) {
+ mm_segment_t fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
@@ -216,7 +209,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
dump_mem("", "Exception stack", stack,
- stack + sizeof(struct pt_regs), false);
+ stack + sizeof(struct pt_regs));
}
}
}
@@ -254,10 +247,9 @@ static int __die(const char *str, int err, struct thread_info *thread,
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
- if (!user_mode(regs) || in_interrupt()) {
+ if (!user_mode(regs)) {
dump_mem(KERN_EMERG, "Stack: ", regs->sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk),
- compat_user_mode(regs));
+ THREAD_SIZE + (unsigned long)task_stack_page(tsk));
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
@@ -373,11 +365,59 @@ exit:
return fn ? fn(regs, instr) : 1;
}
-asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+static void force_signal_inject(int signal, int code, struct pt_regs *regs,
+ unsigned long address)
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
+ const char *desc;
+
+ switch (signal) {
+ case SIGILL:
+ desc = "undefined instruction";
+ break;
+ case SIGSEGV:
+ desc = "illegal memory access";
+ break;
+ default:
+ desc = "bad mode";
+ break;
+ }
+
+ if (unhandled_signal(current, signal) &&
+ show_unhandled_signals_ratelimited()) {
+ pr_info("%s[%d]: %s: pc=%p\n",
+ current->comm, task_pid_nr(current), desc, pc);
+ dump_instr(KERN_INFO, regs);
+ }
+ info.si_signo = signal;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = pc;
+
+ arm64_notify_die(desc, regs, &info, 0);
+}
+
+/*
+ * Set up process info to signal segmentation fault - called on access error.
+ */
+void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
+{
+ int code;
+
+ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, addr) == NULL)
+ code = SEGV_MAPERR;
+ else
+ code = SEGV_ACCERR;
+ up_read(&current->mm->mmap_sem);
+
+ force_signal_inject(SIGSEGV, code, regs, addr);
+}
+
+asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+{
/* check for AArch32 breakpoint instructions */
if (!aarch32_break_handler(regs))
return;
@@ -385,18 +425,66 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
- if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
- pr_info("%s[%d]: undefined instruction: pc=%p\n",
- current->comm, task_pid_nr(current), pc);
- dump_instr(KERN_INFO, regs);
- }
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+}
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = pc;
+void cpu_enable_cache_maint_trap(void *__unused)
+{
+ config_sctlr_el1(SCTLR_EL1_UCI, 0);
+}
- arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
+#define __user_cache_maint(insn, address, res) \
+ asm volatile ( \
+ "1: " insn ", %1\n" \
+ " mov %w0, #0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w2\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (res) \
+ : "r" (address), "i" (-EFAULT) )
+
+asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
+{
+ unsigned long address;
+ int ret;
+
+ /* if this is a write with: Op0=1, Op2=1, Op1=3, CRn=7 */
+ if ((esr & 0x01fffc01) == 0x0012dc00) {
+ int rt = (esr >> 5) & 0x1f;
+ int crm = (esr >> 1) & 0x0f;
+
+ address = (rt == 31) ? 0 : regs->regs[rt];
+
+ switch (crm) {
+ case 11: /* DC CVAU, gets promoted */
+ __user_cache_maint("dc civac", address, ret);
+ break;
+ case 10: /* DC CVAC, gets promoted */
+ __user_cache_maint("dc civac", address, ret);
+ break;
+ case 14: /* DC CIVAC */
+ __user_cache_maint("dc civac", address, ret);
+ break;
+ case 5: /* IC IVAU */
+ __user_cache_maint("ic ivau", address, ret);
+ break;
+ default:
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ return;
+ }
+ } else {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ return;
+ }
+
+ if (ret)
+ arm64_notify_segfault(regs, address);
+ else
+ regs->pc += 4;
}
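
The magic constants in do_sysinstr() come from the ISS layout for trapped system instructions (EC = ESR_ELx_EC_SYS64). The sketch below is my reading of that layout and of how the 0x01fffc01/0x0012dc00 pair encodes it; the mask additionally requires ISS bits [24:22], which are Res0 for this EC, to be zero:

/* ISS fields for a trapped SYS/MSR/MRS: Direction[0], CRm[4:1], Rt[9:5],
 * CRn[13:10], Op1[16:14], Op2[19:17], Op0[21:20].
 */
#define SYS_ISS_OP0(v)		((v) << 20)
#define SYS_ISS_OP2(v)		((v) << 17)
#define SYS_ISS_OP1(v)		((v) << 14)
#define SYS_ISS_CRN(v)		((v) << 10)
#define SYS_ISS_WRITE		0	/* Direction bit clear => write */

/* Op0=1, Op2=1, Op1=3, CRn=7: the EL0 DC/IC maintenance operations */
#define SYS_ISS_UCI_VAL		(SYS_ISS_OP0(1) | SYS_ISS_OP2(1) | \
				 SYS_ISS_OP1(3) | SYS_ISS_CRN(7) | \
				 SYS_ISS_WRITE)	/* == 0x0012dc00 */
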
long compat_arm_syscall(struct pt_regs *regs);
@@ -465,7 +553,7 @@ static const char *esr_class_str[] = {
const char *esr_get_class_string(u32 esr)
{
- return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+ return esr_class_str[ESR_ELx_EC(esr)];
}
/*
@@ -477,8 +565,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
- pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
- handler[reason], esr, esr_get_class_string(esr));
+ pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+ handler[reason], smp_processor_id(), esr,
+ esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 9fefb005812a..076312b17d4f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -214,10 +214,16 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
+ /* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+ vdso_data->raw_time_sec = tk->raw_time.tv_sec;
+ vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
- vdso_data->cs_mult = tk->tkr_mono.mult;
+ /* tkr_raw.xtime_nsec == 0 */
+ vdso_data->cs_mono_mult = tk->tkr_mono.mult;
+ vdso_data->cs_raw_mult = tk->tkr_raw.mult;
+ /* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
}
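
The new fields let the vDSO service CLOCK_MONOTONIC_RAW without a syscall. A rough C rendering of the read side is sketched here (the actual implementation is the assembly in gettimeofday.S further down; read_cntvct() is a stand-in for the isb + mrs cntvct_el0 sequence, and the barriers and odd-seqcount retry are elided):

static int do_monotonic_raw(const struct vdso_data *vd, struct timespec *ts)
{
	u64 shifted_nsec, nsec_per_sec_shifted, sec;
	u32 seq;

	do {
		seq = READ_ONCE(vd->tb_seq_count);
		if (vd->use_syscall)
			return -1;	/* caller falls back to the syscall */
		/* delta since cycle_last, truncated to 56 bits as in the asm */
		shifted_nsec = ((read_cntvct() - vd->cs_cycle_last) &
				GENMASK_ULL(55, 0)) * vd->cs_raw_mult;
		shifted_nsec += (u64)vd->raw_time_nsec << vd->cs_shift;
		sec = vd->raw_time_sec;
	} while (READ_ONCE(vd->tb_seq_count) != seq);

	nsec_per_sec_shifted = (u64)NSEC_PER_SEC << vd->cs_shift;
	ts->tv_sec  = sec + shifted_nsec / nsec_per_sec_shifted;
	ts->tv_nsec = (shifted_nsec % nsec_per_sec_shifted) >> vd->cs_shift;
	return 0;
}
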
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index b467fd0a384b..62c84f7cb01b 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -23,7 +23,7 @@ GCOV_PROFILE := n
ccflags-y += -Wl,-shared
obj-y += vdso.o
-extra-y += vdso.lds vdso-offsets.h
+extra-y += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
# Force dependency (incbin is bad)
@@ -42,11 +42,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
define cmd_vdsosym
- $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \
- cp $@ include/generated/
+ $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
endef
-$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
# Assembly rules for the .S files
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index efa79e8d4196..e00b4671bd7c 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -26,24 +26,109 @@
#define NSEC_PER_SEC_HI16 0x3b9a
vdso_data .req x6
-use_syscall .req w7
-seqcnt .req w8
+seqcnt .req w7
+w_tmp .req w8
+x_tmp .req x8
+
+/*
+ * Conventions for macro arguments:
+ * - An argument is write-only if its name starts with "res".
+ * - All other arguments are read-only, unless otherwise specified.
+ */
.macro seqcnt_acquire
9999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
tbnz seqcnt, #0, 9999b
dmb ishld
- ldr use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
.endm
- .macro seqcnt_read, cnt
+ .macro seqcnt_check fail
dmb ishld
- ldr \cnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
+ ldr w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
+ cmp w_tmp, seqcnt
+ b.ne \fail
.endm
- .macro seqcnt_check, cnt, fail
- cmp \cnt, seqcnt
- b.ne \fail
+ .macro syscall_check fail
+ ldr w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
+ cbnz w_tmp, \fail
+ .endm
+
+ .macro get_nsec_per_sec res
+ mov \res, #NSEC_PER_SEC_LO16
+ movk \res, #NSEC_PER_SEC_HI16, lsl #16
+ .endm
+
+ /*
+ * Returns the clock delta, in nanoseconds left-shifted by the clock
+ * shift.
+ */
+ .macro get_clock_shifted_nsec res, cycle_last, mult
+ /* Read the virtual counter. */
+ isb
+ mrs x_tmp, cntvct_el0
+ /* Calculate cycle delta and convert to ns. */
+ sub \res, x_tmp, \cycle_last
+ /* We can only guarantee 56 bits of precision. */
+ movn x_tmp, #0xff00, lsl #48
+ and \res, x_tmp, \res
+ mul \res, \res, \mult
+ .endm
+
+ /*
+ * Returns in res_{sec,nsec} the REALTIME timespec, based on the
+ * "wall time" (xtime) and the clock_mono delta.
+ */
+ .macro get_ts_realtime res_sec, res_nsec, \
+ clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
+ add \res_nsec, \clock_nsec, \xtime_nsec
+ udiv x_tmp, \res_nsec, \nsec_to_sec
+ add \res_sec, \xtime_sec, x_tmp
+ msub \res_nsec, x_tmp, \nsec_to_sec, \res_nsec
+ .endm
+
+ /*
+ * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
+ * used for CLOCK_MONOTONIC_RAW.
+ */
+ .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
+ udiv \res_sec, \clock_nsec, \nsec_to_sec
+ msub \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
+ .endm
+
+ /* sec and nsec are modified in place. */
+ .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
+ /* Add timespec. */
+ add \sec, \sec, \ts_sec
+ add \nsec, \nsec, \ts_nsec
+
+ /* Normalise the new timespec. */
+ cmp \nsec, \nsec_to_sec
+ b.lt 9999f
+ sub \nsec, \nsec, \nsec_to_sec
+ add \sec, \sec, #1
+9999:
+ cmp \nsec, #0
+ b.ge 9998f
+ add \nsec, \nsec, \nsec_to_sec
+ sub \sec, \sec, #1
+9998:
+ .endm
+
+ .macro clock_gettime_return, shift=0
+ .if \shift == 1
+ lsr x11, x11, x12
+ .endif
+ stp x10, x11, [x1, #TSPEC_TV_SEC]
+ mov x0, xzr
+ ret
+ .endm
+
+ .macro jump_slot jumptable, index, label
+ .if (. - \jumptable) != 4 * (\index)
+ .error "Jump slot index mismatch"
+ .endif
+ b \label
.endm
.text
@@ -51,18 +136,25 @@ seqcnt .req w8
/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
ENTRY(__kernel_gettimeofday)
.cfi_startproc
- mov x2, x30
- .cfi_register x30, x2
-
- /* Acquire the sequence counter and get the timespec. */
adr vdso_data, _vdso_data
-1: seqcnt_acquire
- cbnz use_syscall, 4f
-
/* If tv is NULL, skip to the timezone code. */
cbz x0, 2f
- bl __do_get_tspec
- seqcnt_check w9, 1b
+
+ /* Compute the time of day. */
+1: seqcnt_acquire
+ syscall_check fail=4f
+ ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+ /* w11 = cs_mono_mult, w12 = cs_shift */
+ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+ ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+ seqcnt_check fail=1b
+
+ get_nsec_per_sec res=x9
+ lsl x9, x9, x12
+
+ get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ get_ts_realtime res_sec=x10, res_nsec=x11, \
+ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
/* Convert ns to us. */
mov x13, #1000
@@ -76,95 +168,126 @@ ENTRY(__kernel_gettimeofday)
stp w4, w5, [x1, #TZ_MINWEST]
3:
mov x0, xzr
- ret x2
+ ret
4:
/* Syscall fallback. */
mov x8, #__NR_gettimeofday
svc #0
- ret x2
+ ret
.cfi_endproc
ENDPROC(__kernel_gettimeofday)
+#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE
+
/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
ENTRY(__kernel_clock_gettime)
.cfi_startproc
- cmp w0, #CLOCK_REALTIME
- ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
- b.ne 2f
+ cmp w0, #JUMPSLOT_MAX
+ b.hi syscall
+ adr vdso_data, _vdso_data
+ adr x_tmp, jumptable
+ add x_tmp, x_tmp, w0, uxtw #2
+ br x_tmp
+
+ ALIGN
+jumptable:
+ jump_slot jumptable, CLOCK_REALTIME, realtime
+ jump_slot jumptable, CLOCK_MONOTONIC, monotonic
+ b syscall
+ b syscall
+ jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
+ jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
+ jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
+
+ .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
+ .error "Wrong jumptable size"
+ .endif
+
+ ALIGN
+realtime:
+ seqcnt_acquire
+ syscall_check fail=syscall
+ ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+ /* w11 = cs_mono_mult, w12 = cs_shift */
+ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+ ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+ seqcnt_check fail=realtime
- mov x2, x30
- .cfi_register x30, x2
+ /* All computations are done with left-shifted nsecs. */
+ get_nsec_per_sec res=x9
+ lsl x9, x9, x12
- /* Get kernel timespec. */
- adr vdso_data, _vdso_data
-1: seqcnt_acquire
- cbnz use_syscall, 7f
+ get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ get_ts_realtime res_sec=x10, res_nsec=x11, \
+ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
+ clock_gettime_return, shift=1
- bl __do_get_tspec
- seqcnt_check w9, 1b
+ ALIGN
+monotonic:
+ seqcnt_acquire
+ syscall_check fail=syscall
+ ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+ /* w11 = cs_mono_mult, w12 = cs_shift */
+ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
+ ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
+ ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
+ seqcnt_check fail=monotonic
- mov x30, x2
+ /* All computations are done with left-shifted nsecs. */
+ lsl x4, x4, x12
+ get_nsec_per_sec res=x9
+ lsl x9, x9, x12
- cmp w0, #CLOCK_MONOTONIC
- b.ne 6f
+ get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ get_ts_realtime res_sec=x10, res_nsec=x11, \
+ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
- /* Get wtm timespec. */
- ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
+ add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
+ clock_gettime_return, shift=1
- /* Check the sequence counter. */
- seqcnt_read w9
- seqcnt_check w9, 1b
- b 4f
-2:
- cmp w0, #CLOCK_REALTIME_COARSE
- ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
- b.ne 8f
+ ALIGN
+monotonic_raw:
+ seqcnt_acquire
+ syscall_check fail=syscall
+ ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+ /* w11 = cs_raw_mult, w12 = cs_shift */
+ ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
+ ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
+ seqcnt_check fail=monotonic_raw
- /* xtime_coarse_nsec is already right-shifted */
- mov x12, #0
+ /* All computations are done with left-shifted nsecs. */
+ lsl x14, x14, x12
+ get_nsec_per_sec res=x9
+ lsl x9, x9, x12
- /* Get coarse timespec. */
- adr vdso_data, _vdso_data
-3: seqcnt_acquire
+ get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ get_ts_clock_raw res_sec=x10, res_nsec=x11, \
+ clock_nsec=x15, nsec_to_sec=x9
+
+ add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
+ clock_gettime_return, shift=1
+
+ ALIGN
+realtime_coarse:
+ seqcnt_acquire
ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
+ seqcnt_check fail=realtime_coarse
+ clock_gettime_return
- /* Get wtm timespec. */
+ ALIGN
+monotonic_coarse:
+ seqcnt_acquire
+ ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
+ seqcnt_check fail=monotonic_coarse
- /* Check the sequence counter. */
- seqcnt_read w9
- seqcnt_check w9, 3b
+ /* Computations are done in (non-shifted) nsecs. */
+ get_nsec_per_sec res=x9
+ add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
+ clock_gettime_return
- cmp w0, #CLOCK_MONOTONIC_COARSE
- b.ne 6f
-4:
- /* Add on wtm timespec. */
- add x10, x10, x13
- lsl x14, x14, x12
- add x11, x11, x14
-
- /* Normalise the new timespec. */
- mov x15, #NSEC_PER_SEC_LO16
- movk x15, #NSEC_PER_SEC_HI16, lsl #16
- lsl x15, x15, x12
- cmp x11, x15
- b.lt 5f
- sub x11, x11, x15
- add x10, x10, #1
-5:
- cmp x11, #0
- b.ge 6f
- add x11, x11, x15
- sub x10, x10, #1
-
-6: /* Store to the user timespec. */
- lsr x11, x11, x12
- stp x10, x11, [x1, #TSPEC_TV_SEC]
- mov x0, xzr
- ret
-7:
- mov x30, x2
-8: /* Syscall fallback. */
+ ALIGN
+syscall: /* Syscall fallback. */
mov x8, #__NR_clock_gettime
svc #0
ret
@@ -176,6 +299,7 @@ ENTRY(__kernel_clock_getres)
.cfi_startproc
cmp w0, #CLOCK_REALTIME
ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
+ ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
b.ne 1f
ldr x2, 5f
@@ -203,46 +327,3 @@ ENTRY(__kernel_clock_getres)
.quad CLOCK_COARSE_RES
.cfi_endproc
ENDPROC(__kernel_clock_getres)
-
-/*
- * Read the current time from the architected counter.
- * Expects vdso_data to be initialised.
- * Clobbers the temporary registers (x9 - x15).
- * Returns:
- * - w9 = vDSO sequence counter
- * - (x10, x11) = (ts->tv_sec, shifted ts->tv_nsec)
- * - w12 = cs_shift
- */
-ENTRY(__do_get_tspec)
- .cfi_startproc
-
- /* Read from the vDSO data page. */
- ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
- ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- ldp w11, w12, [vdso_data, #VDSO_CS_MULT]
- seqcnt_read w9
-
- /* Read the virtual counter. */
- isb
- mrs x15, cntvct_el0
-
- /* Calculate cycle delta and convert to ns. */
- sub x10, x15, x10
- /* We can only guarantee 56 bits of precision. */
- movn x15, #0xff00, lsl #48
- and x10, x15, x10
- mul x10, x10, x11
-
- /* Use the kernel time to calculate the new timespec. */
- mov x11, #NSEC_PER_SEC_LO16
- movk x11, #NSEC_PER_SEC_HI16, lsl #16
- lsl x11, x11, x12
- add x15, x10, x14
- udiv x14, x15, x11
- add x10, x13, x14
- mul x13, x14, x11
- sub x11, x15, x13
-
- ret
- .cfi_endproc
-ENDPROC(__do_get_tspec)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 435e820e898d..89d6e177ecbd 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -118,9 +118,11 @@ SECTIONS
__exception_text_end = .;
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
+ ENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
HYPERVISOR_TEXT
IDMAP_TEXT
HIBERNATE_TEXT
@@ -131,12 +133,13 @@ SECTIONS
}
. = ALIGN(SEGMENT_ALIGN);
- RO_DATA(PAGE_SIZE) /* everything from this point to */
- EXCEPTION_TABLE(8) /* _etext will be marked RO NX */
+ _etext = .; /* End of text section */
+
+ RO_DATA(PAGE_SIZE) /* everything from this point to */
+ EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
NOTES
. = ALIGN(SEGMENT_ALIGN);
- _etext = .; /* End of text and rodata section */
__init_begin = .;
INIT_TEXT_SECTION(8)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 3246c4aba5b1..fa96fe2bd469 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -106,7 +106,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.hsr = hsr;
- switch (hsr >> ESR_ELx_EC_SHIFT) {
+ switch (ESR_ELx_EC(hsr)) {
case ESR_ELx_EC_WATCHPT_LOW:
run->debug.arch.far = vcpu->arch.fault.far_el2;
/* fall through */
@@ -149,7 +149,7 @@ static exit_handle_fn arm_exit_handlers[] = {
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);
- u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;
+ u8 hsr_ec = ESR_ELx_EC(hsr);
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 778d0effa2af..0c85febcc1eb 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -17,6 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
+# KVM code is run at a different exception level with a different map, so
+# compiler instrumentation that inserts callbacks or checks into the code may
+# cause crashes. Just disable it.
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 437cfad5e3d8..4373997d1a70 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -198,7 +198,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
u64 esr = read_sysreg_el2(esr);
- u8 ec = esr >> ESR_ELx_EC_SHIFT;
+ u8 ec = ESR_ELx_EC(esr);
u64 hpfar, far;
vcpu->arch.fault.esr_el2 = esr;
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 0f7c40eb3f53..934137647837 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
/*
* Non-VHE: Both host and guest must save everything.
*
- * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and
- * guest must save everything.
+ * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
+ * pstate, and guest must save everything.
*/
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
@@ -37,6 +37,7 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+ ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
@@ -61,7 +62,6 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
- ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
@@ -90,6 +90,7 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+ write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
@@ -114,7 +115,6 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
- write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index fff7cd42b3a3..5f8f80b4a224 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
* Make sure stores to the GIC via the memory mapped interface
* are now visible to the system register interface.
*/
- dsb(st);
+ if (!cpu_if->vgic_sre)
+ dsb(st);
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
continue;
- if (cpu_if->vgic_elrsr & (1 << i)) {
+ if (cpu_if->vgic_elrsr & (1 << i))
cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
- continue;
- }
+ else
+ cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
- cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
__gic_v3_set_lr(0, i);
}
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
val = read_gicreg(ICC_SRE_EL2);
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
- isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
- write_gicreg(1, ICC_SRE_EL1);
+
+ if (!cpu_if->vgic_sre) {
+ /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+ isb();
+ write_gicreg(1, ICC_SRE_EL1);
+ }
}
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
* been actually programmed with the value we want before
* starting to mess with the rest of the GIC.
*/
- write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
- isb();
+ if (!cpu_if->vgic_sre) {
+ write_gicreg(0, ICC_SRE_EL1);
+ isb();
+ }
val = read_gicreg(ICH_VTR_EL2);
max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
* (re)distributors. This ensures the guest will read the
* correct values from the memory-mapped interface.
*/
- isb();
- dsb(sy);
+ if (!cpu_if->vgic_sre) {
+ isb();
+ dsb(sy);
+ }
vcpu->arch.vgic_cpu.live_lrs = live_lrs;
/*
* Prevent the guest from touching the GIC system registers if
* SRE isn't enabled for GICv3 emulation.
*/
- if (!cpu_if->vgic_sre) {
- write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
- ICC_SRE_EL2);
- }
+ write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+ ICC_SRE_EL2);
}
void __hyp_text __vgic_v3_init_lrs(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7bbe3ff02602..a57d650f552c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
return true;
}
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+ return true;
+}
+
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_gic_sgi },
/* ICC_SRE_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
- trap_raz_wi },
+ access_gic_sre },
/* CONTEXTIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 17e8306dca29..0b90497d4424 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -66,7 +66,7 @@
.endm
end .req x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
add end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x0, #0 // Nothing to copy
ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 21faae60f988..7a7efe255034 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -65,7 +65,7 @@
.endm
end .req x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
add end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 50ff9ba3a236..07d7352d7c38 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -52,7 +52,7 @@ ENTRY(__flush_cache_user_range)
sub x3, x2, #1
bic x4, x0, x3
1:
-USER(9f, dc cvau, x4 ) // clean D line to PoU
+user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
add x4, x4, x2
cmp x4, x1
b.lo 1b
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b7b397802088..efcf1f7ef1e4 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
&asid_generation);
flush_context(cpu);
- /* We have at least 1 ASID per CPU, so this will always succeed */
+ /* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
- /* If we end up with more CPUs than ASIDs, expect things to crash */
- WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+ */
+ WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
GFP_KERNEL);
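
Restated with illustrative numbers: after a rollover every possible CPU may keep the ASID it is currently running with, and ASID #0 stays reserved for init_mm, so a fresh allocation is only guaranteed when there are strictly more ASIDs than CPUs plus the reserved one. A small sketch of the condition the new WARN_ON checks:

static inline bool asid_space_is_sufficient(unsigned int asid_bits,
					    unsigned int possible_cpus)
{
	unsigned long num_user_asids = 1UL << asid_bits; /* e.g. 256 for 8-bit ASIDs */

	/* 255 usable ASIDs cover at most 254 CPUs plus one new mm */
	return num_user_asids - 1 > possible_cpus;
}
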
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index c566ec83719f..f6c55afab3e2 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -19,6 +19,7 @@
#include <linux/gfp.h>
#include <linux/acpi.h>
+#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
@@ -29,6 +30,8 @@
#include <asm/cacheflush.h>
+static int swiotlb __read_mostly;
+
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
@@ -341,6 +344,13 @@ static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
return ret;
}
+static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
+{
+ if (swiotlb)
+ return swiotlb_dma_supported(hwdev, mask);
+ return 1;
+}
+
static struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
@@ -354,7 +364,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
.sync_single_for_device = __swiotlb_sync_single_for_device,
.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = __swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
+ .dma_supported = __swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
@@ -513,6 +523,9 @@ EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
{
+ if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ swiotlb = 1;
+
return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
@@ -848,15 +861,16 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
{
struct iommu_dma_notifier_data *master, *tmp;
- if (action != BUS_NOTIFY_ADD_DEVICE)
+ if (action != BUS_NOTIFY_BIND_DRIVER)
return 0;
mutex_lock(&iommu_dma_notifier_lock);
list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
- if (do_iommu_attach(master->dev, master->ops,
- master->dma_base, master->size)) {
+ if (data == master->dev && do_iommu_attach(master->dev,
+ master->ops, master->dma_base, master->size)) {
list_del(&master->list);
kfree(master);
+ break;
}
}
mutex_unlock(&iommu_dma_notifier_lock);
@@ -870,17 +884,8 @@ static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
if (!nb)
return -ENOMEM;
- /*
- * The device must be attached to a domain before the driver probe
- * routine gets a chance to start allocating DMA buffers. However,
- * the IOMMU driver also needs a chance to configure the iommu_group
- * via its add_device callback first, so we need to make the attach
- * happen between those two points. Since the IOMMU core uses a bus
- * notifier with default priority for add_device, do the same but
- * with a lower priority to ensure the appropriate ordering.
- */
+
nb->notifier_call = __iommu_attach_notifier;
- nb->priority = -100;
ret = bus_register_notifier(bus, nb);
if (ret) {
@@ -904,10 +909,6 @@ static int __init __iommu_dma_init(void)
if (!ret)
ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
-
- /* handle devices queued before this arch_initcall */
- if (!ret)
- __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
return ret;
}
arch_initcall(__iommu_dma_init);
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 8404190fe2bd..f94b80eb295d 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -27,11 +27,7 @@
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
-
-struct addr_marker {
- unsigned long start_address;
- const char *name;
-};
+#include <asm/ptdump.h>
static const struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
@@ -150,6 +146,7 @@ static const struct prot_bits pte_bits[] = {
struct pg_level {
const struct prot_bits *bits;
+ const char *name;
size_t num;
u64 mask;
};
@@ -157,15 +154,19 @@ struct pg_level {
static struct pg_level pg_level[] = {
{
}, { /* pgd */
+ .name = "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pud */
+ .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pmd */
+ .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pte */
+ .name = "PTE",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
},
@@ -214,7 +215,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
delta >>= 10;
unit++;
}
- seq_printf(st->seq, "%9lu%c", delta, *unit);
+ seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+ pg_level[st->level].name);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits,
pg_level[st->level].num);
@@ -284,7 +286,8 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
}
}
-static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
+static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
+ unsigned long start)
{
pgd_t *pgd = pgd_offset(mm, 0UL);
unsigned i;
@@ -303,12 +306,13 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long st
static int ptdump_show(struct seq_file *m, void *v)
{
+ struct ptdump_info *info = m->private;
struct pg_state st = {
.seq = m,
- .marker = address_markers,
+ .marker = info->markers,
};
- walk_pgd(&st, &init_mm, VA_START);
+ walk_pgd(&st, info->mm, info->base_addr);
note_page(&st, 0, 0, 0);
return 0;
@@ -316,7 +320,7 @@ static int ptdump_show(struct seq_file *m, void *v)
static int ptdump_open(struct inode *inode, struct file *file)
{
- return single_open(file, ptdump_show, NULL);
+ return single_open(file, ptdump_show, inode->i_private);
}
static const struct file_operations ptdump_fops = {
@@ -326,7 +330,7 @@ static const struct file_operations ptdump_fops = {
.release = single_release,
};
-static int ptdump_init(void)
+int ptdump_register(struct ptdump_info *info, const char *name)
{
struct dentry *pe;
unsigned i, j;
@@ -336,8 +340,18 @@ static int ptdump_init(void)
for (j = 0; j < pg_level[i].num; j++)
pg_level[i].mask |= pg_level[i].bits[j].mask;
- pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
- &ptdump_fops);
+ pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
return pe ? 0 : -ENOMEM;
}
+
+static struct ptdump_info kernel_ptdump_info = {
+ .mm = &init_mm,
+ .markers = address_markers,
+ .base_addr = VA_START,
+};
+
+static int ptdump_init(void)
+{
+ return ptdump_register(&kernel_ptdump_info, "kernel_page_tables");
+}
device_initcall(ptdump_init);
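
With the debugfs entry now driven by a ptdump_info descriptor, other translation-table roots can be exposed the same way. A hedged example of a second user (assuming efi_mm is reachable via <linux/efi.h>, which is how a later consumer of this hook uses it; names are illustrative):

#include <linux/efi.h>
#include <asm/ptdump.h>

static const struct addr_marker efi_markers[] = {
	{ 0UL,		"UEFI runtime start" },
	{ TASK_SIZE_64,	"UEFI runtime end" },
};

static struct ptdump_info efi_ptdump_info = {
	.mm		= &efi_mm,
	.markers	= efi_markers,
	.base_addr	= 0UL,
};

static int __init efi_ptdump_init(void)
{
	return ptdump_register(&efi_ptdump_info, "efi_page_tables");
}
device_initcall(efi_ptdump_init);
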
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5954881a35ac..c8beaa0da7df 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -41,6 +41,28 @@
static const char *fault_name(unsigned int esr);
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+{
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+ if (!user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, esr))
+ ret = 1;
+ preempt_enable();
+ }
+
+ return ret;
+}
+#else
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+{
+ return 0;
+}
+#endif
+
/*
* Dump out the page tables associated with 'addr' in mm 'mm'.
*/
@@ -109,7 +131,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* PTE_RDONLY is cleared by default in the asm below, so set it in
* back if necessary (read-only or clean PTE).
*/
- if (!pte_write(entry) || !dirty)
+ if (!pte_write(entry) || !pte_sw_dirty(entry))
pte_val(entry) |= PTE_RDONLY;
/*
@@ -202,8 +224,6 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
-#define ESR_LNX_EXEC (1 << 24)
-
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
unsigned int mm_flags, unsigned long vm_flags,
struct task_struct *tsk)
@@ -233,7 +253,7 @@ good_area:
goto out;
}
- return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
+ return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -242,14 +262,19 @@ out:
return fault;
}
-static inline int permission_fault(unsigned int esr)
+static inline bool is_permission_fault(unsigned int esr)
{
- unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
+ unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
}
+static bool is_el0_instruction_abort(unsigned int esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
+}
+
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -259,6 +284,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ if (notify_page_fault(regs, esr))
+ return 0;
+
tsk = current;
mm = tsk->mm;
@@ -272,15 +300,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (user_mode(regs))
mm_flags |= FAULT_FLAG_USER;
- if (esr & ESR_LNX_EXEC) {
+ if (is_el0_instruction_abort(esr)) {
vm_flags = VM_EXEC;
} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
- if (permission_fault(esr) && (addr < USER_DS)) {
- if (get_fs() == KERNEL_DS)
+ if (is_permission_fault(esr) && (addr < USER_DS)) {
+ /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+ if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
if (!search_exception_tables(regs->pc))
@@ -441,7 +470,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 1;
}
-static struct fault_info {
+static const struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
int sig;
int code;
@@ -629,6 +658,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
return rv;
}
+NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
void cpu_enable_pan(void *__unused)
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index dbd12ea8ce68..43a76b07eb32 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
struct page *page = pte_page(pte);
- /* no flushing needed for anonymous pages */
- if (!page_mapping(page))
- return;
-
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
sync_icache_aliases(page_address(page),
PAGE_SIZE << compound_order(page));
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index aa8aee7d6929..2e49bd252fe7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ } else if (ps == (PAGE_SIZE * CONT_PTES)) {
+ hugetlb_add_hstate(CONT_PTE_SHIFT);
+ } else if (ps == (PMD_SIZE * CONT_PMDS)) {
+ hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
hugetlb_bad_size();
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+ if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+ hugetlb_add_hstate(CONT_PMD_SHIFT);
+ return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
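
The two new branches accept the contiguous-hint sizes; with the usual arm64 CONT_* values (configuration-dependent, so treat the numbers below as illustrative) they work out as in this sketch, e.g. hugepagesz=2M on a 64K-page kernel now selects the CONT_PTE case:

/* 4K granule:  CONT_PTES = 16 -> 16 * 4K  = 64K,  CONT_PMDS = 16 -> 16 * 2M   = 32M
 * 64K granule: CONT_PTES = 32 -> 32 * 64K = 2M,   CONT_PMDS = 32 -> 32 * 512M = 16G
 */
static bool is_contiguous_hugepage_size(unsigned long ps)
{
	return ps == PAGE_SIZE * CONT_PTES || ps == PMD_SIZE * CONT_PMDS;
}
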
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d45f8627012c..bbb7ee76e319 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -160,12 +160,10 @@ static void __init arm64_memory_present(void)
static void __init arm64_memory_present(void)
{
struct memblock_region *reg;
- int nid = 0;
for_each_memblock(memory, reg) {
-#ifdef CONFIG_NUMA
- nid = reg->nid;
-#endif
+ int nid = memblock_get_region_node(reg);
+
memory_present(nid, memblock_region_memory_base_pfn(reg),
memblock_region_memory_end_pfn(reg));
}
@@ -226,7 +224,7 @@ void __init arm64_memblock_init(void)
* via the linear mapping.
*/
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
- memblock_enforce_memory_limit(memory_limit);
+ memblock_mem_limit_remove_map(memory_limit);
memblock_add(__pa(_text), (u64)(_end - _text));
}
@@ -403,7 +401,8 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
- swiotlb_init(1);
+ if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ swiotlb_init(1);
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
@@ -430,9 +429,9 @@ void __init mem_init(void)
pr_cont(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
MLG(VMALLOC_START, VMALLOC_END));
pr_cont(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(_text, __start_rodata));
+ MLK_ROUNDUP(_text, _etext));
pr_cont(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(__start_rodata, _etext));
+ MLK_ROUNDUP(__start_rodata, __init_begin));
pr_cont(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__init_begin, __init_end));
pr_cont(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0f85a46c3e18..51a558195bb9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -77,7 +77,6 @@ static phys_addr_t __init early_pgtable_alloc(void)
void *ptr;
phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- BUG_ON(!phys);
/*
* The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
@@ -97,24 +96,6 @@ static phys_addr_t __init early_pgtable_alloc(void)
return phys;
}
-/*
- * remap a PMD into pages
- */
-static void split_pmd(pmd_t *pmd, pte_t *pte)
-{
- unsigned long pfn = pmd_pfn(*pmd);
- int i = 0;
-
- do {
- /*
- * Need to have the least restrictive permissions available
- * permissions will be fixed up later
- */
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
- pfn++;
- } while (pte++, i++, i < PTRS_PER_PTE);
-}
-
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
@@ -122,15 +103,13 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
{
pte_t *pte;
- if (pmd_none(*pmd) || pmd_sect(*pmd)) {
+ BUG_ON(pmd_sect(*pmd));
+ if (pmd_none(*pmd)) {
phys_addr_t pte_phys;
BUG_ON(!pgtable_alloc);
pte_phys = pgtable_alloc();
pte = pte_set_fixmap(pte_phys);
- if (pmd_sect(*pmd))
- split_pmd(pmd, pte);
__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
- flush_tlb_all();
pte_clear_fixmap();
}
BUG_ON(pmd_bad(*pmd));
@@ -144,41 +123,10 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte_clear_fixmap();
}
-static void split_pud(pud_t *old_pud, pmd_t *pmd)
-{
- unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
- pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
- int i = 0;
-
- do {
- set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
- addr += PMD_SIZE;
- } while (pmd++, i++, i < PTRS_PER_PMD);
-}
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
-
- /*
- * If debug_page_alloc is enabled we must map the linear map
- * using pages. However, other mappings created by
- * create_mapping_noalloc must use sections in some cases. Allow
- * sections to be used in those cases, where no pgtable_alloc
- * function is provided.
- */
- return !pgtable_alloc || !debug_pagealloc_enabled();
-}
-#else
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
- return true;
-}
-#endif
-
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ phys_addr_t (*pgtable_alloc)(void),
+ bool allow_block_mappings)
{
pmd_t *pmd;
unsigned long next;
@@ -186,20 +134,13 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
/*
* Check for initial section mappings in the pgd/pud and remove them.
*/
- if (pud_none(*pud) || pud_sect(*pud)) {
+ BUG_ON(pud_sect(*pud));
+ if (pud_none(*pud)) {
phys_addr_t pmd_phys;
BUG_ON(!pgtable_alloc);
pmd_phys = pgtable_alloc();
pmd = pmd_set_fixmap(pmd_phys);
- if (pud_sect(*pud)) {
- /*
- * need to have the 1G of mappings continue to be
- * present
- */
- split_pud(pud, pmd);
- }
__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
- flush_tlb_all();
pmd_clear_fixmap();
}
BUG_ON(pud_bad(*pud));
@@ -209,7 +150,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- block_mappings_allowed(pgtable_alloc)) {
+ allow_block_mappings) {
pmd_t old_pmd =*pmd;
pmd_set_huge(pmd, phys, prot);
/*
@@ -248,7 +189,8 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ phys_addr_t (*pgtable_alloc)(void),
+ bool allow_block_mappings)
{
pud_t *pud;
unsigned long next;
@@ -268,8 +210,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (use_1G_block(addr, next, phys) &&
- block_mappings_allowed(pgtable_alloc)) {
+ if (use_1G_block(addr, next, phys) && allow_block_mappings) {
pud_t old_pud = *pud;
pud_set_huge(pud, phys, prot);
@@ -290,7 +231,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
}
} else {
alloc_init_pmd(pud, addr, next, phys, prot,
- pgtable_alloc);
+ pgtable_alloc, allow_block_mappings);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
@@ -298,15 +239,14 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
pud_clear_fixmap();
}
-/*
- * Create the page directory entries and any necessary page tables for the
- * mapping specified by 'md'.
- */
-static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(void),
+ bool allow_block_mappings)
{
unsigned long addr, length, end, next;
+ pgd_t *pgd = pgd_offset_raw(pgdir, virt);
/*
* If the virtual and physical address don't have the same offset
@@ -322,29 +262,23 @@ static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
+ alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
+ allow_block_mappings);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-static phys_addr_t late_pgtable_alloc(void)
+static phys_addr_t pgd_pgtable_alloc(void)
{
void *ptr = (void *)__get_free_page(PGALLOC_GFP);
- BUG_ON(!ptr);
+ if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+ BUG();
/* Ensure the zeroed page is visible to the page table walker */
dsb(ishst);
return __pa(ptr);
}
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- pgprot_t prot,
- phys_addr_t (*alloc)(void))
-{
- init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
-}
-
/*
* This function can only be used to modify existing table entries,
* without allocating new levels of table. Note that this permits the
@@ -358,16 +292,17 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- NULL);
+ __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
- pgprot_t prot)
+ pgprot_t prot, bool allow_block_mappings)
{
+ BUG_ON(mm == &init_mm);
+
__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
- late_pgtable_alloc);
+ pgd_pgtable_alloc, allow_block_mappings);
}
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -380,51 +315,54 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
}
__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- late_pgtable_alloc);
+ NULL, !debug_pagealloc_enabled());
}
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
unsigned long kernel_start = __pa(_text);
- unsigned long kernel_end = __pa(_etext);
+ unsigned long kernel_end = __pa(__init_begin);
/*
* Take care not to create a writable alias for the
* read-only text and rodata sections of the kernel image.
*/
- /* No overlap with the kernel text */
+ /* No overlap with the kernel text/rodata */
if (end < kernel_start || start >= kernel_end) {
__create_pgd_mapping(pgd, start, __phys_to_virt(start),
end - start, PAGE_KERNEL,
- early_pgtable_alloc);
+ early_pgtable_alloc,
+ !debug_pagealloc_enabled());
return;
}
/*
- * This block overlaps the kernel text mapping.
+ * This block overlaps the kernel text/rodata mappings.
* Map the portion(s) which don't overlap.
*/
if (start < kernel_start)
__create_pgd_mapping(pgd, start,
__phys_to_virt(start),
kernel_start - start, PAGE_KERNEL,
- early_pgtable_alloc);
+ early_pgtable_alloc,
+ !debug_pagealloc_enabled());
if (kernel_end < end)
__create_pgd_mapping(pgd, kernel_end,
__phys_to_virt(kernel_end),
end - kernel_end, PAGE_KERNEL,
- early_pgtable_alloc);
+ early_pgtable_alloc,
+ !debug_pagealloc_enabled());
/*
- * Map the linear alias of the [_text, _etext) interval as
+ * Map the linear alias of the [_text, __init_begin) interval as
* read-only/non-executable. This makes the contents of the
* region accessible to subsystems such as hibernate, but
* protects it from inadvertent modification or execution.
*/
__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
kernel_end - kernel_start, PAGE_KERNEL_RO,
- early_pgtable_alloc);
+ early_pgtable_alloc, !debug_pagealloc_enabled());
}
static void __init map_mem(pgd_t *pgd)
@@ -449,14 +387,14 @@ void mark_rodata_ro(void)
{
unsigned long section_size;
- section_size = (unsigned long)__start_rodata - (unsigned long)_text;
+ section_size = (unsigned long)_etext - (unsigned long)_text;
create_mapping_late(__pa(_text), (unsigned long)_text,
section_size, PAGE_KERNEL_ROX);
/*
- * mark .rodata as read only. Use _etext rather than __end_rodata to
- * cover NOTES and EXCEPTION_TABLE.
+ * mark .rodata as read only. Use __init_begin rather than __end_rodata
+ * to cover NOTES and EXCEPTION_TABLE.
*/
- section_size = (unsigned long)_etext - (unsigned long)__start_rodata;
+ section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
}
@@ -481,7 +419,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(size));
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc);
+ early_pgtable_alloc, !debug_pagealloc_enabled());
vma->addr = va_start;
vma->phys_addr = pa_start;
@@ -499,8 +437,8 @@ static void __init map_kernel(pgd_t *pgd)
{
static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
- map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
- map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
+ map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+ map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
&vmlinux_init);
map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 98dc1047f2a2..c7fe3ec70774 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/module.h>
@@ -29,7 +30,7 @@ static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
static int numa_distance_cnt;
static u8 *numa_distance;
-static int numa_off;
+static bool numa_off;
static __init int numa_parse_early_param(char *opt)
{
@@ -37,7 +38,7 @@ static __init int numa_parse_early_param(char *opt)
return -EINVAL;
if (!strncmp(opt, "off", 3)) {
pr_info("%s\n", "NUMA turned off");
- numa_off = 1;
+ numa_off = true;
}
return 0;
}
@@ -131,25 +132,25 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
* numa_add_memblk - Set node id to memblk
* @nid: NUMA node ID of the new memblk
* @start: Start address of the new memblk
- * @size: Size of the new memblk
+ * @end: End address of the new memblk
*
* RETURNS:
* 0 on success, -errno on failure.
*/
-int __init numa_add_memblk(int nid, u64 start, u64 size)
+int __init numa_add_memblk(int nid, u64 start, u64 end)
{
int ret;
- ret = memblock_set_node(start, size, &memblock.memory, nid);
+ ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
if (ret < 0) {
pr_err("NUMA: memblock [0x%llx - 0x%llx] failed to add on node %d\n",
- start, (start + size - 1), nid);
+ start, (end - 1), nid);
return ret;
}
node_set(nid, numa_nodes_parsed);
pr_info("NUMA: Adding memblock [0x%llx - 0x%llx] on node %d\n",
- start, (start + size - 1), nid);
+ start, (end - 1), nid);
return ret;
}
@@ -362,12 +363,15 @@ static int __init dummy_numa_init(void)
int ret;
struct memblock_region *mblk;
- pr_info("%s\n", "No NUMA configuration found");
+ if (numa_off)
+ pr_info("NUMA disabled\n"); /* Forced off on command line. */
+ else
+ pr_info("No NUMA configuration found\n");
pr_info("NUMA: Faking a node at [mem %#018Lx-%#018Lx]\n",
0LLU, PFN_PHYS(max_pfn) - 1);
for_each_memblock(memory, mblk) {
- ret = numa_add_memblk(0, mblk->base, mblk->size);
+ ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
if (!ret)
continue;
@@ -375,7 +379,7 @@ static int __init dummy_numa_init(void)
return ret;
}
- numa_off = 1;
+ numa_off = true;
return 0;
}
@@ -388,7 +392,9 @@ static int __init dummy_numa_init(void)
void __init arm64_numa_init(void)
{
if (!numa_off) {
- if (!numa_init(of_numa_init))
+ if (!acpi_disabled && !numa_init(arm64_acpi_numa_init))
+ return;
+ if (acpi_disabled && !numa_init(of_numa_init))
return;
}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c4317879b938..5bb61de23201 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -180,6 +180,8 @@ ENTRY(__cpu_setup)
msr cpacr_el1, x0 // Enable FP/ASIMD
mov x0, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x0 // access to the DCC from EL0
+ isb // Unmask debug exceptions now,
+ enable_dbg // since this is per-cpu
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
/*
* Memory region attributes for LPAE:
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index aee5637ea436..7c16e547ccb2 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
/*
* BPF JIT compiler for ARM64
*
- * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -55,6 +55,7 @@
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
/* Unconditional branch (register) */
+#define A64_BR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 49ba37e4bfc0..b2fc97a2c56c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) "bpf_jit: " fmt
+#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
@@ -33,6 +34,7 @@ int bpf_jit_enable __read_mostly;
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
+#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
@@ -54,6 +56,8 @@ static const int bpf2a64[] = {
/* temporary registers for internal BPF JIT */
[TMP_REG_1] = A64_R(10),
[TMP_REG_2] = A64_R(11),
+ /* tail_call_cnt */
+ [TCALL_CNT] = A64_R(26),
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
};
@@ -146,13 +150,18 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
-static void build_prologue(struct jit_ctx *ctx)
+#define PROLOGUE_OFFSET 8
+
+static int build_prologue(struct jit_ctx *ctx)
{
const u8 r6 = bpf2a64[BPF_REG_6];
const u8 r7 = bpf2a64[BPF_REG_7];
const u8 r8 = bpf2a64[BPF_REG_8];
const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 tcc = bpf2a64[TCALL_CNT];
+ const int idx0 = ctx->idx;
+ int cur_offset;
/*
* BPF prog stack layout
@@ -162,8 +171,6 @@ static void build_prologue(struct jit_ctx *ctx)
* |FP/LR|
* current A64_FP => -16:+-----+
* | ... | callee saved registers
- * +-----+
- * | | x25/x26
* BPF fp register => -64:+-----+ <= (BPF_FP)
* | |
* | ... | BPF prog stack
@@ -183,18 +190,90 @@ static void build_prologue(struct jit_ctx *ctx)
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);
- /* Save callee-saved register */
+ /* Save callee-saved registers */
emit(A64_PUSH(r6, r7, A64_SP), ctx);
emit(A64_PUSH(r8, r9, A64_SP), ctx);
+ emit(A64_PUSH(fp, tcc, A64_SP), ctx);
- /* Save fp (x25) and x26. SP requires 16 bytes alignment */
- emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
-
- /* Set up BPF prog stack base register (x25) */
+ /* Set up BPF prog stack base register */
emit(A64_MOV(1, fp, A64_SP), ctx);
+ /* Initialize tail_call_cnt */
+ emit(A64_MOVZ(1, tcc, 0, 0), ctx);
+
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
+
+ cur_offset = ctx->idx - idx0;
+ if (cur_offset != PROLOGUE_OFFSET) {
+ pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
+ cur_offset, PROLOGUE_OFFSET);
+ return -1;
+ }
+ return 0;
+}
+
+static int out_offset = -1; /* initialized on the first pass of build_body() */
+static int emit_bpf_tail_call(struct jit_ctx *ctx)
+{
+ /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
+ const u8 r2 = bpf2a64[BPF_REG_2];
+ const u8 r3 = bpf2a64[BPF_REG_3];
+
+ const u8 tmp = bpf2a64[TMP_REG_1];
+ const u8 prg = bpf2a64[TMP_REG_2];
+ const u8 tcc = bpf2a64[TCALL_CNT];
+ const int idx0 = ctx->idx;
+#define cur_offset (ctx->idx - idx0)
+#define jmp_offset (out_offset - (cur_offset))
+ size_t off;
+
+ /* if (index >= array->map.max_entries)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, map.max_entries);
+ emit_a64_mov_i64(tmp, off, ctx);
+ emit(A64_LDR32(tmp, r2, tmp), ctx);
+ emit(A64_CMP(0, r3, tmp), ctx);
+ emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+
+ /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * goto out;
+ * tail_call_cnt++;
+ */
+ emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
+ emit(A64_CMP(1, tcc, tmp), ctx);
+ emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+ emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
+
+ /* prog = array->ptrs[index];
+ * if (prog == NULL)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, ptrs);
+ emit_a64_mov_i64(tmp, off, ctx);
+ emit(A64_LDR64(tmp, r2, tmp), ctx);
+ emit(A64_LDR64(prg, tmp, r3), ctx);
+ emit(A64_CBZ(1, prg, jmp_offset), ctx);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ emit_a64_mov_i64(tmp, off, ctx);
+ emit(A64_LDR64(tmp, prg, tmp), ctx);
+ emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
+ emit(A64_BR(tmp), ctx);
+
+ /* out: */
+ if (out_offset == -1)
+ out_offset = cur_offset;
+ if (cur_offset != out_offset) {
+ pr_err_once("tail_call out_offset = %d, expected %d!\n",
+ cur_offset, out_offset);
+ return -1;
+ }
+ return 0;
+#undef cur_offset
+#undef jmp_offset
}
static void build_epilogue(struct jit_ctx *ctx)
@@ -499,13 +578,15 @@ emit_cond_jmp:
const u64 func = (u64)__bpf_call_base + imm;
emit_a64_mov_i64(tmp, func, ctx);
- emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
- emit(A64_MOV(1, A64_FP, A64_SP), ctx);
emit(A64_BLR(tmp), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
- emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
break;
}
+ /* tail call */
+ case BPF_JMP | BPF_CALL | BPF_X:
+ if (emit_bpf_tail_call(ctx))
+ return -EFAULT;
+ break;
/* function return */
case BPF_JMP | BPF_EXIT:
/* Optimization: when last instruction is EXIT,
@@ -650,11 +731,8 @@ emit_cond_jmp:
emit_a64_mov_i64(r3, size, ctx);
emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
- emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
- emit(A64_MOV(1, A64_FP, A64_SP), ctx);
emit(A64_BLR(r5), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
- emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
jmp_offset = epilogue_offset(ctx);
check_imm19(jmp_offset);
@@ -780,7 +858,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- build_prologue(&ctx);
+ if (build_prologue(&ctx)) {
+ prog = orig_prog;
+ goto out_off;
+ }
ctx.epilogue_offset = ctx.idx;
build_epilogue(&ctx);
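The tail-call sequence JIT-ted in the bpf_jit_comp.c hunks above follows the generic eBPF semantics spelled out in its inline comments (bounds check against max_entries, tail_call_cnt limit, NULL slot check, then a jump past the fixed-size prologue). A minimal C rendering of those checks is sketched below; the struct layouts and the function name are simplified stand-ins, not the kernel's definitions.

/* Minimal sketch of the checks emit_bpf_tail_call() encodes; the structs
 * here are simplified stand-ins, not the kernel definitions. */
#define MAX_TAIL_CALL_CNT 32	/* kernel limit on chained tail calls */
#define PROLOGUE_INSNS    8	/* PROLOGUE_OFFSET in the patch above */

struct bpf_prog_stub  { void *bpf_func; };
struct bpf_array_stub { unsigned int max_entries; struct bpf_prog_stub **ptrs; };

static void *tail_call_target(struct bpf_array_stub *array, unsigned long index,
			      unsigned int *tail_call_cnt)
{
	if (index >= array->max_entries)
		return NULL;				/* out of bounds: fall through */
	if (*tail_call_cnt > MAX_TAIL_CALL_CNT)
		return NULL;				/* chain limit reached */
	(*tail_call_cnt)++;
	if (!array->ptrs[index])
		return NULL;				/* empty map slot */
	/* enter the target program just past its fixed-size prologue */
	return (char *)array->ptrs[index]->bpf_func +
	       sizeof(unsigned int) * PROLOGUE_INSNS;
}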
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index 74a8d87e542b..8ff8aa9c6228 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,3 @@
xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
obj-y := xen-arm.o hypercall.o
+obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 70df80e8da2c..329c8027b0a9 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -82,6 +82,7 @@ HYPERCALL3(vcpu_op);
HYPERCALL1(tmem_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
+HYPERCALL2(vm_assist);
ENTRY(privcmd_call)
mov x16, x0
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index d74fd8ce980a..3d5ce38a6f0b 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -41,21 +41,49 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
+#define ATOMIC_FETCH_OP(op, asm_op, asm_con) \
+static inline int __atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int result, val; \
+ \
+ asm volatile( \
+ "/* atomic_fetch_" #op " */\n" \
+ "1: ssrf 5\n" \
+ " ld.w %0, %3\n" \
+ " mov %1, %0\n" \
+ " " #asm_op " %1, %4\n" \
+ " stcond %2, %1\n" \
+ " brne 1b" \
+ : "=&r" (result), "=&r" (val), "=o" (v->counter) \
+ : "m" (v->counter), #asm_con (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
+ATOMIC_FETCH_OP (sub, sub, rKs21)
+ATOMIC_FETCH_OP (add, add, r)
-#define ATOMIC_OP(op, asm_op) \
+#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP_RETURN(op, asm_op, r) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic_##op##_return(i, v); \
+} \
+ATOMIC_FETCH_OP(op, asm_op, r) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ return __atomic_fetch_##op(i, v); \
}
-ATOMIC_OP(and, and)
-ATOMIC_OP(or, or)
-ATOMIC_OP(xor, eor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, eor)
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
/*
@@ -87,6 +115,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
return __atomic_add_return(i, v);
}
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ if (IS_21BIT_CONST(i))
+ return __atomic_fetch_sub(-i, v);
+
+ return __atomic_fetch_add(i, v);
+}
+
/*
* atomic_sub_return - subtract the atomic variable
* @i: integer value to subtract
@@ -102,6 +138,14 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return __atomic_add_return(-i, v);
}
+static inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+ if (IS_21BIT_CONST(i))
+ return __atomic_fetch_sub(i, v);
+
+ return __atomic_fetch_add(-i, v);
+}
+
/*
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 1aba19d68c5e..db039cb368be 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
*/
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+ return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
void *pg;
- pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
if (!pg)
return NULL;
diff --git a/arch/avr32/include/uapi/asm/unistd.h b/arch/avr32/include/uapi/asm/unistd.h
index 60c0f3afc1f9..2c8a0d2b6c30 100644
--- a/arch/avr32/include/uapi/asm/unistd.h
+++ b/arch/avr32/include/uapi/asm/unistd.h
@@ -12,331 +12,333 @@
* This file contains the system call numbers.
*/
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_umask 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_chown 16
-#define __NR_lchown 17
-#define __NR_lseek 18
-#define __NR__llseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount2 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_pause 28
-#define __NR_utime 29
-#define __NR_stat 30
-#define __NR_fstat 31
-#define __NR_lstat 32
-#define __NR_access 33
-#define __NR_chroot 34
-#define __NR_sync 35
-#define __NR_fsync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_clone 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_getcwd 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_setfsuid 52
-#define __NR_setfsgid 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_setpgid 56
-#define __NR_mremap 57
-#define __NR_setresuid 58
-#define __NR_getresuid 59
-#define __NR_setreuid 60
-#define __NR_setregid 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_rt_sigaction 67
-#define __NR_rt_sigreturn 68
-#define __NR_rt_sigprocmask 69
-#define __NR_rt_sigpending 70
-#define __NR_rt_sigtimedwait 71
-#define __NR_rt_sigqueueinfo 72
-#define __NR_rt_sigsuspend 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76 /* SuS compliant getrlimit */
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_fchdir 84
-#define __NR_readlink 85
-#define __NR_pread 86
-#define __NR_pwrite 87
-#define __NR_swapon 88
-#define __NR_reboot 89
-#define __NR_mmap2 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_wait4 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_vhangup 101
-#define __NR_sigaltstack 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_swapoff 106
-#define __NR_sysinfo 107
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_umask 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+#define __NR_lchown 17
+#define __NR_lseek 18
+#define __NR__llseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount2 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_pause 28
+#define __NR_utime 29
+#define __NR_stat 30
+#define __NR_fstat 31
+#define __NR_lstat 32
+#define __NR_access 33
+#define __NR_chroot 34
+#define __NR_sync 35
+#define __NR_fsync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_clone 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_getcwd 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_setfsuid 52
+#define __NR_setfsgid 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_setpgid 56
+#define __NR_mremap 57
+#define __NR_setresuid 58
+#define __NR_getresuid 59
+#define __NR_setreuid 60
+#define __NR_setregid 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_rt_sigaction 67
+#define __NR_rt_sigreturn 68
+#define __NR_rt_sigprocmask 69
+#define __NR_rt_sigpending 70
+#define __NR_rt_sigtimedwait 71
+#define __NR_rt_sigqueueinfo 72
+#define __NR_rt_sigsuspend 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76 /* SuS compliant getrlimit */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_fchdir 84
+#define __NR_readlink 85
+#define __NR_pread 86
+#define __NR_pwrite 87
+#define __NR_swapon 88
+#define __NR_reboot 89
+#define __NR_mmap2 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_wait4 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_vhangup 101
+#define __NR_sigaltstack 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_swapoff 106
+#define __NR_sysinfo 107
/* 108 was __NR_ipc for a little while */
-#define __NR_sendfile 109
-#define __NR_setdomainname 110
-#define __NR_uname 111
-#define __NR_adjtimex 112
-#define __NR_mprotect 113
-#define __NR_vfork 114
-#define __NR_init_module 115
-#define __NR_delete_module 116
-#define __NR_quotactl 117
-#define __NR_getpgid 118
-#define __NR_bdflush 119
-#define __NR_sysfs 120
-#define __NR_personality 121
-#define __NR_afs_syscall 122 /* Syscall for Andrew File System */
-#define __NR_getdents 123
-#define __NR_flock 124
-#define __NR_msync 125
-#define __NR_readv 126
-#define __NR_writev 127
-#define __NR_getsid 128
-#define __NR_fdatasync 129
-#define __NR__sysctl 130
-#define __NR_mlock 131
-#define __NR_munlock 132
-#define __NR_mlockall 133
-#define __NR_munlockall 134
-#define __NR_sched_setparam 135
-#define __NR_sched_getparam 136
-#define __NR_sched_setscheduler 137
-#define __NR_sched_getscheduler 138
-#define __NR_sched_yield 139
-#define __NR_sched_get_priority_max 140
-#define __NR_sched_get_priority_min 141
-#define __NR_sched_rr_get_interval 142
-#define __NR_nanosleep 143
-#define __NR_poll 144
-#define __NR_nfsservctl 145
-#define __NR_setresgid 146
-#define __NR_getresgid 147
+#define __NR_sendfile 109
+#define __NR_setdomainname 110
+#define __NR_uname 111
+#define __NR_adjtimex 112
+#define __NR_mprotect 113
+#define __NR_vfork 114
+#define __NR_init_module 115
+#define __NR_delete_module 116
+#define __NR_quotactl 117
+#define __NR_getpgid 118
+#define __NR_bdflush 119
+#define __NR_sysfs 120
+#define __NR_personality 121
+#define __NR_afs_syscall 122 /* Syscall for Andrew File System */
+#define __NR_getdents 123
+#define __NR_flock 124
+#define __NR_msync 125
+#define __NR_readv 126
+#define __NR_writev 127
+#define __NR_getsid 128
+#define __NR_fdatasync 129
+#define __NR__sysctl 130
+#define __NR_mlock 131
+#define __NR_munlock 132
+#define __NR_mlockall 133
+#define __NR_munlockall 134
+#define __NR_sched_setparam 135
+#define __NR_sched_getparam 136
+#define __NR_sched_setscheduler 137
+#define __NR_sched_getscheduler 138
+#define __NR_sched_yield 139
+#define __NR_sched_get_priority_max 140
+#define __NR_sched_get_priority_min 141
+#define __NR_sched_rr_get_interval 142
+#define __NR_nanosleep 143
+#define __NR_poll 144
+#define __NR_nfsservctl 145
+#define __NR_setresgid 146
+#define __NR_getresgid 147
#define __NR_prctl 148
-#define __NR_socket 149
-#define __NR_bind 150
-#define __NR_connect 151
-#define __NR_listen 152
-#define __NR_accept 153
-#define __NR_getsockname 154
-#define __NR_getpeername 155
-#define __NR_socketpair 156
-#define __NR_send 157
-#define __NR_recv 158
-#define __NR_sendto 159
-#define __NR_recvfrom 160
-#define __NR_shutdown 161
-#define __NR_setsockopt 162
-#define __NR_getsockopt 163
-#define __NR_sendmsg 164
-#define __NR_recvmsg 165
-#define __NR_truncate64 166
-#define __NR_ftruncate64 167
-#define __NR_stat64 168
-#define __NR_lstat64 169
-#define __NR_fstat64 170
-#define __NR_pivot_root 171
-#define __NR_mincore 172
-#define __NR_madvise 173
-#define __NR_getdents64 174
-#define __NR_fcntl64 175
-#define __NR_gettid 176
-#define __NR_readahead 177
-#define __NR_setxattr 178
-#define __NR_lsetxattr 179
-#define __NR_fsetxattr 180
-#define __NR_getxattr 181
-#define __NR_lgetxattr 182
-#define __NR_fgetxattr 183
-#define __NR_listxattr 184
-#define __NR_llistxattr 185
-#define __NR_flistxattr 186
-#define __NR_removexattr 187
-#define __NR_lremovexattr 188
-#define __NR_fremovexattr 189
-#define __NR_tkill 190
-#define __NR_sendfile64 191
-#define __NR_futex 192
-#define __NR_sched_setaffinity 193
-#define __NR_sched_getaffinity 194
-#define __NR_capget 195
-#define __NR_capset 196
-#define __NR_io_setup 197
-#define __NR_io_destroy 198
-#define __NR_io_getevents 199
-#define __NR_io_submit 200
-#define __NR_io_cancel 201
-#define __NR_fadvise64 202
-#define __NR_exit_group 203
-#define __NR_lookup_dcookie 204
-#define __NR_epoll_create 205
-#define __NR_epoll_ctl 206
-#define __NR_epoll_wait 207
-#define __NR_remap_file_pages 208
-#define __NR_set_tid_address 209
-#define __NR_timer_create 210
-#define __NR_timer_settime 211
-#define __NR_timer_gettime 212
-#define __NR_timer_getoverrun 213
-#define __NR_timer_delete 214
-#define __NR_clock_settime 215
-#define __NR_clock_gettime 216
-#define __NR_clock_getres 217
-#define __NR_clock_nanosleep 218
-#define __NR_statfs64 219
-#define __NR_fstatfs64 220
-#define __NR_tgkill 221
- /* 222 reserved for tux */
-#define __NR_utimes 223
-#define __NR_fadvise64_64 224
-#define __NR_cacheflush 225
-
-#define __NR_vserver 226
-#define __NR_mq_open 227
-#define __NR_mq_unlink 228
-#define __NR_mq_timedsend 229
-#define __NR_mq_timedreceive 230
-#define __NR_mq_notify 231
-#define __NR_mq_getsetattr 232
-#define __NR_kexec_load 233
-#define __NR_waitid 234
-#define __NR_add_key 235
-#define __NR_request_key 236
-#define __NR_keyctl 237
-#define __NR_ioprio_set 238
-#define __NR_ioprio_get 239
-#define __NR_inotify_init 240
-#define __NR_inotify_add_watch 241
-#define __NR_inotify_rm_watch 242
-#define __NR_openat 243
-#define __NR_mkdirat 244
-#define __NR_mknodat 245
-#define __NR_fchownat 246
-#define __NR_futimesat 247
-#define __NR_fstatat64 248
-#define __NR_unlinkat 249
-#define __NR_renameat 250
-#define __NR_linkat 251
-#define __NR_symlinkat 252
-#define __NR_readlinkat 253
-#define __NR_fchmodat 254
-#define __NR_faccessat 255
-#define __NR_pselect6 256
-#define __NR_ppoll 257
-#define __NR_unshare 258
-#define __NR_set_robust_list 259
-#define __NR_get_robust_list 260
-#define __NR_splice 261
-#define __NR_sync_file_range 262
-#define __NR_tee 263
-#define __NR_vmsplice 264
-#define __NR_epoll_pwait 265
-#define __NR_msgget 266
-#define __NR_msgsnd 267
-#define __NR_msgrcv 268
-#define __NR_msgctl 269
-#define __NR_semget 270
-#define __NR_semop 271
-#define __NR_semctl 272
-#define __NR_semtimedop 273
-#define __NR_shmat 274
-#define __NR_shmget 275
-#define __NR_shmdt 276
-#define __NR_shmctl 277
-#define __NR_utimensat 278
-#define __NR_signalfd 279
+#define __NR_socket 149
+#define __NR_bind 150
+#define __NR_connect 151
+#define __NR_listen 152
+#define __NR_accept 153
+#define __NR_getsockname 154
+#define __NR_getpeername 155
+#define __NR_socketpair 156
+#define __NR_send 157
+#define __NR_recv 158
+#define __NR_sendto 159
+#define __NR_recvfrom 160
+#define __NR_shutdown 161
+#define __NR_setsockopt 162
+#define __NR_getsockopt 163
+#define __NR_sendmsg 164
+#define __NR_recvmsg 165
+#define __NR_truncate64 166
+#define __NR_ftruncate64 167
+#define __NR_stat64 168
+#define __NR_lstat64 169
+#define __NR_fstat64 170
+#define __NR_pivot_root 171
+#define __NR_mincore 172
+#define __NR_madvise 173
+#define __NR_getdents64 174
+#define __NR_fcntl64 175
+#define __NR_gettid 176
+#define __NR_readahead 177
+#define __NR_setxattr 178
+#define __NR_lsetxattr 179
+#define __NR_fsetxattr 180
+#define __NR_getxattr 181
+#define __NR_lgetxattr 182
+#define __NR_fgetxattr 183
+#define __NR_listxattr 184
+#define __NR_llistxattr 185
+#define __NR_flistxattr 186
+#define __NR_removexattr 187
+#define __NR_lremovexattr 188
+#define __NR_fremovexattr 189
+#define __NR_tkill 190
+#define __NR_sendfile64 191
+#define __NR_futex 192
+#define __NR_sched_setaffinity 193
+#define __NR_sched_getaffinity 194
+#define __NR_capget 195
+#define __NR_capset 196
+#define __NR_io_setup 197
+#define __NR_io_destroy 198
+#define __NR_io_getevents 199
+#define __NR_io_submit 200
+#define __NR_io_cancel 201
+#define __NR_fadvise64 202
+#define __NR_exit_group 203
+#define __NR_lookup_dcookie 204
+#define __NR_epoll_create 205
+#define __NR_epoll_ctl 206
+#define __NR_epoll_wait 207
+#define __NR_remap_file_pages 208
+#define __NR_set_tid_address 209
+#define __NR_timer_create 210
+#define __NR_timer_settime 211
+#define __NR_timer_gettime 212
+#define __NR_timer_getoverrun 213
+#define __NR_timer_delete 214
+#define __NR_clock_settime 215
+#define __NR_clock_gettime 216
+#define __NR_clock_getres 217
+#define __NR_clock_nanosleep 218
+#define __NR_statfs64 219
+#define __NR_fstatfs64 220
+#define __NR_tgkill 221
+/* 222 reserved for tux */
+#define __NR_utimes 223
+#define __NR_fadvise64_64 224
+#define __NR_cacheflush 225
+#define __NR_vserver 226
+#define __NR_mq_open 227
+#define __NR_mq_unlink 228
+#define __NR_mq_timedsend 229
+#define __NR_mq_timedreceive 230
+#define __NR_mq_notify 231
+#define __NR_mq_getsetattr 232
+#define __NR_kexec_load 233
+#define __NR_waitid 234
+#define __NR_add_key 235
+#define __NR_request_key 236
+#define __NR_keyctl 237
+#define __NR_ioprio_set 238
+#define __NR_ioprio_get 239
+#define __NR_inotify_init 240
+#define __NR_inotify_add_watch 241
+#define __NR_inotify_rm_watch 242
+#define __NR_openat 243
+#define __NR_mkdirat 244
+#define __NR_mknodat 245
+#define __NR_fchownat 246
+#define __NR_futimesat 247
+#define __NR_fstatat64 248
+#define __NR_unlinkat 249
+#define __NR_renameat 250
+#define __NR_linkat 251
+#define __NR_symlinkat 252
+#define __NR_readlinkat 253
+#define __NR_fchmodat 254
+#define __NR_faccessat 255
+#define __NR_pselect6 256
+#define __NR_ppoll 257
+#define __NR_unshare 258
+#define __NR_set_robust_list 259
+#define __NR_get_robust_list 260
+#define __NR_splice 261
+#define __NR_sync_file_range 262
+#define __NR_tee 263
+#define __NR_vmsplice 264
+#define __NR_epoll_pwait 265
+#define __NR_msgget 266
+#define __NR_msgsnd 267
+#define __NR_msgrcv 268
+#define __NR_msgctl 269
+#define __NR_semget 270
+#define __NR_semop 271
+#define __NR_semctl 272
+#define __NR_semtimedop 273
+#define __NR_shmat 274
+#define __NR_shmget 275
+#define __NR_shmdt 276
+#define __NR_shmctl 277
+#define __NR_utimensat 278
+#define __NR_signalfd 279
/* 280 was __NR_timerfd */
-#define __NR_eventfd 281
-#define __NR_setns 283
-#define __NR_pread64 284
-#define __NR_pwrite64 285
-#define __NR_timerfd_create 286
-#define __NR_fallocate 287
-#define __NR_timerfd_settime 288
-#define __NR_timerfd_gettime 289
-#define __NR_signalfd4 290
-#define __NR_eventfd2 291
-#define __NR_epoll_create1 292
-#define __NR_dup3 293
-#define __NR_pipe2 294
-#define __NR_inotify_init1 295
-#define __NR_preadv 296
-#define __NR_pwritev 297
-#define __NR_rt_tgsigqueueinfo 298
-#define __NR_perf_event_open 299
-#define __NR_recvmmsg 300
-#define __NR_fanotify_init 301
-#define __NR_fanotify_mark 302
-#define __NR_prlimit64 303
-#define __NR_name_to_handle_at 304
-#define __NR_open_by_handle_at 305
-#define __NR_clock_adjtime 306
-#define __NR_syncfs 307
-#define __NR_sendmmsg 308
-#define __NR_process_vm_readv 309
-#define __NR_process_vm_writev 310
-#define __NR_kcmp 311
-#define __NR_finit_module 312
-#define __NR_sched_setattr 313
-#define __NR_sched_getattr 314
-#define __NR_renameat2 315
-#define __NR_seccomp 316
-#define __NR_getrandom 317
-#define __NR_memfd_create 318
-#define __NR_bpf 319
-#define __NR_execveat 320
-#define __NR_accept4 321
-#define __NR_userfaultfd 322
-#define __NR_membarrier 323
-#define __NR_mlock2 324
+#define __NR_eventfd 281
+/* 282 was half-implemented __NR_recvmmsg */
+#define __NR_setns 283
+#define __NR_pread64 284
+#define __NR_pwrite64 285
+#define __NR_timerfd_create 286
+#define __NR_fallocate 287
+#define __NR_timerfd_settime 288
+#define __NR_timerfd_gettime 289
+#define __NR_signalfd4 290
+#define __NR_eventfd2 291
+#define __NR_epoll_create1 292
+#define __NR_dup3 293
+#define __NR_pipe2 294
+#define __NR_inotify_init1 295
+#define __NR_preadv 296
+#define __NR_pwritev 297
+#define __NR_rt_tgsigqueueinfo 298
+#define __NR_perf_event_open 299
+#define __NR_recvmmsg 300
+#define __NR_fanotify_init 301
+#define __NR_fanotify_mark 302
+#define __NR_prlimit64 303
+#define __NR_name_to_handle_at 304
+#define __NR_open_by_handle_at 305
+#define __NR_clock_adjtime 306
+#define __NR_syncfs 307
+#define __NR_sendmmsg 308
+#define __NR_process_vm_readv 309
+#define __NR_process_vm_writev 310
+#define __NR_kcmp 311
+#define __NR_finit_module 312
+#define __NR_sched_setattr 313
+#define __NR_sched_getattr 314
+#define __NR_renameat2 315
+#define __NR_seccomp 316
+#define __NR_getrandom 317
+#define __NR_memfd_create 318
+#define __NR_bpf 319
+#define __NR_execveat 320
+#define __NR_accept4 321
+#define __NR_userfaultfd 322
+#define __NR_membarrier 323
+#define __NR_mlock2 324
#define __NR_copy_file_range 325
+#define __NR_preadv2 326
+#define __NR_pwritev2 327
#endif /* _UAPI__ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
index cb3991552f14..cb256534ed92 100644
--- a/arch/avr32/kernel/syscall-stubs.S
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -133,3 +133,21 @@ __sys_copy_file_range:
call sys_copy_file_range
sub sp, -4
popm pc
+
+ .global __sys_preadv2
+ .type __sys_preadv2,@function
+__sys_preadv2:
+ pushm lr
+ st.w --sp, ARG6
+ call sys_preadv2
+ sub sp, -4
+ popm pc
+
+ .global __sys_pwritev2
+ .type __sys_pwritev2,@function
+__sys_pwritev2:
+ pushm lr
+ st.w --sp, ARG6
+ call sys_pwritev2
+ sub sp, -4
+ popm pc
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 64d71a781fa8..7b348ba70e41 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -9,334 +9,336 @@
*/
.section .rodata,"a",@progbits
- .type sys_call_table,@object
- .global sys_call_table
- .align 2
+ .type sys_call_table,@object
+ .global sys_call_table
+ .align 2
sys_call_table:
- .long sys_restart_syscall
- .long sys_exit
- .long sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_umask
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_chown
- .long sys_lchown
- .long sys_lseek
- .long sys_llseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_umount
- .long sys_setuid
- .long sys_getuid
- .long sys_stime /* 25 */
- .long sys_ptrace
- .long sys_alarm
- .long sys_pause
- .long sys_utime
- .long sys_newstat /* 30 */
- .long sys_newfstat
- .long sys_newlstat
- .long sys_access
- .long sys_chroot
- .long sys_sync /* 35 */
- .long sys_fsync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_pipe
- .long sys_times
- .long sys_clone
- .long sys_brk /* 45 */
- .long sys_setgid
- .long sys_getgid
- .long sys_getcwd
- .long sys_geteuid
- .long sys_getegid /* 50 */
- .long sys_acct
- .long sys_setfsuid
- .long sys_setfsgid
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_setpgid
- .long sys_mremap
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setreuid /* 60 */
- .long sys_setregid
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_rt_sigaction
- .long __sys_rt_sigreturn
- .long sys_rt_sigprocmask
- .long sys_rt_sigpending /* 70 */
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long __sys_rt_sigsuspend
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_getrlimit
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups /* 80 */
- .long sys_setgroups
- .long sys_select
- .long sys_symlink
- .long sys_fchdir
- .long sys_readlink /* 85 */
- .long sys_pread64
- .long sys_pwrite64
- .long sys_swapon
- .long sys_reboot
- .long __sys_mmap2 /* 90 */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_wait4
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_vhangup
- .long sys_sigaltstack
- .long sys_syslog
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_swapoff
- .long sys_sysinfo
- .long sys_ni_syscall /* was sys_ipc briefly */
- .long sys_sendfile
- .long sys_setdomainname /* 110 */
- .long sys_newuname
- .long sys_adjtimex
- .long sys_mprotect
- .long sys_vfork
- .long sys_init_module /* 115 */
- .long sys_delete_module
- .long sys_quotactl
- .long sys_getpgid
- .long sys_bdflush
- .long sys_sysfs /* 120 */
- .long sys_personality
- .long sys_ni_syscall /* reserved for afs_syscall */
- .long sys_getdents
- .long sys_flock
- .long sys_msync /* 125 */
- .long sys_readv
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl /* 130 */
- .long sys_mlock
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam /* 135 */
- .long sys_sched_getparam
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max /* 140 */
- .long sys_sched_get_priority_min
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_poll
- .long sys_ni_syscall /* 145 was nfsservctl */
- .long sys_setresgid
- .long sys_getresgid
- .long sys_prctl
- .long sys_socket
- .long sys_bind /* 150 */
- .long sys_connect
- .long sys_listen
- .long sys_accept
- .long sys_getsockname
- .long sys_getpeername /* 155 */
- .long sys_socketpair
- .long sys_send
- .long sys_recv
- .long __sys_sendto
- .long __sys_recvfrom /* 160 */
- .long sys_shutdown
- .long sys_setsockopt
- .long sys_getsockopt
- .long sys_sendmsg
- .long sys_recvmsg /* 165 */
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64
- .long sys_lstat64
- .long sys_fstat64 /* 170 */
- .long sys_pivot_root
- .long sys_mincore
- .long sys_madvise
- .long sys_getdents64
- .long sys_fcntl64 /* 175 */
- .long sys_gettid
- .long sys_readahead
- .long sys_setxattr
- .long sys_lsetxattr
- .long sys_fsetxattr /* 180 */
- .long sys_getxattr
- .long sys_lgetxattr
- .long sys_fgetxattr
- .long sys_listxattr
- .long sys_llistxattr /* 185 */
- .long sys_flistxattr
- .long sys_removexattr
- .long sys_lremovexattr
- .long sys_fremovexattr
- .long sys_tkill /* 190 */
- .long sys_sendfile64
- .long sys_futex
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity
- .long sys_capget /* 195 */
- .long sys_capset
- .long sys_io_setup
- .long sys_io_destroy
- .long sys_io_getevents
- .long sys_io_submit /* 200 */
- .long sys_io_cancel
- .long sys_fadvise64
- .long sys_exit_group
- .long sys_lookup_dcookie
- .long sys_epoll_create /* 205 */
- .long sys_epoll_ctl
- .long sys_epoll_wait
- .long sys_remap_file_pages
- .long sys_set_tid_address
- .long sys_timer_create /* 210 */
- .long sys_timer_settime
- .long sys_timer_gettime
- .long sys_timer_getoverrun
- .long sys_timer_delete
- .long sys_clock_settime /* 215 */
- .long sys_clock_gettime
- .long sys_clock_getres
- .long sys_clock_nanosleep
- .long sys_statfs64
- .long sys_fstatfs64 /* 220 */
- .long sys_tgkill
- .long sys_ni_syscall /* reserved for TUX */
- .long sys_utimes
- .long sys_fadvise64_64
- .long sys_cacheflush /* 225 */
- .long sys_ni_syscall /* sys_vserver */
- .long sys_mq_open
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive /* 230 */
- .long sys_mq_notify
- .long sys_mq_getsetattr
- .long sys_kexec_load
- .long sys_waitid
- .long sys_add_key /* 235 */
- .long sys_request_key
- .long sys_keyctl
- .long sys_ioprio_set
- .long sys_ioprio_get
- .long sys_inotify_init /* 240 */
- .long sys_inotify_add_watch
- .long sys_inotify_rm_watch
- .long sys_openat
- .long sys_mkdirat
- .long sys_mknodat /* 245 */
- .long sys_fchownat
- .long sys_futimesat
- .long sys_fstatat64
- .long sys_unlinkat
- .long sys_renameat /* 250 */
- .long sys_linkat
- .long sys_symlinkat
- .long sys_readlinkat
- .long sys_fchmodat
- .long sys_faccessat /* 255 */
- .long __sys_pselect6
- .long sys_ppoll
- .long sys_unshare
- .long sys_set_robust_list
- .long sys_get_robust_list /* 260 */
- .long __sys_splice
- .long __sys_sync_file_range
- .long sys_tee
- .long sys_vmsplice
- .long __sys_epoll_pwait /* 265 */
- .long sys_msgget
- .long sys_msgsnd
- .long sys_msgrcv
- .long sys_msgctl
- .long sys_semget /* 270 */
- .long sys_semop
- .long sys_semctl
- .long sys_semtimedop
- .long sys_shmat
- .long sys_shmget /* 275 */
- .long sys_shmdt
- .long sys_shmctl
- .long sys_utimensat
- .long sys_signalfd
- .long sys_ni_syscall /* 280, was sys_timerfd */
- .long sys_eventfd
- .long sys_recvmmsg
- .long sys_setns
- .long sys_pread64
- .long sys_pwrite64 /* 285 */
- .long sys_timerfd_create
- .long __sys_fallocate
- .long sys_timerfd_settime
- .long sys_timerfd_gettime
- .long sys_signalfd4 /* 290 */
- .long sys_eventfd2
- .long sys_epoll_create1
- .long sys_dup3
- .long sys_pipe2
- .long sys_inotify_init1 /* 295 */
- .long sys_preadv
- .long sys_pwritev
- .long sys_rt_tgsigqueueinfo
- .long sys_perf_event_open
- .long sys_recvmmsg /* 300 */
- .long sys_fanotify_init
- .long __sys_fanotify_mark
- .long sys_prlimit64
- .long sys_name_to_handle_at
- .long sys_open_by_handle_at /* 305 */
- .long sys_clock_adjtime
- .long sys_syncfs
- .long sys_sendmmsg
- .long __sys_process_vm_readv
- .long __sys_process_vm_writev /* 310 */
- .long sys_kcmp
- .long sys_finit_module
- .long sys_sched_setattr
- .long sys_sched_getattr
- .long sys_renameat2 /* 315 */
- .long sys_seccomp
- .long sys_getrandom
- .long sys_memfd_create
- .long sys_bpf
- .long sys_execveat /* 320 */
- .long sys_accept4
- .long sys_userfaultfd
- .long sys_membarrier
- .long sys_mlock2
- .long __sys_copy_file_range /* 325 */
- .long sys_ni_syscall /* r8 is saturated at nr_syscalls */
+ .long sys_restart_syscall
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open
+ .long sys_close
+ .long sys_umask
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod
+ .long sys_chown
+ .long sys_lchown
+ .long sys_lseek
+ .long sys_llseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_umount
+ .long sys_setuid
+ .long sys_getuid
+ .long sys_stime
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_pause
+ .long sys_utime
+ .long sys_newstat /* 30 */
+ .long sys_newfstat
+ .long sys_newlstat
+ .long sys_access
+ .long sys_chroot
+ .long sys_sync
+ .long sys_fsync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_clone
+ .long sys_brk
+ .long sys_setgid
+ .long sys_getgid
+ .long sys_getcwd
+ .long sys_geteuid
+ .long sys_getegid /* 50 */
+ .long sys_acct
+ .long sys_setfsuid
+ .long sys_setfsgid
+ .long sys_ioctl
+ .long sys_fcntl
+ .long sys_setpgid
+ .long sys_mremap
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setreuid /* 60 */
+ .long sys_setregid
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp
+ .long sys_setsid
+ .long sys_rt_sigaction
+ .long __sys_rt_sigreturn
+ .long sys_rt_sigprocmask
+ .long sys_rt_sigpending /* 70 */
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long __sys_rt_sigsuspend
+ .long sys_sethostname
+ .long sys_setrlimit
+ .long sys_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups /* 80 */
+ .long sys_setgroups
+ .long sys_select
+ .long sys_symlink
+ .long sys_fchdir
+ .long sys_readlink
+ .long sys_pread64
+ .long sys_pwrite64
+ .long sys_swapon
+ .long sys_reboot
+ .long __sys_mmap2 /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_wait4
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_vhangup
+ .long sys_sigaltstack
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer
+ .long sys_swapoff
+ .long sys_sysinfo
+ .long sys_ni_syscall /* was sys_ipc briefly */
+ .long sys_sendfile
+ .long sys_setdomainname /* 110 */
+ .long sys_newuname
+ .long sys_adjtimex
+ .long sys_mprotect
+ .long sys_vfork
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_bdflush
+ .long sys_sysfs /* 120 */
+ .long sys_personality
+ .long sys_ni_syscall /* reserved for afs_syscall */
+ .long sys_getdents
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl /* 130 */
+ .long sys_mlock
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max /* 140 */
+ .long sys_sched_get_priority_min
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_poll
+ .long sys_ni_syscall /* 145 was nfsservctl */
+ .long sys_setresgid
+ .long sys_getresgid
+ .long sys_prctl
+ .long sys_socket
+ .long sys_bind /* 150 */
+ .long sys_connect
+ .long sys_listen
+ .long sys_accept
+ .long sys_getsockname
+ .long sys_getpeername
+ .long sys_socketpair
+ .long sys_send
+ .long sys_recv
+ .long __sys_sendto
+ .long __sys_recvfrom /* 160 */
+ .long sys_shutdown
+ .long sys_setsockopt
+ .long sys_getsockopt
+ .long sys_sendmsg
+ .long sys_recvmsg
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64
+ .long sys_lstat64
+ .long sys_fstat64 /* 170 */
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_getdents64
+ .long sys_fcntl64
+ .long sys_gettid
+ .long sys_readahead
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr /* 180 */
+ .long sys_getxattr
+ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill /* 190 */
+ .long sys_sendfile64
+ .long sys_futex
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_capget
+ .long sys_capset
+ .long sys_io_setup
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit /* 200 */
+ .long sys_io_cancel
+ .long sys_fadvise64
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create /* 210 */
+ .long sys_timer_settime
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64 /* 220 */
+ .long sys_tgkill
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_cacheflush
+ .long sys_ni_syscall /* sys_vserver */
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 230 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_kexec_load
+ .long sys_waitid
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init /* 240 */
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
+ .long sys_openat
+ .long sys_mkdirat
+ .long sys_mknodat
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64
+ .long sys_unlinkat
+ .long sys_renameat /* 250 */
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat
+ .long sys_fchmodat
+ .long sys_faccessat
+ .long __sys_pselect6
+ .long sys_ppoll
+ .long sys_unshare
+ .long sys_set_robust_list
+ .long sys_get_robust_list /* 260 */
+ .long __sys_splice
+ .long __sys_sync_file_range
+ .long sys_tee
+ .long sys_vmsplice
+ .long __sys_epoll_pwait
+ .long sys_msgget
+ .long sys_msgsnd
+ .long sys_msgrcv
+ .long sys_msgctl
+ .long sys_semget /* 270 */
+ .long sys_semop
+ .long sys_semctl
+ .long sys_semtimedop
+ .long sys_shmat
+ .long sys_shmget
+ .long sys_shmdt
+ .long sys_shmctl
+ .long sys_utimensat
+ .long sys_signalfd
+ .long sys_ni_syscall /* 280, was sys_timerfd */
+ .long sys_eventfd
+ .long sys_ni_syscall /* 282, was half-implemented recvmmsg */
+ .long sys_setns
+ .long sys_pread64
+ .long sys_pwrite64
+ .long sys_timerfd_create
+ .long __sys_fallocate
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
+ .long sys_signalfd4 /* 290 */
+ .long sys_eventfd2
+ .long sys_epoll_create1
+ .long sys_dup3
+ .long sys_pipe2
+ .long sys_inotify_init1
+ .long sys_preadv
+ .long sys_pwritev
+ .long sys_rt_tgsigqueueinfo
+ .long sys_perf_event_open
+ .long sys_recvmmsg /* 300 */
+ .long sys_fanotify_init
+ .long __sys_fanotify_mark
+ .long sys_prlimit64
+ .long sys_name_to_handle_at
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
+ .long sys_syncfs
+ .long sys_sendmmsg
+ .long __sys_process_vm_readv
+ .long __sys_process_vm_writev /* 310 */
+ .long sys_kcmp
+ .long sys_finit_module
+ .long sys_sched_setattr
+ .long sys_sched_getattr
+ .long sys_renameat2
+ .long sys_seccomp
+ .long sys_getrandom
+ .long sys_memfd_create
+ .long sys_bpf
+ .long sys_execveat /* 320 */
+ .long sys_accept4
+ .long sys_userfaultfd
+ .long sys_membarrier
+ .long sys_mlock2
+ .long __sys_copy_file_range
+ .long __sys_preadv2
+ .long __sys_pwritev2
+ .long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index 83c2a0021b56..13d3fc4270b7 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -435,7 +435,7 @@ void __init at32_init_pio(struct platform_device *pdev)
struct resource *regs;
struct pio_device *pio;
- if (pdev->id > MAX_NR_PIO_DEVICES) {
+ if (pdev->id >= MAX_NR_PIO_DEVICES) {
dev_err(&pdev->dev, "only %d PIO devices supported\n",
MAX_NR_PIO_DEVICES);
return;
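
The pio.c hunk above is a plain off-by-one fix: with MAX_NR_PIO_DEVICES entries the valid ids run from 0 to MAX_NR_PIO_DEVICES - 1, so an id equal to the limit must already be rejected, which requires ">=" rather than ">". A minimal sketch of the same pattern, with a hypothetical array and limit standing in for the driver's real data structures:

#include <stdio.h>

#define MAX_NR_DEVICES 8            /* hypothetical limit */

static int devices[MAX_NR_DEVICES]; /* valid indices: 0 .. MAX_NR_DEVICES - 1 */

static int lookup(int id)
{
	if (id >= MAX_NR_DEVICES) {  /* '>' would let id == MAX_NR_DEVICES through */
		fprintf(stderr, "only %d devices supported\n", MAX_NR_DEVICES);
		return -1;
	}
	return devices[id];
}

int main(void)
{
	return lookup(MAX_NR_DEVICES) == -1 ? 0 : 1; /* out-of-range id is rejected */
}
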
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index c03533937a9f..a4b7edac8f10 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -134,7 +134,7 @@ good_area:
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
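
This hunk, like the later fault.c hunks across the other architectures, is the same interface change: handle_mm_fault() no longer takes the mm_struct explicitly because it can be recovered from vma->vm_mm. A hedged caller-side sketch with stub types (the real kernel structures carry much more state):

/* Illustration only: stub types standing in for the kernel's real ones. */
struct mm_struct { int dummy; };
struct vm_area_struct { struct mm_struct *vm_mm; };

/* New-style entry point: the mm is derived from the VMA internally. */
static int handle_mm_fault(struct vm_area_struct *vma,
			   unsigned long address, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;	/* what used to be passed explicitly */
	(void)mm; (void)address; (void)flags;
	return 0;
}

int main(void)
{
	struct mm_struct mm = { 0 };
	struct vm_area_struct vma = { .vm_mm = &mm };

	/* old: handle_mm_fault(&mm, &vma, addr, flags);  new: */
	return handle_mm_fault(&vma, 0x1000, 0);
}
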
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 1c1c42330c99..63c7deceeeb6 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -17,6 +17,7 @@
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
@@ -28,10 +29,17 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
+#define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i)
+#define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i))
+
#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
+#define atomic_fetch_or(i, v) __raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i)
+
#endif
#include <asm-generic/atomic.h>
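
The new atomic_fetch_* macros differ from the existing *_return forms only in which value they hand back: fetch returns the value before the operation, return gives the value after it. A small userspace check of that contract, using GCC's __atomic builtins rather than the Blackfin asm helpers:

#include <assert.h>

int main(void)
{
	int v = 5;

	/* fetch-style: the old value comes back */
	int old = __atomic_fetch_add(&v, 3, __ATOMIC_SEQ_CST);
	assert(old == 5 && v == 8);

	/* return-style: the new value comes back */
	int new = __atomic_add_fetch(&v, 3, __ATOMIC_SEQ_CST);
	assert(new == 11 && v == 11);

	return 0;
}
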
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 490c7caa02d9..c58f4a83ed6f 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -12,6 +12,8 @@
#else
#include <linux/atomic.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
@@ -48,8 +50,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (arch_spin_is_locked(lock))
- cpu_relax();
+ smp_cond_load_acquire(&lock->lock, !VAL);
}
static inline int arch_read_can_lock(arch_rwlock_t *rw)
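
The rewritten arch_spin_unlock_wait() replaces an open-coded poll loop with smp_cond_load_acquire(), which spins (relaxing the CPU internally) until the condition on the loaded value holds and then provides ACQUIRE ordering against the store that released the lock. A rough userspace analogue of the idea, assuming C11 atomics and sched_yield() as a stand-in for cpu_relax():

#include <stdatomic.h>
#include <sched.h>

/* Spin until *lock reads as 0, then acquire-order against the release
 * that cleared it; mirrors smp_cond_load_acquire(&lock->lock, !VAL). */
static void wait_until_unlocked(atomic_int *lock)
{
	while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
		sched_yield();			/* stand-in for cpu_relax() */
	atomic_thread_fence(memory_order_acquire);
}

int main(void)
{
	atomic_int lock = 0;

	wait_until_unlocked(&lock);		/* returns immediately here */
	return 0;
}
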
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index a401c27b69b4..68096e8f787f 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(insl_16);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_xadd_asm);
EXPORT_SYMBOL(__raw_atomic_and_asm);
EXPORT_SYMBOL(__raw_atomic_or_asm);
EXPORT_SYMBOL(__raw_atomic_xor_asm);
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 170d786807c4..6355e97d22b9 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -453,29 +453,13 @@ static struct pmu pmu = {
.read = bfin_pmu_read,
};
-static void bfin_pmu_setup(int cpu)
+static int bfin_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+ bfin_write_PFCTL(0);
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int
-bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- bfin_write_PFCTL(0);
- bfin_pmu_setup(cpu);
- break;
-
- default:
- break;
- }
-
- return NOTIFY_OK;
+ return 0;
}
static int __init bfin_pmu_init(void)
@@ -491,8 +475,8 @@ static int __init bfin_pmu_init(void)
ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
if (!ret)
- perf_cpu_notifier(bfin_pmu_notifier);
-
+ cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
+ bfin_pmu_prepare_cpu, NULL);
return ret;
}
early_initcall(bfin_pmu_init);
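
The perf_event.c hunk is the standard conversion from the old CPU notifier chain to the hotplug state machine: the CPU_UP_PREPARE case body becomes a prepare callback, and cpuhp_setup_state() registers it and runs it for CPUs that are already online. The toy model below only mirrors that shape; it is not the real cpuhp API, and the callback body is illustrative:

#include <stdio.h>

/* Toy model: a single "prepare" callback invoked once per CPU,
 * replacing a switch-based notifier. */
typedef int (*prepare_cb)(unsigned int cpu);

static int toy_setup_state(unsigned int nr_cpus, prepare_cb prepare)
{
	for (unsigned int cpu = 0; cpu < nr_cpus; cpu++) {
		int ret = prepare(cpu);	/* like CPU_UP_PREPARE, but a direct call */
		if (ret)
			return ret;
	}
	return 0;
}

static int pmu_prepare_cpu(unsigned int cpu)
{
	printf("reset PMU state for cpu %u\n", cpu);
	return 0;
}

int main(void)
{
	return toy_setup_state(2, pmu_prepare_cpu);
}
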
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 26fccb5568b9..1e2989c5d6b2 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -607,6 +607,28 @@ ENDPROC(___raw_atomic_add_asm)
/*
* r0 = ptr
+ * r1 = value
+ *
+ * ADD a signed value to a 32bit word and return the old value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_xadd_asm)
+ p1 = r0;
+ r3 = r1;
+ [--sp] = rets;
+ call _get_core_lock;
+ r3 = [p1];
+ r2 = r3 + r2;
+ [p1] = r2;
+ r1 = p1;
+ call _put_core_lock;
+ r0 = r3;
+ rets = [sp++];
+ rts;
+ENDPROC(___raw_atomic_add_asm)
+
+/*
+ * r0 = ptr
* r1 = mask
*
* AND the mask bits from a 32bit word and return the old 32bit value
@@ -618,10 +640,9 @@ ENTRY(___raw_atomic_and_asm)
r3 = r1;
[--sp] = rets;
call _get_core_lock;
- r2 = [p1];
- r3 = r2 & r3;
- [p1] = r3;
- r3 = r2;
+ r3 = [p1];
+ r2 = r2 & r3;
+ [p1] = r2;
r1 = p1;
call _put_core_lock;
r0 = r3;
@@ -642,10 +663,9 @@ ENTRY(___raw_atomic_or_asm)
r3 = r1;
[--sp] = rets;
call _get_core_lock;
- r2 = [p1];
- r3 = r2 | r3;
- [p1] = r3;
- r3 = r2;
+ r3 = [p1];
+ r2 = r2 | r3;
+ [p1] = r2;
r1 = p1;
call _put_core_lock;
r0 = r3;
@@ -666,10 +686,9 @@ ENTRY(___raw_atomic_xor_asm)
r3 = r1;
[--sp] = rets;
call _get_core_lock;
- r2 = [p1];
- r3 = r2 ^ r3;
- [p1] = r3;
- r3 = r2;
+ r3 = [p1];
+ r2 = r2 ^ r3;
+ [p1] = r2;
r1 = p1;
call _put_core_lock;
r0 = r3;
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index aad5d7416886..9231e5a72b93 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -1002,14 +1002,12 @@ static struct adv7842_output_format adv7842_opf[] = {
{
.op_ch_sel = ADV7842_OP_CH_SEL_BRG,
.op_format_sel = ADV7842_OP_FORMAT_SEL_SDR_ITU656_8,
- .op_656_range = 1,
.blank_data = 1,
.insert_av_codes = 1,
},
{
.op_ch_sel = ADV7842_OP_CH_SEL_RGB,
.op_format_sel = ADV7842_OP_FORMAT_SEL_SDR_ITU656_16,
- .op_656_range = 1,
.blank_data = 1,
},
};
diff --git a/arch/c6x/platforms/Makefile b/arch/c6x/platforms/Makefile
index 9a95b9bca8d0..5f7d93468b6e 100644
--- a/arch/c6x/platforms/Makefile
+++ b/arch/c6x/platforms/Makefile
@@ -4,7 +4,7 @@
# Copyright 2010, 2011 Texas Instruments Incorporated
#
-obj-y = platform.o cache.o megamod-pic.o pll.o plldata.o timer64.o
+obj-y = cache.o megamod-pic.o pll.o plldata.o timer64.o
obj-y += dscr.o
# SoC objects
diff --git a/arch/c6x/platforms/platform.c b/arch/c6x/platforms/platform.c
deleted file mode 100644
index 26c1a355d600..000000000000
--- a/arch/c6x/platforms/platform.c
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright 2011 Texas Instruments Incorporated
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-
-static int __init c6x_device_probe(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-core_initcall(c6x_device_probe);
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index 235ece437ddd..42f1affb9c2d 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
diff --git a/arch/cris/kernel/setup.c b/arch/cris/kernel/setup.c
index bb12aa93201d..4b4853d914e2 100644
--- a/arch/cris/kernel/setup.c
+++ b/arch/cris/kernel/setup.c
@@ -21,7 +21,6 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/setup.h>
#include <arch/system.h>
@@ -212,10 +211,3 @@ static int __init topology_init(void)
}
subsys_initcall(topology_init);
-
-static int __init cris_of_init(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-core_initcall(cris_of_init);
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 3066d40a6db1..112ef26c7f2e 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -168,7 +168,7 @@ retry:
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 64f02d451aa8..1c2a5e264fc7 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -60,16 +60,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
return atomic_add_return(i, v) < 0;
}
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
-
static inline void atomic_inc(atomic_t *v)
{
atomic_inc_return(v);
@@ -136,16 +126,6 @@ static inline long long atomic64_add_negative(long long i, atomic64_t *v)
return atomic64_add_return(i, v) < 0;
}
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
- atomic64_add_return(i, v);
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
- atomic64_sub_return(i, v);
-}
-
static inline void atomic64_inc(atomic64_t *v)
{
atomic64_inc_return(v);
@@ -182,11 +162,19 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
}
#define ATOMIC_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ return __atomic32_fetch_##op(i, &v->counter); \
+} \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic32_fetch_##op(i, &v->counter); \
} \
\
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
+{ \
+ return __atomic64_fetch_##op(i, &v->counter); \
+} \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
(void)__atomic64_fetch_##op(i, &v->counter); \
@@ -195,6 +183,8 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
ATOMIC_OP(or)
ATOMIC_OP(and)
ATOMIC_OP(xor)
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
#undef ATOMIC_OP
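
The frv change folds add and sub into the same ATOMIC_OP() generator already used for the bitwise ops, so one macro invocation now emits both a void form and a value-returning fetch form, in 32- and 64-bit flavours. A small sketch of that macro-generation idea; the __fetch_* backends here are hypothetical stand-ins for frv's __atomic32_fetch_* helpers and are not actually atomic:

#include <assert.h>

/* Hypothetical backend: returns the old value, applies the op (not atomic). */
static int __fetch_add(int i, int *p) { int o = *p; *p += i; return o; }
static int __fetch_sub(int i, int *p) { int o = *p; *p -= i; return o; }

/* One expansion produces both the fetch form and the void form. */
#define ATOMIC_OP(op)							\
static int atomic_fetch_##op(int i, int *v) { return __fetch_##op(i, v); } \
static void atomic_##op(int i, int *v)      { (void)__fetch_##op(i, v); }

ATOMIC_OP(add)
ATOMIC_OP(sub)
#undef ATOMIC_OP

int main(void)
{
	int v = 10;

	assert(atomic_fetch_add(2, &v) == 10 && v == 12);
	atomic_add(3, &v);				/* void form */
	assert(atomic_fetch_sub(5, &v) == 15 && v == 10);
	atomic_sub(4, &v);
	assert(v == 6);
	return 0;
}
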
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
index 36e126d2f801..d4912c88b829 100644
--- a/arch/frv/include/asm/atomic_defs.h
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -162,6 +162,8 @@ ATOMIC_EXPORT(__atomic64_fetch_##op);
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(xor)
+ATOMIC_FETCH_OP(add)
+ATOMIC_FETCH_OP(sub)
ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)
diff --git a/arch/frv/include/asm/serial.h b/arch/frv/include/asm/serial.h
index bce0d0d07e60..614c6d76789a 100644
--- a/arch/frv/include/asm/serial.h
+++ b/arch/frv/include/asm/serial.h
@@ -12,7 +12,3 @@
* the base baud is derived from the clock speed and so is variable
*/
#define BASE_BAUD 0
-
-#define STD_COM_FLAGS UPF_BOOT_AUTOCONF
-
-#define SERIAL_PORT_DFNS
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 61d99767fe16..614a46c413d2 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -164,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, ear0, flags);
+ fault = handle_mm_fault(vma, ear0, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 41907d25ed38..c9ed14f6c67d 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (pte)
clear_page(pte);
return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
struct page *page;
#ifdef CONFIG_HIGHPTE
- page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+ page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
- page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ page = alloc_pages(GFP_KERNEL, 0);
#endif
if (!page)
return NULL;
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 4435a445ae7e..349a47a918db 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -28,6 +28,19 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return ret; \
}
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ h8300flags flags; \
+ int ret; \
+ \
+ flags = arch_local_irq_save(); \
+ ret = v->counter; \
+ v->counter c_op i; \
+ arch_local_irq_restore(flags); \
+ return ret; \
+}
+
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
@@ -41,17 +54,21 @@ static inline void atomic_##op(int i, atomic_t *v) \
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP_RETURN(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add(i, v) (void)atomic_add_return(i, v)
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_return(v) atomic_add_return(1, v)
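
On a UP-only port like h8300, "atomic" just means "not interruptible", so the fetch variant is the plain read-modify-write bracketed by irq save/restore, with the pre-op value returned. A sketch of that structure with stubbed irq helpers (the real arch_local_irq_save()/arch_local_irq_restore() are h8300-specific):

#include <assert.h>

typedef unsigned long irqflags_t;

/* Stubs for illustration; the real helpers mask/unmask CPU interrupts. */
static irqflags_t fake_irq_save(void)            { return 0; }
static void       fake_irq_restore(irqflags_t f) { (void)f; }

static int counter;

static int fetch_add(int i)
{
	irqflags_t flags = fake_irq_save();	/* nothing can preempt us now */
	int ret = counter;			/* old value goes back to the caller */

	counter += i;
	fake_irq_restore(flags);
	return ret;
}

int main(void)
{
	counter = 3;
	assert(fetch_add(4) == 3 && counter == 7);
	return 0;
}
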
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 57298e7b4867..1941e4baaee6 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -8,8 +8,7 @@ config HEXAGON
# select HAVE_REGS_AND_STACK_ACCESS_API
# select HAVE_HW_BREAKPOINT if PERF_EVENTS
# select ARCH_HAS_CPU_IDLE_WAIT
- # select ARCH_WANT_OPTIONAL_GPIOLIB
- # select ARCH_REQUIRE_GPIOLIB
+ # select GPIOLIB
# select HAVE_CLK
# select GENERIC_PENDING_IRQ if SMP
select GENERIC_ATOMIC64
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 55696c4100d4..a62ba368b27d 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -110,7 +110,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
); \
} \
-#define ATOMIC_OP_RETURN(op) \
+#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
@@ -127,16 +127,37 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return output; \
}
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int output, val; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%2);\n" \
+ " %1 = "#op "(%0,%3);\n" \
+ " memw_locked(%2,P3)=%1;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output), "=&r" (val) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+ return output; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
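
The hexagon ATOMIC_FETCH_OP asm follows the usual LL/SC retry loop: load-locked the counter, compute the new value into a scratch register, attempt the store-conditional, and branch back if it failed; the locked load is what gets returned. A compare-exchange analogue in portable C, with GCC/Clang __atomic builtins in place of memw_locked:

#include <assert.h>

/* Retry loop returning the pre-op value, in the spirit of the LL/SC asm. */
static int fetch_add_cas(int *v, int i)
{
	int old = __atomic_load_n(v, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(v, &old, old + i,
					    /* weak */ 1,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;	/* 'old' was reloaded with the current value; retry */

	return old;
}

int main(void)
{
	int v = 40;

	assert(fetch_add_cas(&v, 2) == 40 && v == 42);
	return 0;
}
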
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 77da3b0ae3c2..eeebf862c46c 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
{
struct page *pte;
- pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO;
return (pte_t *) __get_free_page(flags);
}
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index 12ca4ebc0338..a1c55788c5d6 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -23,6 +23,8 @@
#define _ASM_SPINLOCK_H
#include <asm/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
/*
* This file is pulled in for SMP builds.
@@ -176,8 +178,12 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
* SMP spinlocks are intended to allow only a single CPU at the lock
*/
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(lock) \
- do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, !VAL);
+}
+
#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 8704c9320032..bd7c251e2bce 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -101,7 +101,7 @@ good_area:
break;
}
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index f80758cb7157..6a15083cc366 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -39,13 +39,12 @@ config IA64
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_LEGACY
- select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
- select ARCH_THREAD_INFO_ALLOCATOR
+ select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL_OLD
select SYSCTL_ARCH_UNALIGN_NO_WARN
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index aa0fdf125aba..a3d0211970e9 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -140,6 +140,9 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
}
}
}
+
+extern void acpi_numa_fixup(void);
+
#endif /* CONFIG_ACPI_NUMA */
#endif /*__KERNEL__*/
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 8dfb5f6f6c35..f565ad376142 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -42,8 +42,27 @@ ia64_atomic_##op (int i, atomic_t *v) \
return new; \
}
-ATOMIC_OP(add, +)
-ATOMIC_OP(sub, -)
+#define ATOMIC_FETCH_OP(op, c_op) \
+static __inline__ int \
+ia64_atomic_fetch_##op (int i, atomic_t *v) \
+{ \
+ __s32 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+ return old; \
+}
+
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(add, +)
+ATOMIC_OPS(sub, -)
#define atomic_add_return(i,v) \
({ \
@@ -69,14 +88,44 @@ ATOMIC_OP(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
-ATOMIC_OP(and, &)
-ATOMIC_OP(or, |)
-ATOMIC_OP(xor, ^)
+#define atomic_fetch_add(i,v) \
+({ \
+ int __ia64_aar_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+ || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+ || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+ || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+ ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
+ : ia64_atomic_fetch_add(__ia64_aar_i, v); \
+})
+
+#define atomic_fetch_sub(i,v) \
+({ \
+ int __ia64_asr_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+ || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+ || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+ || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+ ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
+ : ia64_atomic_fetch_sub(__ia64_asr_i, v); \
+})
+
+ATOMIC_FETCH_OP(and, &)
+ATOMIC_FETCH_OP(or, |)
+ATOMIC_FETCH_OP(xor, ^)
+
+#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
+#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
+#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
-#define atomic_and(i,v) (void)ia64_atomic_and(i,v)
-#define atomic_or(i,v) (void)ia64_atomic_or(i,v)
-#define atomic_xor(i,v) (void)ia64_atomic_xor(i,v)
+#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
+#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
+#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
#define ATOMIC64_OP(op, c_op) \
@@ -94,8 +143,27 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
return new; \
}
-ATOMIC64_OP(add, +)
-ATOMIC64_OP(sub, -)
+#define ATOMIC64_FETCH_OP(op, c_op) \
+static __inline__ long \
+ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \
+{ \
+ __s64 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic64_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+ return old; \
+}
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +)
+ATOMIC64_OPS(sub, -)
#define atomic64_add_return(i,v) \
({ \
@@ -121,14 +189,44 @@ ATOMIC64_OP(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define atomic64_fetch_add(i,v) \
+({ \
+ long __ia64_aar_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+ || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+ || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+ || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+ ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
+ : ia64_atomic64_fetch_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_fetch_sub(i,v) \
+({ \
+ long __ia64_asr_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+ || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+ || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+ || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+ ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
+ : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
+})
+
+ATOMIC64_FETCH_OP(and, &)
+ATOMIC64_FETCH_OP(or, |)
+ATOMIC64_FETCH_OP(xor, ^)
+
+#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
+#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
+#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
-#define atomic64_and(i,v) (void)ia64_atomic64_and(i,v)
-#define atomic64_or(i,v) (void)ia64_atomic64_or(i,v)
-#define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v)
+#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
+#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
+#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
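
The atomic_fetch_add/sub macros above keep ia64's trick of dispatching at compile time: when the increment is a constant that the fetchadd instruction can encode directly (plus or minus 1, 4, 8, 16), use it; otherwise fall back to the cmpxchg loop. A stripped-down sketch of that __builtin_constant_p dispatch, with both paths stubbed (and not actually atomic); only the selection logic is the point:

#include <assert.h>

static int fast_calls, slow_calls;

/* Stubs: the real fast path is the fetchadd instruction, the slow path
 * a cmpxchg retry loop. */
static int fetch_add_fast(int *v, int i) { fast_calls++; int o = *v; *v += i; return o; }
static int fetch_add_slow(int *v, int i) { slow_calls++; int o = *v; *v += i; return o; }

/* Compile-time dispatch in the style of ia64's atomic_fetch_add(). */
#define fetch_add(i, v)							\
({									\
	int __i = (i);							\
	(__builtin_constant_p(i) &&					\
	 (__i == 1 || __i == 4 || __i == 8 || __i == 16 ||		\
	  __i == -1 || __i == -4 || __i == -8 || __i == -16))		\
		? fetch_add_fast((v), __i)				\
		: fetch_add_slow((v), __i);				\
})

int main(int argc, char **argv)
{
	int v = 0;

	(void)argv;
	fetch_add(4, &v);		/* literal, encodable: fast path */
	fetch_add(argc + 2, &v);	/* runtime value: slow path */
	assert(fast_calls == 1 && slow_calls == 1);
	return 0;
}
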
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
index f41e66d65e31..28cb819e0ff9 100644
--- a/arch/ia64/include/asm/mutex.h
+++ b/arch/ia64/include/asm/mutex.h
@@ -82,7 +82,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- if (cmpxchg_acq(count, 1, 0) == 1)
+ if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
return 1;
return 0;
}
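
The mutex trylock change is a contention optimization rather than a semantic one: reading the count first and only attempting the cmpxchg when it can possibly succeed avoids bouncing the lock's cache line on every failed trylock. A sketch of the pattern with C11 atomics, using the same count encoding as this fastpath (1 = unlocked, 0 = locked):

#include <stdatomic.h>
#include <assert.h>

static int trylock(atomic_int *count)
{
	int expected = 1;

	/* Cheap read first: skip the RMW entirely if the lock is taken. */
	if (atomic_load_explicit(count, memory_order_relaxed) != 1)
		return 0;

	return atomic_compare_exchange_strong_explicit(count, &expected, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}

int main(void)
{
	atomic_int count = 1;

	assert(trylock(&count) == 1);	/* got it */
	assert(trylock(&count) == 0);	/* already held: no cmpxchg issued */
	return 0;
}
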
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 8b23e070b844..8fa98dd303b4 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -40,7 +40,7 @@
static inline void
__down_read (struct rw_semaphore *sem)
{
- long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
+ long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
if (result < 0)
rwsem_down_read_failed(sem);
@@ -55,9 +55,9 @@ ___down_write (struct rw_semaphore *sem)
long old, new;
do {
- old = sem->count;
+ old = atomic_long_read(&sem->count);
new = old + RWSEM_ACTIVE_WRITE_BIAS;
- } while (cmpxchg_acq(&sem->count, old, new) != old);
+ } while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
return old;
}
@@ -85,7 +85,7 @@ __down_write_killable (struct rw_semaphore *sem)
static inline void
__up_read (struct rw_semaphore *sem)
{
- long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
+ long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
@@ -100,9 +100,9 @@ __up_write (struct rw_semaphore *sem)
long old, new;
do {
- old = sem->count;
+ old = atomic_long_read(&sem->count);
new = old - RWSEM_ACTIVE_WRITE_BIAS;
- } while (cmpxchg_rel(&sem->count, old, new) != old);
+ } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
@@ -115,8 +115,8 @@ static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
long tmp;
- while ((tmp = sem->count) >= 0) {
- if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
+ while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+ if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
return 1;
}
}
@@ -129,8 +129,8 @@ __down_read_trylock (struct rw_semaphore *sem)
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
- long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
+ long tmp = atomic_long_cmpxchg_acquire(&sem->count,
+ RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
@@ -143,19 +143,12 @@ __downgrade_write (struct rw_semaphore *sem)
long old, new;
do {
- old = sem->count;
+ old = atomic_long_read(&sem->count);
new = old - RWSEM_WAITING_BIAS;
- } while (cmpxchg_rel(&sem->count, old, new) != old);
+ } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
if (old < 0)
rwsem_downgrade_wake(sem);
}
-/*
- * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
- * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
- */
-#define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-
#endif /* _ASM_IA64_RWSEM_H */
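
The rwsem hunks track the tree-wide switch of sem->count from a plain long to atomic_long_t: reads go through atomic_long_read() and the open-coded cmpxchg_acq/cmpxchg_rel calls become atomic_long_cmpxchg_acquire/release. A compressed sketch of the write-lock fast path after the conversion, with C11 atomics standing in for the kernel's atomic_long_* API; the bias constant is illustrative, not ia64's real value:

#include <stdatomic.h>
#include <assert.h>

#define ACTIVE_WRITE_BIAS (-100L)	/* illustrative, not the kernel's value */

/* __down_write(): add the write bias with acquire semantics via a CAS loop. */
static long down_write_count(_Atomic long *count)
{
	long old, new;

	do {
		old = atomic_load_explicit(count, memory_order_relaxed);
		new = old + ACTIVE_WRITE_BIAS;
	} while (!atomic_compare_exchange_weak_explicit(count, &old, new,
							memory_order_acquire,
							memory_order_relaxed));
	return old;	/* caller decides whether the slow path is needed */
}

int main(void)
{
	_Atomic long count = 0;

	assert(down_write_count(&count) == 0);
	assert(atomic_load(&count) == ACTIVE_WRITE_BIAS);
	return 0;
}
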
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 45698cd15b7b..ca9e76149a4a 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -15,6 +15,8 @@
#include <linux/atomic.h>
#include <asm/intrinsics.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
#define arch_spin_lock_init(x) ((x)->lock = 0)
@@ -86,6 +88,8 @@ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
return;
cpu_relax();
}
+
+ smp_acquire__after_ctrl_dep();
}
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index aa995b67c3f5..d1212b84fb83 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -48,15 +48,15 @@ struct thread_info {
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node) \
- ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node) \
+ ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
#define task_thread_info(tsk) ((struct thread_info *) 0)
#endif
-#define free_thread_info(ti) /* nothing */
+#define free_thread_stack(ti) /* nothing */
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 39d64e0df1de..77e541cf0e5d 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -205,17 +205,18 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
* must be delayed until after the TLB has been flushed (see comments at the beginning of
* this file).
*/
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
+ if (tlb->nr == tlb->max)
+ return true;
+
tlb->need_flush = 1;
if (!tlb->nr && tlb->pages == tlb->local)
__tlb_alloc_page(tlb);
tlb->pages[tlb->nr++] = page;
- VM_BUG_ON(tlb->nr > tlb->max);
-
- return tlb->max - tlb->nr;
+ return false;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -235,8 +236,28 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (!__tlb_remove_page(tlb, page))
+ if (__tlb_remove_page(tlb, page)) {
tlb_flush_mmu(tlb);
+ __tlb_remove_page(tlb, page);
+ }
+}
+
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return tlb_remove_page(tlb, page);
}
/*
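
With the new boolean contract in the tlb.h hunk, __tlb_remove_page() reports "batch full, page not queued" by returning true, and tlb_remove_page() reacts by flushing and then re-queuing the same page. A toy model of that control flow with a fixed-size batch and a stub flush; the real mmu_gather carries considerably more state:

#include <assert.h>

#define BATCH_MAX 2

struct gather {
	int nr;
	int flushes;
	unsigned long pages[BATCH_MAX];
};

/* Returns true when the batch is full and the page was NOT queued. */
static _Bool queue_page(struct gather *tlb, unsigned long page)
{
	if (tlb->nr == BATCH_MAX)
		return 1;
	tlb->pages[tlb->nr++] = page;
	return 0;
}

static void flush(struct gather *tlb) { tlb->flushes++; tlb->nr = 0; }

static void remove_page(struct gather *tlb, unsigned long page)
{
	if (queue_page(tlb, page)) {	/* batch was full */
		flush(tlb);
		queue_page(tlb, page);	/* retry; guaranteed to fit now */
	}
}

int main(void)
{
	struct gather tlb = { 0 };

	remove_page(&tlb, 0x1000);
	remove_page(&tlb, 0x2000);
	remove_page(&tlb, 0x3000);	/* triggers one flush, then queues */
	assert(tlb.flushes == 1 && tlb.nr == 1);
	return 0;
}
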
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index b1698bc042c8..92b7bc956795 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -524,7 +524,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
return 0;
}
-void __init acpi_numa_arch_fixup(void)
+void __init acpi_numa_fixup(void)
{
int i, j, node_from, node_to;
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index f9efe9739d3f..0eaa89f3defd 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* handled. This is done by having a special ".data..init_task" section...
*/
#define init_thread_info init_task_mem.s.thread_info
+#define init_stack init_task_mem.stack
union {
struct {
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 2029a38a72ae..afddb3e80a29 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -552,6 +552,7 @@ setup_arch (char **cmdline_p)
early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
+ acpi_numa_fixup();
# ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
# endif
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 70b40d1205a6..fa6ad95e992e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -159,7 +159,7 @@ retry:
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/m32r/boot/compressed/m32r_sio.c b/arch/m32r/boot/compressed/m32r_sio.c
index 01d877c6868f..cf3023dced49 100644
--- a/arch/m32r/boot/compressed/m32r_sio.c
+++ b/arch/m32r/boot/compressed/m32r_sio.c
@@ -8,12 +8,13 @@
#include <asm/processor.h>
-static void putc(char c);
+static void m32r_putc(char c);
static int puts(const char *s)
{
char c;
- while ((c = *s++)) putc(c);
+ while ((c = *s++))
+ m32r_putc(c);
return 0;
}
@@ -41,7 +42,7 @@ static int puts(const char *s)
#define BOOT_SIO0TXB PLD_ESIO0TXB
#endif
-static void putc(char c)
+static void m32r_putc(char c)
{
while ((*BOOT_SIO0STS & 0x3) != 0x3)
cpu_relax();
@@ -61,7 +62,7 @@ static void putc(char c)
#define SIO0TXB (volatile unsigned short *)(0x00efd000 + 30)
#endif
-static void putc(char c)
+static void m32r_putc(char c)
{
while ((*SIO0STS & 0x1) == 0)
cpu_relax();
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index ea35160d632b..640cc1c7099f 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -89,16 +89,44 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int result, val; \
+ \
+ local_irq_save(flags); \
+ __asm__ __volatile__ ( \
+ "# atomic_fetch_" #op " \n\t" \
+ DCACHE_CLEAR("%0", "r4", "%2") \
+ M32R_LOCK" %1, @%2; \n\t" \
+ "mv %0, %1 \n\t" \
+ #op " %1, %3; \n\t" \
+ M32R_UNLOCK" %1, @%2; \n\t" \
+ : "=&r" (result), "=&r" (val) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory" \
+ __ATOMIC_CLOBBER \
+ ); \
+ local_irq_restore(flags); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index fa13694eaae3..323c7fc953cd 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -13,6 +13,8 @@
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,8 +29,11 @@
#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->slock, VAL > 0);
+}
/**
* arch_spin_trylock - Try spin lock and return a result
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index b727e693c805..23f26f4adfff 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(smp_flush_tlb_page);
#endif
+extern int __ucmpdi2(unsigned long long a, unsigned long long b);
+EXPORT_SYMBOL(__ucmpdi2);
+
/* compiler generated symbol */
extern void __ashldi3(void);
extern void __ashrdi3(void);
diff --git a/arch/m32r/lib/Makefile b/arch/m32r/lib/Makefile
index d16b4e40d1ae..5889eb9610b5 100644
--- a/arch/m32r/lib/Makefile
+++ b/arch/m32r/lib/Makefile
@@ -3,5 +3,5 @@
#
lib-y := checksum.o ashxdi3.o memset.o memcpy.o \
- delay.o strlen.o usercopy.o csum_partial_copy.o
-
+ delay.o strlen.o usercopy.o csum_partial_copy.o \
+ ucmpdi2.o
diff --git a/arch/m32r/lib/libgcc.h b/arch/m32r/lib/libgcc.h
new file mode 100644
index 000000000000..267aa435bc35
--- /dev/null
+++ b/arch/m32r/lib/libgcc.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_LIBGCC_H
+#define __ASM_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+ int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+ int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+ struct DWstruct s;
+ long long ll;
+} DWunion;
+
+#endif /* __ASM_LIBGCC_H */
diff --git a/arch/m32r/lib/ucmpdi2.c b/arch/m32r/lib/ucmpdi2.c
new file mode 100644
index 000000000000..9d3c682c89b5
--- /dev/null
+++ b/arch/m32r/lib/ucmpdi2.c
@@ -0,0 +1,17 @@
+#include "libgcc.h"
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+ const DWunion au = {.ll = a};
+ const DWunion bu = {.ll = b};
+
+ if ((unsigned int)au.s.high < (unsigned int)bu.s.high)
+ return 0;
+ else if ((unsigned int)au.s.high > (unsigned int)bu.s.high)
+ return 2;
+ if ((unsigned int)au.s.low < (unsigned int)bu.s.low)
+ return 0;
+ else if ((unsigned int)au.s.low > (unsigned int)bu.s.low)
+ return 2;
+ return 1;
+}
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 8f9875b7933d..a3785d3644c2 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ good_area:
*/
addr = (address & PAGE_MASK);
set_thread_fault_code(error_code);
- fault = handle_mm_fault(mm, vma, addr, flags);
+ fault = handle_mm_fault(vma, addr, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/m68k/coldfire/head.S b/arch/m68k/coldfire/head.S
index fa31be297b85..73d92ea0ce65 100644
--- a/arch/m68k/coldfire/head.S
+++ b/arch/m68k/coldfire/head.S
@@ -288,7 +288,7 @@ _clear_bss:
#endif
/*
- * Assember start up done, start code proper.
+ * Assembler start up done, start code proper.
*/
jsr start_kernel /* start Linux kernel */
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index c525e4c08f84..9abb1a441da0 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -111,7 +111,7 @@ void __init config_BSP(char *commandp, int size)
/***************************************************************************/
/*
- * Some 5272 based boards have the FEC ethernet diectly connected to
+ * Some 5272 based boards have the FEC ethernet directly connected to
* an ethernet switch. In this case we need to use the fixed phy type,
* and we need to declare it early in boot.
*/
diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c
index 821de928dc3f..6a640be48568 100644
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -42,7 +42,7 @@ static unsigned long iospace;
/*
* We need to be carefull probing on bus 0 (directly connected to host
- * bridge). We should only acccess the well defined possible devices in
+ * bridge). We should only access the well defined possible devices in
* use, ignore aliases and the like.
*/
static unsigned char mcf_host_slot2sid[32] = {
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 3ee6976f6088..8f5b6f7dd136 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -359,6 +360,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -553,7 +555,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index e96787ffcbce..31bded9c83d4 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -512,7 +514,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 083fe6beac14..0d7739e04ae2 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -350,6 +351,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -533,7 +535,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 475130c06dcb..2cbb5c465fec 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 4339658c200f..96102a42c156 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -514,7 +516,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 831cc8c3a2e2..97d88f7dc5a7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -357,6 +358,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -536,7 +538,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 6377afeb522b..be25ef208f0f 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -390,6 +391,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -616,7 +618,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 4304b3d56262..a008344360c9 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -339,6 +340,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 074bda4094ff..6735a25f36d4 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 07b9fa8d7f2e..780c6e9f6cf9 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -346,6 +347,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -527,7 +529,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 36e6fae02d45..44693cf361e5 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 903acf929511..ef0071d61158 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
index 78cb60f5bb4d..9bbffebe3eb5 100644
--- a/arch/m68k/ifpsp060/src/fpsp.S
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -10191,7 +10191,7 @@ xdnrm_con:
xdnrm_sd:
mov.l %a1,-(%sp)
tst.b LOCAL_EX(%a0) # is denorm pos or neg?
- smi.b %d1 # set d0 accodingly
+ smi.b %d1 # set d0 accordingly
bsr.l unf_sub
mov.l (%sp)+,%a1
xdnrm_exit:
@@ -10990,7 +10990,7 @@ src_qnan_m:
# routines where an instruction is selected by an index into
# a large jump table corresponding to a given instruction which
# has been decoded. Flow continues here where we now decode
-# further accoding to the source operand type.
+# further according to the source operand type.
#
global fsinh
@@ -23196,14 +23196,14 @@ m_sign:
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
-# 2. Check M16 and the digits in lwords 2 and 3 in decending order.
+# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
-# 2. Check the digits in lwords 3 and 2 in decending order.
+# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
diff --git a/arch/m68k/ifpsp060/src/pfpsp.S b/arch/m68k/ifpsp060/src/pfpsp.S
index 4aedef973cf6..3535e6c87eec 100644
--- a/arch/m68k/ifpsp060/src/pfpsp.S
+++ b/arch/m68k/ifpsp060/src/pfpsp.S
@@ -13156,14 +13156,14 @@ m_sign:
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
-# 2. Check M16 and the digits in lwords 2 and 3 in decending order.
+# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
-# 2. Check the digits in lwords 3 and 2 in decending order.
+# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 4858178260f9..cf4c3a7b1a45 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -53,6 +53,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return t; \
}
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int t, tmp; \
+ \
+ __asm__ __volatile__( \
+ "1: movel %2,%1\n" \
+ " " #asm_op "l %3,%1\n" \
+ " casl %2,%1,%0\n" \
+ " jne 1b" \
+ : "+m" (*v), "=&d" (t), "=&d" (tmp) \
+ : "g" (i), "2" (atomic_read(v))); \
+ return tmp; \
+}
+
#else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
@@ -68,20 +83,41 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
return t; \
}
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned long flags; \
+ int t; \
+ \
+ local_irq_save(flags); \
+ t = v->counter; \
+ v->counter c_op i; \
+ local_irq_restore(flags); \
+ \
+ return t; \
+}
+
#endif /* CONFIG_RMW_INSNS */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, eor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index 429fe26e320c..208b4daa14b3 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -18,7 +18,7 @@
* AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
* APR/18/2002 : added proper support for MCF5272 DMA controller.
diff --git a/arch/m68k/include/asm/m525xsim.h b/arch/m68k/include/asm/m525xsim.h
index f186459072e9..699f20c8a0fe 100644
--- a/arch/m68k/include/asm/m525xsim.h
+++ b/arch/m68k/include/asm/m525xsim.h
@@ -123,10 +123,10 @@
/*
* I2C module.
*/
-#define MCFI2C_BASE0 (MCF_MBAR + 0x280) /* Base addreess I2C0 */
+#define MCFI2C_BASE0 (MCF_MBAR + 0x280) /* Base address I2C0 */
#define MCFI2C_SIZE0 0x20 /* Register set size */
-#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base addreess I2C1 */
+#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base address I2C1 */
#define MCFI2C_SIZE1 0x20 /* Register set size */
/*
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index f9924fbcfe42..fb95aed5f428 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+ unsigned long page = __get_free_page(GFP_DMA);
if (!page)
return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+ struct page *page = alloc_pages(GFP_DMA, 0);
pte_t *pte;
if (!page)
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
index 26cc3d5a63f8..8824236e303f 100644
--- a/arch/m68k/include/asm/mcfmmu.h
+++ b/arch/m68k/include/asm/mcfmmu.h
@@ -38,7 +38,7 @@
/*
* MMU Operation register.
*/
-#define MMUOR_UAA 0x00000001 /* Update allocatiom address */
+#define MMUOR_UAA 0x00000001 /* Update allocation address */
#define MMUOR_ACC 0x00000002 /* TLB access */
#define MMUOR_RD 0x00000004 /* TLB access read */
#define MMUOR_WR 0x00000000 /* TLB access write */
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 24bcba496c75..c895b987202c 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
{
pte_t *pte;
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (pte) {
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
struct page *page;
pte_t *pte;
- page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
if (!pgtable_page_ctor(page)) {
diff --git a/arch/m68k/include/asm/q40_master.h b/arch/m68k/include/asm/q40_master.h
index fc5b36278d04..c48d21b68f04 100644
--- a/arch/m68k/include/asm/q40_master.h
+++ b/arch/m68k/include/asm/q40_master.h
@@ -1,6 +1,6 @@
/*
* Q40 master Chip Control
- * RTC stuff merged for compactnes..
+ * RTC stuff merged for compactness.
*/
#ifndef _Q40_MASTER_H
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 0931388de47f..1901f61f926f 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -37,7 +37,7 @@ do { \
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ unsigned long page = __get_free_page(GFP_KERNEL);
if (!page)
return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ struct page *page = alloc_pages(GFP_KERNEL, 0);
if (page == NULL)
return NULL;
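
All three m68k page-table allocators above drop __GFP_REPEAT for the same reason: the flag only ever influenced "costly" allocations above PAGE_ALLOC_COSTLY_ORDER, and these are single-page (order-0) requests, so it was a no-op. A hypothetical check capturing the rule, assuming the mainline constant:

	static bool gfp_repeat_was_relevant(unsigned int order)
	{
		/* PAGE_ALLOC_COSTLY_ORDER is 3 in mainline; pte pages are order 0 */
		return order > PAGE_ALLOC_COSTLY_ORDER;
	}
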
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 4d2adfb32a2a..7990b6f50105 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -60,7 +60,7 @@
*
* The host talks to the IOPs using a rather simple message-passing scheme via
* a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
- * channel is conneced to a specific software driver on the IOP. For example
+ * channel is connected to a specific software driver on the IOP. For example
* on the SCC IOP there is one channel for each serial port. Each channel has
 * an incoming and outgoing message queue with a depth of one.
*
diff --git a/arch/m68k/math-emu/fp_decode.h b/arch/m68k/math-emu/fp_decode.h
index 759679d9ab96..6d1e760e2a0e 100644
--- a/arch/m68k/math-emu/fp_decode.h
+++ b/arch/m68k/math-emu/fp_decode.h
@@ -130,7 +130,7 @@ do_fscc=0
bfextu %d2{#13,#3},%d0
.endm
-| decode the 8bit diplacement from the brief extension word
+| decode the 8bit displacement from the brief extension word
.macro fp_decode_disp8
move.b %d2,%d0
ext.w %d0
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 6a94cdd0c830..bd66a0b20c6b 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -136,7 +136,7 @@ good_area:
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
pr_debug("handle_mm_fault returns %d\n", fault);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 88fa25fae8bd..def2c642f053 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -69,16 +69,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int result, temp; \
+ \
+ smp_mb(); \
+ \
+ asm volatile ( \
+ "1: LNKGETD %1, [%2]\n" \
+ " " #op " %0, %1, %3\n" \
+ " LNKSETD [%2], %0\n" \
+ " DEFR %0, TXSTAT\n" \
+ " ANDT %0, %0, #HI(0x3f000000)\n" \
+ " CMPT %0, #HI(0x02000000)\n" \
+ " BNZ 1b\n" \
+ : "=&d" (temp), "=&d" (result) \
+ : "da" (&v->counter), "bd" (i) \
+ : "cc"); \
+ \
+ smp_mb(); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index 0295d9b8d5bf..6c1380a8a0d4 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -64,15 +64,40 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long result; \
+ unsigned long flags; \
+ \
+ __global_lock1(flags); \
+ result = v->counter; \
+ fence(); \
+ v->counter c_op i; \
+ __global_unlock1(flags); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_OP_RETURN(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
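
With both blocks expanded, callers get the usual pairing: atomic_*_return() reports the value after the operation and atomic_fetch_*() the value before it. A usage sketch with hypothetical numbers:

	static void atomic_pairing_example(void)
	{
		atomic_t v = ATOMIC_INIT(5);
		int newval, oldval;

		newval = atomic_add_return(3, &v);	/* newval == 8, counter == 8  */
		oldval = atomic_fetch_add(3, &v);	/* oldval == 8, counter == 11 */
		(void)newval;
		(void)oldval;
	}
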
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
index 3104df0a4822..c2caa1ee4360 100644
--- a/arch/metag/include/asm/pgalloc.h
+++ b/arch/metag/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
- __GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
return pte;
}
@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
index 86a7cf3d1386..c0c7a22be1ae 100644
--- a/arch/metag/include/asm/spinlock.h
+++ b/arch/metag/include/asm/spinlock.h
@@ -1,14 +1,24 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
#ifdef CONFIG_METAG_ATOMICITY_LOCK1
#include <asm/spinlock_lock1.h>
#else
#include <asm/spinlock_lnkget.h>
#endif
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+/*
+ * both lock1 and lnkget are test-and-set spinlocks with 0 unlocked and 1
+ * locked.
+ */
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, !VAL);
+}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
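
smp_cond_load_acquire() spins on a fresh load of the word (VAL names the loaded value inside the macro) until the condition holds, then provides acquire ordering, so the open-coded busy-wait can go. Roughly what the generic fallback expands to here, assuming an unsigned int lock word:

	static inline void spin_unlock_wait_sketch(arch_spinlock_t *lock)
	{
		while (READ_ONCE(lock->lock))		/* !VAL: wait until 0 (unlocked) */
			cpu_relax();
		smp_acquire__after_ctrl_dep();		/* upgrade the control dependency to acquire */
	}
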
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 33a365f924be..052cba23708c 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -806,25 +806,16 @@ static struct metag_pmu _metag_pmu = {
};
/* PMU CPU hotplug notifier */
-static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
- void *hcpu)
+static int metag_pmu_starting_cpu(unsigned int cpu)
{
- unsigned int cpu = (unsigned int)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
- return NOTIFY_DONE;
-
memset(cpuc, 0, sizeof(struct cpu_hw_events));
raw_spin_lock_init(&cpuc->pmu_lock);
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block metag_pmu_notifier = {
- .notifier_call = metag_pmu_cpu_notify,
-};
-
/* PMU Initialisation */
static int __init init_hw_perf_events(void)
{
@@ -876,16 +867,13 @@ static int __init init_hw_perf_events(void)
metag_out32(0, PERF_COUNT(0));
metag_out32(0, PERF_COUNT(1));
- for_each_possible_cpu(cpu) {
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
+ "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
+ NULL);
- memset(cpuc, 0, sizeof(struct cpu_hw_events));
- raw_spin_lock_init(&cpuc->pmu_lock);
- }
-
- register_cpu_notifier(&metag_pmu_notifier);
ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
-out:
+ if (ret)
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING);
return ret;
}
early_initcall(init_hw_perf_events);
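
The notifier-to-hotplug-state conversion is mechanical: the CPU_STARTING branch of the old callback becomes the startup handler, the teardown stays NULL, and cpuhp_setup_state() also runs the handler on every CPU that is already online, which is why the old per-CPU initialisation loop disappears; on later failure the state is removed with cpuhp_remove_state_nocalls(). A generic sketch with hypothetical driver names, using a dynamically allocated online-section state rather than the dedicated CPUHP_AP_PERF_METAG_STARTING entry:

	static int mydrv_starting_cpu(unsigned int cpu)
	{
		/* per-CPU setup formerly done in the CPU_STARTING notifier case */
		return 0;
	}

	static int __init mydrv_init(void)
	{
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:starting",
					mydrv_starting_cpu, NULL);
		return ret < 0 ? ret : 0;
	}
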
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index 31cf53d0eba2..1166f1fbfd63 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -20,7 +20,6 @@
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
@@ -414,9 +413,7 @@ static int __init customize_machine(void)
/* customizes platform devices, or adds new ones */
if (machine_desc->init_machine)
machine_desc->init_machine();
- else
- of_platform_populate(NULL, of_default_bus_match_table, NULL,
- NULL);
+
return 0;
}
arch_initcall(customize_machine);
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index f57edca63609..372783a67dda 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return 0;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 636e0720fb20..86f65721e629 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -3,7 +3,6 @@ config MICROBLAZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
- select ARCH_WANT_OPTIONAL_GPIOLIB
select BUILDTIME_EXTABLE_SORT
select CLKSRC_OF
select CLONE_BACKWARDS3
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 61436d69775c..7c89390c0c13 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ int flags = GFP_KERNEL | __GFP_HIGHMEM;
#else
- int flags = GFP_KERNEL | __GFP_REPEAT;
+ int flags = GFP_KERNEL;
#endif
ptepage = alloc_pages(flags, 0);
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 67e2ef48d2d0..5bbf38b916ef 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -170,7 +170,7 @@ static struct irqaction timer_irqaction = {
.dev_id = &clockevent_xilinx_timer,
};
-static __init void xilinx_clockevent_init(void)
+static __init int xilinx_clockevent_init(void)
{
clockevent_xilinx_timer.mult =
div_sc(timer_clock_freq, NSEC_PER_SEC,
@@ -181,6 +181,8 @@ static __init void xilinx_clockevent_init(void)
clockevent_delta2ns(1, &clockevent_xilinx_timer);
clockevent_xilinx_timer.cpumask = cpumask_of(0);
clockevents_register_device(&clockevent_xilinx_timer);
+
+ return 0;
}
static u64 xilinx_clock_read(void)
@@ -229,8 +231,14 @@ static struct clocksource clocksource_microblaze = {
static int __init xilinx_clocksource_init(void)
{
- if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq))
- panic("failed to register clocksource");
+ int ret;
+
+ ret = clocksource_register_hz(&clocksource_microblaze,
+ timer_clock_freq);
+ if (ret) {
+ pr_err("failed to register clocksource");
+ return ret;
+ }
/* stop timer1 */
write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
@@ -239,16 +247,16 @@ static int __init xilinx_clocksource_init(void)
write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
/* register timecounter - for ftrace support */
- init_xilinx_timecounter();
- return 0;
+ return init_xilinx_timecounter();
}
-static void __init xilinx_timer_init(struct device_node *timer)
+static int __init xilinx_timer_init(struct device_node *timer)
{
struct clk *clk;
static int initialized;
u32 irq;
u32 timer_num = 1;
+ int ret;
if (initialized)
return;
@@ -258,7 +266,7 @@ static void __init xilinx_timer_init(struct device_node *timer)
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
- BUG();
+ return -ENXIO;
}
write_fn = timer_write32;
@@ -271,11 +279,15 @@ static void __init xilinx_timer_init(struct device_node *timer)
}
irq = irq_of_parse_and_map(timer, 0);
+ if (irq <= 0) {
+ pr_err("Failed to parse and map irq");
+ return -EINVAL;
+ }
of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
if (timer_num) {
- pr_emerg("Please enable two timers in HW\n");
- BUG();
+ pr_err("Please enable two timers in HW\n");
+ return -EINVAL;
}
pr_info("%s: irq=%d\n", timer->full_name, irq);
@@ -297,14 +309,27 @@ static void __init xilinx_timer_init(struct device_node *timer)
freq_div_hz = timer_clock_freq / HZ;
- setup_irq(irq, &timer_irqaction);
+ ret = setup_irq(irq, &timer_irqaction);
+ if (ret) {
+ pr_err("Failed to setup IRQ");
+ return ret;
+ }
+
#ifdef CONFIG_HEART_BEAT
microblaze_setup_heartbeat();
#endif
- xilinx_clocksource_init();
- xilinx_clockevent_init();
+
+ ret = xilinx_clocksource_init();
+ if (ret)
+ return ret;
+
+ ret = xilinx_clockevent_init();
+ if (ret)
+ return ret;
sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
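
CLOCKSOURCE_OF_DECLARE() init routines are now expected to return int so the probing core can report failures instead of each driver calling panic() or BUG(); the conversion above simply propagates the first error from iomap, IRQ parsing, clocksource and clockevent setup. In outline, for a hypothetical driver with a fixed 50 MHz clock:

	static int __init mytimer_init(struct device_node *np)
	{
		void __iomem *base = of_iomap(np, 0);
		int irq, ret;

		if (!base)
			return -ENXIO;

		irq = irq_of_parse_and_map(np, 0);
		if (!irq)
			return -EINVAL;

		ret = clocksource_mmio_init(base, np->name, 50000000, 300, 32,
					    clocksource_mmio_readl_up);
		if (ret)
			return ret;

		return 0;
	}
	CLOCKSOURCE_OF_DECLARE(mytimer, "vendor,mytimer", mytimer_init);
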
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 177dfc003643..abb678ccde6f 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -216,7 +216,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 4f4520e779a5..eb99fcc76088 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
if (mem_init_done) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL |
- __GFP_REPEAT | __GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
} else {
pte = (pte_t *)early_get_page();
if (pte)
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 7adab180e0ca..3a0019deb7f7 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
@@ -285,7 +284,6 @@ void __init plat_time_init(void)
static int __init ath79_setup(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
if (mips_machtype == ATH79_MACH_GENERIC_OF)
return 0;
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 7f50dd67aa8d..65f140e1e872 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -146,7 +146,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index a9d433a17fcf..799c4338fd5e 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -147,7 +147,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 2774ef064505..31846000530f 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -152,7 +152,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 9bbd2218f0bf..a79107da0675 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -146,7 +146,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index f8bf9b4c1343..43d55e5abacb 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -90,7 +90,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 835b402e4574..0ab176bdb8e8 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
@@ -125,23 +123,84 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
}
#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
+ : "memory"); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
#define ATOMIC64_OPS(op, c_op, asm_op) \
ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_OP_RETURN(op, c_op, asm_op)
+ ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
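
Dropping smp_mb__before_llsc()/smp_llsc_mb() from the bodies is safe because MIPS now only supplies the _relaxed flavours; the self-referencing #defines above advertise them to <linux/atomic.h>, which builds the fully ordered versions by bracketing the relaxed call with barriers, roughly:

	/* Simplified shape of the generic fence fallback in <linux/atomic.h>. */
	#define atomic_add_return(i, v)					\
	({								\
		int __ret;						\
		smp_mb__before_atomic();				\
		__ret = atomic_add_return_relaxed(i, v);		\
		smp_mb__after_atomic();					\
		__ret;							\
	})
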
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6733ac575da4..36a391d289aa 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -74,7 +74,7 @@
#define KVM_GUEST_KUSEG 0x00000000UL
#define KVM_GUEST_KSEG0 0x40000000UL
#define KVM_GUEST_KSEG23 0x60000000UL
-#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
@@ -338,6 +338,7 @@ struct kvm_mips_tlb {
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;
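
The widened KVM_GUEST_KSEGX() mask matters because a guest segment is identified by the top three address bits and 0x60000000 only inspected two of them, so addresses above the guest segments could alias into them. A worked example with a hypothetical address:

	/* old mask: 0xc0001234 & 0x60000000 == 0x40000000 -> wrongly looks like KVM_GUEST_KSEG0 */
	/* new mask: 0xc0001234 & 0xe0000000 == 0xc0000000 -> matches no guest segment           */
	static bool is_guest_kseg0(unsigned long a)
	{
		return (a & 0xe0000000UL) == KVM_GUEST_KSEG0;
	}
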
diff --git a/arch/mips/include/asm/octeon/cvmx-mpi-defs.h b/arch/mips/include/asm/octeon/cvmx-mpi-defs.h
deleted file mode 100644
index 4615b102625b..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-mpi-defs.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_MPI_DEFS_H__
-#define __CVMX_MPI_DEFS_H__
-
-#define CVMX_MPI_CFG (CVMX_ADD_IO_SEG(0x0001070000001000ull))
-#define CVMX_MPI_DATX(offset) (CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8)
-#define CVMX_MPI_STS (CVMX_ADD_IO_SEG(0x0001070000001008ull))
-#define CVMX_MPI_TX (CVMX_ADD_IO_SEG(0x0001070000001010ull))
-
-union cvmx_mpi_cfg {
- uint64_t u64;
- struct cvmx_mpi_cfg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t csena3:1;
- uint64_t csena2:1;
- uint64_t csena1:1;
- uint64_t csena0:1;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t csena0:1;
- uint64_t csena1:1;
- uint64_t csena2:1;
- uint64_t csena3:1;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } s;
- struct cvmx_mpi_cfg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_12_15:4;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t reserved_12_15:4;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn30xx;
- struct cvmx_mpi_cfg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_11_15:5;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t reserved_11_15:5;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn31xx;
- struct cvmx_mpi_cfg_cn30xx cn50xx;
- struct cvmx_mpi_cfg_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_14_15:2;
- uint64_t csena1:1;
- uint64_t csena0:1;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t reserved_6_6:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t reserved_6_6:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t csena0:1;
- uint64_t csena1:1;
- uint64_t reserved_14_15:2;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn61xx;
- struct cvmx_mpi_cfg_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t csena3:1;
- uint64_t csena2:1;
- uint64_t reserved_12_13:2;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t reserved_6_6:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t reserved_6_6:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t reserved_12_13:2;
- uint64_t csena2:1;
- uint64_t csena3:1;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn66xx;
- struct cvmx_mpi_cfg_cn61xx cnf71xx;
-};
-
-union cvmx_mpi_datx {
- uint64_t u64;
- struct cvmx_mpi_datx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t data:8;
-#else
- uint64_t data:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_mpi_datx_s cn30xx;
- struct cvmx_mpi_datx_s cn31xx;
- struct cvmx_mpi_datx_s cn50xx;
- struct cvmx_mpi_datx_s cn61xx;
- struct cvmx_mpi_datx_s cn66xx;
- struct cvmx_mpi_datx_s cnf71xx;
-};
-
-union cvmx_mpi_sts {
- uint64_t u64;
- struct cvmx_mpi_sts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_13_63:51;
- uint64_t rxnum:5;
- uint64_t reserved_1_7:7;
- uint64_t busy:1;
-#else
- uint64_t busy:1;
- uint64_t reserved_1_7:7;
- uint64_t rxnum:5;
- uint64_t reserved_13_63:51;
-#endif
- } s;
- struct cvmx_mpi_sts_s cn30xx;
- struct cvmx_mpi_sts_s cn31xx;
- struct cvmx_mpi_sts_s cn50xx;
- struct cvmx_mpi_sts_s cn61xx;
- struct cvmx_mpi_sts_s cn66xx;
- struct cvmx_mpi_sts_s cnf71xx;
-};
-
-union cvmx_mpi_tx {
- uint64_t u64;
- struct cvmx_mpi_tx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_22_63:42;
- uint64_t csid:2;
- uint64_t reserved_17_19:3;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_19:3;
- uint64_t csid:2;
- uint64_t reserved_22_63:42;
-#endif
- } s;
- struct cvmx_mpi_tx_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn30xx;
- struct cvmx_mpi_tx_cn30xx cn31xx;
- struct cvmx_mpi_tx_cn30xx cn50xx;
- struct cvmx_mpi_tx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t csid:1;
- uint64_t reserved_17_19:3;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_19:3;
- uint64_t csid:1;
- uint64_t reserved_21_63:43;
-#endif
- } cn61xx;
- struct cvmx_mpi_tx_s cn66xx;
- struct cvmx_mpi_tx_cn61xx cnf71xx;
-};
-
-#endif
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index b336037e8768..93c079a1cfc8 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd;
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+ pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
return pmd;
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index a6b611f1da43..7d44e888134f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -24,7 +24,7 @@ struct mm_struct;
struct vm_area_struct;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
- _CACHE_CACHABLE_NONCOHERENT)
+ _page_cachable_default)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
- pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
return pmd;
}
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 40196bebe849..f485afe51514 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <asm/barrier.h>
+#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>
@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- while (arch_spin_is_locked(x)) { cpu_relax(); }
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ u16 owner = READ_ONCE(lock->h.serving_now);
+ smp_rmb();
+ for (;;) {
+ arch_spinlock_t tmp = READ_ONCE(*lock);
+
+ if (tmp.h.serving_now == tmp.h.ticket ||
+ tmp.h.serving_now != owner)
+ break;
+
+ cpu_relax();
+ }
+ smp_acquire__after_ctrl_dep();
+}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
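
The ticket-lock variant of arch_spin_unlock_wait() only has to outlive the holder that was active when it was called: it snapshots serving_now, then stops either when the lock is free (serving_now == ticket) or when serving_now has moved past the snapshot, i.e. that holder has unlocked even if another CPU immediately took the lock. For example, with serving_now == 3 and ticket == 5 at entry the lock is held by ticket 3; as soon as that owner releases, serving_now becomes 4 != 3 and the wait returns, without spinning across the back-to-back acquisition by ticket 4.
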
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index 510fc0d962f2..0914ef775b5f 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/prom.h>
@@ -74,13 +73,6 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-static int __init populate_machine(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-arch_initcall(populate_machine);
-
const char *get_system_type(void)
{
if (config_enabled(CONFIG_MACH_JZ4780))
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0dcf69194473..6103b24d1bfc 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -888,17 +888,16 @@ long arch_ptrace(struct task_struct *child, long request,
*/
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
- long ret = 0;
user_exit();
current_thread_info()->syscall = syscall;
- if (secure_computing() == -1)
- return -1;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
- ret = -1;
+ return -1;
+
+ if (secure_computing(NULL) == -1)
+ return -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 396df6eb0a12..645c8a1982a7 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1636,6 +1636,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
if (index < 0) {
vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
vcpu);
preempt_enable();
@@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
* invalid exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) {
+ vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
run, vcpu);
preempt_enable();
@@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
preempt_enable();
- goto dont_update_pc;
+ goto done;
}
@@ -1694,16 +1697,20 @@ skip_fault:
kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
- preempt_enable();
- goto dont_update_pc;
}
preempt_enable();
+done:
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
dont_update_pc:
- /* Rollback PC */
- vcpu->arch.pc = curr_pc;
-done:
+ /*
+ * This is for exceptions whose emulation updates the PC, so do not
+ * overwrite the PC under any circumstances
+ */
+
return er;
}
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 4ab4bdfad703..2143884709e4 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,6 +28,7 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */
+extern char __kvm_mips_vcpu_run_end[];
extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 3ef03009de5f..828fcfc1cd7f 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
/* Jump to guest */
eret
+EXPORT(__kvm_mips_vcpu_run_end)
VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index dc052fb5c7a2..44da5259f390 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
memcpy(gebase + offset, mips32_GuestException,
mips32_GuestExceptionEnd - mips32_GuestException);
+#ifdef MODULE
+ offset += mips32_GuestExceptionEnd - mips32_GuestException;
+ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+ vcpu->arch.vcpu_run = gebase + offset;
+#else
+ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Disable hardware page table walking while in guest */
htw_stop();
- r = __kvm_mips_vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(run, vcpu);
/* Re-enable HTW before enabling interrupts */
htw_start();
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 4b88fa031891..9560ad731120 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -153,7 +153,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index 9f2f9b2b23ce..edfcaf06680d 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -8,7 +8,6 @@
*/
#include <linux/init.h>
#include <linux/libfdt.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/prom.h>
@@ -107,10 +106,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init customize_machine(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-arch_initcall(customize_machine);
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
index 8bcf7fc40f0d..85f3ee4ab456 100644
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void)
return handled;
}
-static int loongson3_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int loongson3_starting_cpu(unsigned int cpu)
{
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- write_c0_perflo1(reg.control1);
- write_c0_perflo2(reg.control2);
- break;
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
- break;
- }
-
- return NOTIFY_OK;
+ write_c0_perflo1(reg.control1);
+ write_c0_perflo2(reg.control2);
+ return 0;
}
-static struct notifier_block loongson3_notifier_block = {
- .notifier_call = loongson3_cpu_callback
-};
+static int loongson3_dying_cpu(unsigned int cpu)
+{
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+ return 0;
+}
static int __init loongson3_init(void)
{
on_each_cpu(reset_counters, NULL, 1);
- register_hotcpu_notifier(&loongson3_notifier_block);
+ cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ "AP_MIPS_OP_LOONGSON3_STARTING",
+ loongson3_starting_cpu, loongson3_dying_cpu);
save_perf_irq = perf_irq;
perf_irq = loongson3_perfcount_handler;
@@ -204,7 +197,7 @@ static int __init loongson3_init(void)
static void loongson3_exit(void)
{
on_each_cpu(reset_counters, NULL, 1);
- unregister_hotcpu_notifier(&loongson3_notifier_block);
+ cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
perf_irq = save_perf_irq;
}
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
index 775ff90a9962..77ecf32ef3dc 100644
--- a/arch/mips/pic32/pic32mzda/init.c
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -147,8 +147,7 @@ static int __init plat_of_setup(void)
panic("Device tree not present");
pic32_of_prepare_platform_data(pic32_auxdata_lookup);
- if (of_platform_populate(NULL, of_default_bus_match_table,
- pic32_auxdata_lookup, NULL))
+ if (of_platform_default_populate(NULL, pic32_auxdata_lookup, NULL))
panic("Failed to populate DT");
return 0;
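
of_platform_default_populate() is of_platform_populate() with of_default_bus_match_table supplied for you, and the DT core now issues this default populate from its own initcall, which is why the arch-specific populate stubs removed throughout this series (ath79, jz4740, sead3, pistachio, xilfpga, nios2) are deleted outright rather than converted. The helper is roughly:

	int of_platform_default_populate(struct device_node *root,
					 const struct of_dev_auxdata *lookup,
					 struct device *parent)
	{
		return of_platform_populate(root, of_default_bus_match_table,
					    lookup, parent);
	}
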
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index ab79828230ab..c50a670e60d2 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/cacheflush.h>
#include <asm/dma-coherence.h>
@@ -159,15 +158,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init plat_of_setup(void)
-{
- if (!of_have_populated_dt())
- panic("Device tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(plat_of_setup);
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 3ad0b0794f7d..f24eee04e16a 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -117,11 +117,13 @@ static int systick_set_oneshot(struct clock_event_device *evt)
return 0;
}
-static void __init ralink_systick_init(struct device_node *np)
+static int __init ralink_systick_init(struct device_node *np)
{
+ int ret;
+
systick.membase = of_iomap(np, 0);
if (!systick.membase)
- return;
+ return -ENXIO;
systick_irqaction.name = np->name;
systick.dev.name = np->name;
@@ -131,16 +133,21 @@ static void __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%s: request_irq failed", np->name);
- return;
+ return -EINVAL;
}
- clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
- SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up);
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
clockevents_register_device(&systick.dev);
pr_info("%s: running - mult: %d, shift: %d\n",
np->name, systick.dev.mult, systick.dev.shift);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 108f8a8d1640..ada92db92f87 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -727,7 +727,7 @@ void __init txx9_iocled_init(unsigned long baseaddr,
int i;
static char *default_triggers[] __initdata = {
"heartbeat",
- "ide-disk",
+ "disk-activity",
"nand-disk",
NULL,
};
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 37030409745c..8b937300fb7f 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -215,7 +215,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
int i;
static char *default_triggers[] __initdata = {
"heartbeat",
- "ide-disk",
+ "disk-activity",
"nand-disk",
};
diff --git a/arch/mips/xilfpga/init.c b/arch/mips/xilfpga/init.c
index ce2aee2169ac..602e384a26a2 100644
--- a/arch/mips/xilfpga/init.c
+++ b/arch/mips/xilfpga/init.c
@@ -10,7 +10,6 @@
*/
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/prom.h>
@@ -43,15 +42,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init plat_of_setup(void)
-{
- if (!of_have_populated_dt())
- panic("Device tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(plat_of_setup);
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index ce318d5ab23b..36389efd45e8 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -84,16 +84,41 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return retval; \
}
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int retval, status; \
+ \
+ asm volatile( \
+ "1: mov %4,(_AAR,%3) \n" \
+ " mov (_ADR,%3),%1 \n" \
+ " mov %1,%0 \n" \
+ " " #op " %5,%0 \n" \
+ " mov %0,(_ADR,%3) \n" \
+ " mov (_ADR,%3),%0 \n" /* flush */ \
+ " mov (_ASR,%3),%0 \n" \
+ " or %0,%0 \n" \
+ " bne 1b \n" \
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
+ : "memory", "cc"); \
+ return retval; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 1ae580f38933..9c7b8f7942d8 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -12,6 +12,8 @@
#define _ASM_SPINLOCK_H
#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
#include <asm/rwlock.h>
#include <asm/page.h>
@@ -23,7 +25,11 @@
*/
#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
-#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->slock, !VAL);
+}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 4861a78c7160..f5f90bbf019d 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
}
#ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
diff --git a/arch/mn10300/kernel/kgdb.c b/arch/mn10300/kernel/kgdb.c
index 99770823451a..2d7986c386fe 100644
--- a/arch/mn10300/kernel/kgdb.c
+++ b/arch/mn10300/kernel/kgdb.c
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *ti = (void *)stack;
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 4a1d181ed32f..f23781d6bbb3 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -254,7 +254,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index e77a7c728081..9577cf768875 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (pte)
clear_page(pte);
return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
struct page *pte;
#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL, 0);
#endif
if (!pte)
return NULL;
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 6e2985e0a7b9..bb47d08c8ef7 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
- PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (pte) {
if (!pgtable_page_ctor(pte)) {
__free_page(pte);
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index e835dda2bfe2..d9563ddb337e 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -206,15 +206,21 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __init nios2_timer_get_base_and_freq(struct device_node *np,
+static int __init nios2_timer_get_base_and_freq(struct device_node *np,
void __iomem **base, u32 *freq)
{
*base = of_iomap(np, 0);
- if (!*base)
- panic("Unable to map reg for %s\n", np->name);
+ if (!*base) {
+ pr_crit("Unable to map reg for %s\n", np->name);
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", freq)) {
+ pr_crit("Unable to get %s clock frequency\n", np->name);
+ return -EINVAL;
+ }
- if (of_property_read_u32(np, "clock-frequency", freq))
- panic("Unable to get %s clock frequency\n", np->name);
+ return 0;
}
static struct nios2_clockevent_dev nios2_ce = {
@@ -231,17 +237,21 @@ static struct nios2_clockevent_dev nios2_ce = {
},
};
-static __init void nios2_clockevent_init(struct device_node *timer)
+static __init int nios2_clockevent_init(struct device_node *timer)
{
void __iomem *iobase;
u32 freq;
- int irq;
+ int irq, ret;
- nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ if (ret)
+ return ret;
irq = irq_of_parse_and_map(timer, 0);
- if (!irq)
- panic("Unable to parse timer irq\n");
+ if (!irq) {
+ pr_crit("Unable to parse timer irq\n");
+ return -EINVAL;
+ }
nios2_ce.timer.base = iobase;
nios2_ce.timer.freq = freq;
@@ -253,25 +263,35 @@ static __init void nios2_clockevent_init(struct device_node *timer)
/* clear pending interrupt */
timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG);
- if (request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
- &nios2_ce.ced))
- panic("Unable to setup timer irq\n");
+ ret = request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
+ &nios2_ce.ced);
+ if (ret) {
+ pr_crit("Unable to setup timer irq\n");
+ return ret;
+ }
clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX);
+
+ return 0;
}
-static __init void nios2_clocksource_init(struct device_node *timer)
+static __init int nios2_clocksource_init(struct device_node *timer)
{
unsigned int ctrl;
void __iomem *iobase;
u32 freq;
+ int ret;
- nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ if (ret)
+ return ret;
nios2_cs.timer.base = iobase;
nios2_cs.timer.freq = freq;
- clocksource_register_hz(&nios2_cs.cs, freq);
+ ret = clocksource_register_hz(&nios2_cs.cs, freq);
+ if (ret)
+ return ret;
timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG);
timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG);
@@ -282,6 +302,8 @@ static __init void nios2_clocksource_init(struct device_node *timer)
/* Calibrate the delay loop directly */
lpj_fine = freq / HZ;
+
+ return 0;
}
/*
@@ -289,22 +311,25 @@ static __init void nios2_clocksource_init(struct device_node *timer)
* more instances, the second one gets used as clocksource and all
* others are unused.
*/
-static void __init nios2_time_init(struct device_node *timer)
+static int __init nios2_time_init(struct device_node *timer)
{
static int num_called;
+ int ret;
switch (num_called) {
case 0:
- nios2_clockevent_init(timer);
+ ret = nios2_clockevent_init(timer);
break;
case 1:
- nios2_clocksource_init(timer);
+ ret = nios2_clocksource_init(timer);
break;
default:
break;
}
num_called++;
+
+ return ret;
}
void read_persistent_clock(struct timespec *ts)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index b51878b0c6b8..affc4eb3f89e 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -131,7 +131,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/nios2/platform/platform.c b/arch/nios2/platform/platform.c
index d478773f758a..2a35154ca153 100644
--- a/arch/nios2/platform/platform.c
+++ b/arch/nios2/platform/platform.c
@@ -9,7 +9,6 @@
*/
#include <linux/init.h>
-#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/err.h>
@@ -39,8 +38,7 @@ static int __init nios2_soc_device_init(void)
}
}
- return of_platform_populate(NULL, of_default_bus_match_table,
- NULL, NULL);
+ return 0;
}
device_initcall(nios2_soc_device_init);
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 142cb057c41b..489e7f909286 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -10,7 +10,7 @@ config OPENRISC
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
select HAVE_MEMBLOCK
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select HAVE_ARCH_TRACEHOOK
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 21484e5b9e9a..87eebd185089 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL, 0);
if (!pte)
return NULL;
clear_page(page_address(pte));
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 230ac20ae794..e94cd225e816 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -163,7 +163,7 @@ good_area:
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 62b08ef392be..5b2a95116e8f 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
pte_t *pte;
if (likely(mem_init_done)) {
- pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
} else {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
#if 0
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 5b04d703a924..8688ba7f5966 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -214,7 +214,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_DMADEVICES=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index e945c08892fa..7e0792658952 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -231,7 +231,7 @@ CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_ONESHOT=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_LEDS_TRIGGER_BACKLIGHT=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 1d109990a022..5394b9c5f914 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -121,16 +121,39 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
return ret; \
}
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op) \
+static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ ret = v->counter; \
+ v->counter c_op i; \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_OP_RETURN(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -185,15 +208,39 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
return ret; \
}
-#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
+#define ATOMIC64_FETCH_OP(op, c_op) \
+static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ s64 ret; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ ret = v->counter; \
+ v->counter c_op i; \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_OP_RETURN(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
-ATOMIC64_OP(and, &=)
-ATOMIC64_OP(or, |=)
-ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
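For reference, a minimal usage sketch of the fetch-style atomics that the new ATOMIC_FETCH_OP()/ATOMIC64_FETCH_OP() macros generate (illustrative only, not part of the patch): atomic_fetch_add() returns the value the counter held before the addition, whereas the existing atomic_add_return() returns the new value.

	atomic_t v = ATOMIC_INIT(3);
	int old = atomic_fetch_add(2, &v);	/* old == 3, v.counter == 5 */
	int new = atomic_add_return(2, &v);	/* new == 7, v.counter == 7 */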
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f2fd327dce2e..f08dda3f0995 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
- PMD_ORDER);
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 64f2992e439f..e32936cd7f10 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -13,8 +13,13 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
}
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
-#define arch_spin_unlock_wait(x) \
- do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
+{
+ volatile unsigned int *a = __ldcw_align(x);
+
+ smp_cond_load_acquire(a, VAL);
+}
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 4736020ba5ea..5e953ab4530d 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -8,6 +8,8 @@ struct pt_regs;
void parisc_terminate(char *msg, struct pt_regs *regs,
int code, unsigned long offset) __noreturn __cold;
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
/* mm/fault.c */
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address);
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e81ccf1716e9..5adc339eb7c8 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
- printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
- cpunum, coproc_cfg.revision, coproc_cfg.model);
+ if (cpunum == 0)
+ printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
+ cpunum, coproc_cfg.revision, coproc_cfg.model);
/*
** store status register to stack (hopefully aligned)
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index b5458b37fc5b..e02d7b4d2b69 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -311,10 +311,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
- /* Do the secure computing check first. */
- if (secure_computing() == -1)
- return -1;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) {
/*
@@ -325,6 +321,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[20] = -1UL;
goto out;
}
+
+ /* Do the secure computing check after ptrace. */
+ if (secure_computing(NULL) == -1)
+ return -1;
+
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gr[20]);
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 58dd6801f5be..31ec99a5f119 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -309,11 +309,6 @@ void __init time_init(void)
clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
NSEC_PER_MSEC, 0);
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
- /* At bootup only one 64bit CPU is online and cr16 is "stable" */
- set_sched_clock_stable();
-#endif
-
start_cpu_itimer(); /* get CPU 0 started */
/* register at clocksource framework */
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index d7c0acb35ec2..2b65c0177778 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -28,6 +28,7 @@
#include <linux/ratelimit.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
+#include <asm/traps.h>
/* #define DEBUG_UNALIGNED 1 */
@@ -130,8 +131,6 @@
int unaligned_enabled __read_mostly = 1;
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
unsigned long saddr = regs->ior;
@@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
break;
}
- if (modify && R1(regs->iir))
+ if (ret == 0 && modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
@@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
if (ret)
{
+ /*
+ * The unaligned handler failed.
+ * If we were called by __get_user() or __put_user(), jump
+ * to its exception fixup handler instead of crashing.
+ */
+ if (!user_mode(regs) && fixup_exception(regs))
+ return;
+
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index ddd988b267a9..e278a87f43cc 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
if (addr >= kernel_unwind_table.start &&
addr <= kernel_unwind_table.end)
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
- else
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->start &&
addr <= table->end)
@@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
break;
}
}
+ spin_unlock_irqrestore(&unwind_lock, flags);
+ }
return e;
}
@@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
insn = *(unsigned int *)npc;
- if ((insn & 0xffffc000) == 0x37de0000 ||
- (insn & 0xffe00000) == 0x6fc00000) {
+ if ((insn & 0xffffc001) == 0x37de0000 ||
+ (insn & 0xffe00001) == 0x6fc00000) {
/* ldo X(sp), sp, or stwm X,D(sp) */
- frame_size += (insn & 0x1 ? -1 << 13 : 0) |
- ((insn & 0x3fff) >> 1);
+ frame_size += (insn & 0x3fff) >> 1;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
- } else if ((insn & 0xffe00008) == 0x73c00008) {
+ } else if ((insn & 0xffe00009) == 0x73c00008) {
/* std,ma X,D(sp) */
- frame_size += (insn & 0x1 ? -1 << 13 : 0) |
- (((insn >> 4) & 0x3ff) << 3);
+ frame_size += ((insn >> 4) & 0x3ff) << 3;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
@@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
}
}
+ if (frame_size > e->Total_frame_size << 3)
+ frame_size = e->Total_frame_size << 3;
+
if (!unwind_special(info, e->region_start, frame_size)) {
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81c97c9..163af2c31d76 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -239,7 +239,7 @@ good_area:
* fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 01f7464d9fea..ec4047e170a0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -98,7 +98,6 @@ config PPC
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select SYSCTL_EXCEPTION_TRACE
- select ARCH_WANT_OPTIONAL_GPIOLIB
select VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
@@ -128,7 +127,8 @@ config PPC
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_CBPF_JIT
+ select HAVE_CBPF_JIT if !PPC64
+ select HAVE_EBPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -164,6 +164,8 @@ config PPC
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select GENERIC_CPU_AUTOPROBE
+ select HAVE_VIRT_CPU_ACCOUNTING
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -457,6 +459,29 @@ config KEXEC
interface is strongly in flux, so no good recommendation can be
made.
+config RELOCATABLE
+ bool "Build a relocatable kernel"
+ depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
+ select NONSTATIC_KERNEL
+ help
+ This builds a kernel image that is capable of running at the
+ location the kernel is loaded at. For ppc32 there are no
+ alignment restrictions, and this feature is a superset of
+ DYNAMIC_MEMSTART and hence overrides it. For ppc64 the kernel
+ must be loaded at a 16k-aligned base address. The kernel is linked
+ as a position-independent executable (PIE) and contains dynamic
+ relocations which are processed early in the bootup process.
+
+ One use is for the kexec on panic case where the recovery kernel
+ must live at a different physical address than the primary
+ kernel.
+
+ Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
+ it has been loaded at and the compile-time physical address
+ CONFIG_PHYSICAL_START is ignored. However, the CONFIG_PHYSICAL_START
+ setting can still be useful to bootwrappers that need to know the
+ load address of the kernel (e.g. u-boot/mkimage).
+
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
@@ -949,29 +974,6 @@ config DYNAMIC_MEMSTART
This option is overridden by CONFIG_RELOCATABLE
-config RELOCATABLE
- bool "Build a relocatable kernel"
- depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE)
- select NONSTATIC_KERNEL
- help
- This builds a kernel image that is capable of running at the
- location the kernel is loaded at, without any alignment restrictions.
- This feature is a superset of DYNAMIC_MEMSTART and hence overrides it.
-
- One use is for the kexec on panic case where the recovery kernel
- must live at a different physical address than the primary
- kernel.
-
- Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
- it has been loaded at and the compile time physical addresses
- CONFIG_PHYSICAL_START is ignored. However CONFIG_PHYSICAL_START
- setting can still be useful to bootwrappers that need to know the
- load address of the kernel (eg. u-boot/mkimage).
-
-config RELOCATABLE_PPC32
- def_bool y
- depends on PPC32 && RELOCATABLE
-
config PAGE_OFFSET_BOOL
bool "Set custom page offset address"
depends on ADVANCED_OPTIONS
@@ -1054,24 +1056,14 @@ config CONSISTENT_SIZE
config PIN_TLB
bool "Pinned Kernel TLBs (860 ONLY)"
depends on ADVANCED_OPTIONS && 8xx
+
+config PIN_TLB_IMMR
+ bool "Pinned TLB for IMMR"
+ depends on PIN_TLB
+ default y
endmenu
if PPC64
-config RELOCATABLE
- bool "Build a relocatable kernel"
- depends on !COMPILE_TEST
- select NONSTATIC_KERNEL
- help
- This builds a kernel image that is capable of running anywhere
- in the RMA (real memory area) at any 16k-aligned base address.
- The kernel is linked as a position-independent executable (PIE)
- and contains dynamic relocations which are processed early
- in the bootup process.
-
- One use is for the kexec on panic case where the recovery kernel
- must live at a different physical address than the primary
- kernel.
-
# This value must have zeroes in the bottom 60 bits otherwise lots will break
config PAGE_OFFSET
hex
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index d3fcf7e64e3a..171047822b56 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -149,14 +149,14 @@ config PPC_EARLY_DEBUG_BOOTX
config PPC_EARLY_DEBUG_LPAR
bool "LPAR HV Console"
- depends on PPC_PSERIES
+ depends on PPC_PSERIES && HVC_CONSOLE
help
Select this to enable early debugging for a machine with a HVC
console on vterm 0.
config PPC_EARLY_DEBUG_LPAR_HVSI
bool "LPAR HVSI Console"
- depends on PPC_PSERIES
+ depends on PPC_PSERIES && HVC_CONSOLE
help
Select this to enable early debugging for a machine with a HVSI
console on a specified vterm.
@@ -212,7 +212,6 @@ config PPC_EARLY_DEBUG_40x
config PPC_EARLY_DEBUG_CPM
bool "Early serial debugging for Freescale CPM-based serial ports"
depends on SERIAL_CPM
- select PIN_TLB if PPC_8xx
help
Select this to enable early debugging for Freescale chips
using a CPM-based serial port. This assumes that the bootwrapper
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 709a22a3e824..ca254546cd05 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -181,6 +181,11 @@ KBUILD_CFLAGS += -pipe -Iarch/$(ARCH) $(CFLAGS-y)
CPP = $(CC) -E $(KBUILD_CFLAGS)
CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
+ifdef CONFIG_CPU_BIG_ENDIAN
+CHECKFLAGS += -D__BIG_ENDIAN__
+else
+CHECKFLAGS += -D__LITTLE_ENDIAN__
+endif
KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
@@ -221,7 +226,7 @@ KBUILD_CFLAGS += -mno-sched-epilog
endif
cpu-as-$(CONFIG_4xx) += -Wa,-m405
-cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
+cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200) += -Wa,-me200
KBUILD_AFLAGS += $(cpu-as-y)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8fe78a3efc92..4cd612a6e272 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -70,7 +70,7 @@ $(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o): \
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
$(addprefix $(obj)/,$(libfdtheader))
src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
@@ -78,7 +78,7 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
gunzip_util.c elf_util.c $(zlib) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
- uartlite.c mpc52xx-psc.c
+ uartlite.c mpc52xx-psc.c opal.c opal-calls.S
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
@@ -113,6 +113,7 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
+src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
@@ -296,6 +297,9 @@ image-$(CONFIG_TQM8560) += cuImage.tqm8560
image-$(CONFIG_SBC8548) += cuImage.sbc8548
image-$(CONFIG_KSI8560) += cuImage.ksi8560
+# Board ports in arch/powerpc/platform/86xx/Kconfig
+image-$(CONFIG_MVME7100) += dtbImage.mvme7100
+
# Board ports in arch/powerpc/platform/embedded6xx/Kconfig
image-$(CONFIG_STORCENTER) += cuImage.storcenter
image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
index a1b883730b31..27fcabc2f857 100644
--- a/arch/powerpc/boot/dts/ac14xx.dts
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -231,7 +231,7 @@
};
rtc@68 {
- compatible = "stm,m41t00";
+ compatible = "st,m41t00";
reg = <0x68>;
};
};
diff --git a/arch/powerpc/boot/dts/akebono.dts b/arch/powerpc/boot/dts/akebono.dts
index f92ecfed3d2f..e61d5dc598c1 100644
--- a/arch/powerpc/boot/dts/akebono.dts
+++ b/arch/powerpc/boot/dts/akebono.dts
@@ -224,7 +224,7 @@
#address-cells = <1>;
#size-cells = <0>;
rtc@68 {
- compatible = "stm,m41t80", "m41st85";
+ compatible = "st,m41t80", "m41st85";
reg = <0x68>;
};
};
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index 7daaca324c01..b0b26d8d68a2 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -279,7 +279,7 @@
#address-cells = <1>;
#size-cells = <0>;
rtc@68 {
- compatible = "stm,m41t80";
+ compatible = "st,m41t80";
reg = <0x68>;
interrupt-parent = <&UIC0>;
interrupts = <0x9 0x8>;
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 549c24c4c388..0d6ac92d0f5e 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -319,7 +319,7 @@
#address-cells = <1>;
#size-cells = <0>;
rtc@68 {
- compatible = "stm,m41t80";
+ compatible = "st,m41t80";
reg = <0x68>;
interrupt-parent = <&UIC2>;
interrupts = <0x19 0x8>;
diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts
index d2c8a872308e..4191e1850ea1 100644
--- a/arch/powerpc/boot/dts/currituck.dts
+++ b/arch/powerpc/boot/dts/currituck.dts
@@ -116,7 +116,7 @@
#address-cells = <1>;
#size-cells = <0>;
rtc@68 {
- compatible = "stm,m41t80", "m41st85";
+ compatible = "st,m41t80", "m41st85";
reg = <0x68>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
index bc3bf9333dde..88d8423f8ac5 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
@@ -51,6 +51,7 @@
serial2 = &serial2;
serial3 = &serial3;
pci0 = &pci0;
+ usb0 = &usb0;
dma0 = &dma0;
dma1 = &dma1;
sdhc = &sdhc;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
index 8797ce146512..f3f968c51f4b 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
@@ -51,6 +51,7 @@
serial2 = &serial2;
serial3 = &serial3;
pci0 = &pci0;
+ usb0 = &usb0;
dma0 = &dma0;
dma1 = &dma1;
sdhc = &sdhc;
diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
index 2d4b64fcee88..ae70a24094b0 100644
--- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
@@ -106,6 +106,43 @@
sata@221000 {
status = "disabled";
};
+
+ fman0: fman@400000 {
+ enet0: ethernet@e0000 {
+ phy-connection-type = "sgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ mdio0: mdio@e1120 {
+ front_phy: ethernet-phy@11 {
+ reg = <0x11>;
+ };
+ };
+
+ enet1: ethernet@e2000 {
+ phy-connection-type = "sgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ enet2: ethernet@e4000 {
+ status = "disabled";
+ };
+
+ enet3: ethernet@e6000 {
+ status = "disabled";
+ };
+ enet4: ethernet@e8000 {
+ phy-handle = <&front_phy>;
+ phy-connection-type = "rgmii";
+ };
+ enet5: ethernet@f0000 {
+ status = "disabled";
+ };
+ };
};
rio: rapidio@ffe0c0000 {
diff --git a/arch/powerpc/boot/dts/fsl/mpc8569mds.dts b/arch/powerpc/boot/dts/fsl/mpc8569mds.dts
index a95ff7d2392c..8e94448f296c 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8569mds.dts
@@ -232,7 +232,7 @@
mode = "cpu-qe";
serial-flash@0 {
- compatible = "stm,m25p40";
+ compatible = "st,m25p40";
reg = <0>;
spi-max-frequency = <25000000>;
};
diff --git a/arch/powerpc/boot/dts/fsl/mvme7100.dts b/arch/powerpc/boot/dts/fsl/mvme7100.dts
new file mode 100644
index 000000000000..e2d306ad37a6
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mvme7100.dts
@@ -0,0 +1,153 @@
+/*
+ * Device tree source for the Emerson/Artesyn MVME7100
+ *
+ * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * Author: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+/include/ "mpc8641si-pre.dtsi"
+
+/ {
+ model = "MVME7100";
+ compatible = "artesyn,MVME7100";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000>;
+ };
+
+ soc: soc@f1000000 {
+ ranges = <0x00000000 0xf1000000 0x00100000>;
+
+ i2c@3000 {
+ hwmon@4c {
+ compatible = "dallas,max6649";
+ reg = <0x4c>;
+ };
+
+ rtc@68 {
+ status = "disabled";
+ };
+ };
+
+
+ enet0: ethernet@24000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy1: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy2: ethernet-phy@3 {
+ reg = <3>;
+ };
+ phy3: ethernet-phy@4 {
+ reg = <4>;
+ };
+ };
+
+ enet1: ethernet@25000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@25520 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@26000 {
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@26520 {
+ status = "disabled";
+ };
+
+ enet3: ethernet@27000 {
+ phy-handle = <&phy3>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@27520 {
+ status = "disabled";
+ };
+
+ serial1: serial@4600 {
+ status = "disabled";
+ };
+ };
+
+ lbc: localbus@f1005000 {
+ reg = <0xf1005000 0x1000>;
+
+ ranges = <0 0 0xf8000000 0x08000000 // NOR Flash (128MB)
+ 2 0 0xf2030000 0x00010000 // NAND Flash (8GB)
+ 3 0 0xf2400000 0x00080000 // MRAM (512KB)
+ 4 0 0xf2000000 0x00010000 // BCSR
+ 5 0 0xf2010000 0x00010000>; // QUART
+
+ bcsr@4,0 {
+ compatible = "artesyn,mvme7100-bcsr";
+ reg = <4 0 0x10000>;
+ };
+
+ serial@5,1000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x1000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+
+ serial@5,2000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x2000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+
+ serial@5,3000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x3000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+
+ serial@5,4000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x4000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+ };
+
+ pci0: pcie@f1008000 {
+ status = "disabled";
+ };
+
+ pci1: pcie@f1009000 {
+ status = "disabled";
+ };
+
+ chosen {
+ linux,stdout-path = &serial0;
+ };
+};
+
+/include/ "mpc8641si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1022rdk.dts b/arch/powerpc/boot/dts/fsl/p1022rdk.dts
index d505d7c51903..29e8af1e3711 100644
--- a/arch/powerpc/boot/dts/fsl/p1022rdk.dts
+++ b/arch/powerpc/boot/dts/fsl/p1022rdk.dts
@@ -57,7 +57,7 @@
clock-frequency = <12288000>;
};
rtc@68 {
- compatible = "stm,m41t62";
+ compatible = "st,m41t62";
reg = <0x68>;
};
adt7461@4c{
diff --git a/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi b/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
index 29dad723091e..fcc7e5b7fd47 100644
--- a/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-usb@210000 {
+usb0: usb@210000 {
compatible = "fsl-usb2-dr";
reg = <0x210000 0x1000>;
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 507649ece0a1..44e399b17f6f 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -607,7 +607,7 @@
/include/ "qoriq-gpio-3.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
usb0: usb@210000 {
- compatible = "fsl-usb2-mph-v2.4", "fsl-usb2-mph";
+ compatible = "fsl-usb2-mph-v2.5", "fsl-usb2-mph";
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */
phy_type = "utmi";
@@ -615,7 +615,7 @@
};
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
- compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr";
+ compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x524>; /* USB2LIODNR */
dr_mode = "host";
@@ -673,3 +673,48 @@
};
};
};
+
+&qe {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "qe";
+ compatible = "fsl,qe";
+ fsl,qe-num-riscs = <1>;
+ fsl,qe-num-snums = <28>;
+
+ qeic: interrupt-controller@80 {
+ interrupt-controller;
+ compatible = "fsl,qe-ic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x80 0x80>;
+ interrupts = <95 2 0 0 94 2 0 0>; //high:79 low:78
+ };
+
+ ucc@2000 {
+ cell-index = <1>;
+ reg = <0x2000 0x200>;
+ interrupts = <32>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@2200 {
+ cell-index = <3>;
+ reg = <0x2200 0x200>;
+ interrupts = <34>;
+ interrupt-parent = <&qeic>;
+ };
+
+ muram@10000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ ranges = <0x0 0x10000 0x6000>;
+
+ data-only@0 {
+ compatible = "fsl,qe-muram-data",
+ "fsl,cpm-muram-data";
+ reg = <0x0 0x6000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi
index 8c7ea6c05de9..863f9431285f 100644
--- a/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi
@@ -212,4 +212,42 @@
0 0x00010000>;
};
};
+
+ qe: qe@ffe140000 {
+ ranges = <0x0 0xf 0xfe140000 0x40000>;
+ reg = <0xf 0xfe140000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+
+ si1: si@700 {
+ compatible = "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+ siram1: siram@1000 {
+ compatible = "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
+ ucc_hdlc: ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+ ucc_serial: ucc@2200 {
+ compatible = "fsl,t1040-ucc-uart";
+ port-number = <0>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi
index 977af355b388..2fd4cbe7098f 100644
--- a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi
@@ -366,4 +366,42 @@
0 0x00010000>;
};
};
+
+ qe: qe@ffe140000 {
+ ranges = <0x0 0xf 0xfe140000 0x40000>;
+ reg = <0xf 0xfe140000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+
+ si1: si@700 {
+ compatible = "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+ siram1: siram@1000 {
+ compatible = "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
+ ucc_hdlc: ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+ ucc_serial: ucc@2200 {
+ compatible = "fsl,t1040-ucc-uart";
+ port-number = <0>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
index 7c4afdb44b46..5fdddbd2a62b 100644
--- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
@@ -222,4 +222,42 @@
0 0x00010000>;
};
};
+
+ qe: qe@ffe140000 {
+ ranges = <0x0 0xf 0xfe140000 0x40000>;
+ reg = <0xf 0xfe140000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+
+ si1: si@700 {
+ compatible = "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+ siram1: siram@1000 {
+ compatible = "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
+ ucc_hdlc: ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+ ucc_serial: ucc@2200 {
+ compatible = "fsl,t1040-ucc-uart";
+ port-number = <0>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
index 1184a746fcb1..038cf8fadee4 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
@@ -56,6 +56,8 @@
pci1 = &pci1;
pci2 = &pci2;
pci3 = &pci3;
+ usb0 = &usb0;
+ usb1 = &usb1;
dma0 = &dma0;
dma1 = &dma1;
dma2 = &dma2;
diff --git a/arch/powerpc/boot/dts/glacier.dts b/arch/powerpc/boot/dts/glacier.dts
index 2000060386d7..a7a802f4ffdd 100644
--- a/arch/powerpc/boot/dts/glacier.dts
+++ b/arch/powerpc/boot/dts/glacier.dts
@@ -287,7 +287,7 @@
#address-cells = <1>;
#size-cells = <0>;
rtc@68 {
- compatible = "stm,m41t80";
+ compatible = "st,m41t80";
reg = <0x68>;
interrupt-parent = <&UIC2>;
interrupts = <0x19 0x8>;
diff --git a/arch/powerpc/boot/dts/icon.dts b/arch/powerpc/boot/dts/icon.dts
index abcd0caeccae..9c94fd737f7c 100644
--- a/arch/powerpc/boot/dts/icon.dts
+++ b/arch/powerpc/boot/dts/icon.dts
@@ -256,7 +256,7 @@
#size-cells = <0>;
rtc@68 {
- compatible = "stm,m41t00";
+ compatible = "st,m41t00";
reg = <0x68>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc5121ads.dts b/arch/powerpc/boot/dts/mpc5121ads.dts
index c228a0a232a6..75888ce2c792 100644
--- a/arch/powerpc/boot/dts/mpc5121ads.dts
+++ b/arch/powerpc/boot/dts/mpc5121ads.dts
@@ -99,7 +99,7 @@
};
rtc@68 {
- compatible = "stm,m41t62";
+ compatible = "st,m41t62";
reg = <0x68>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 43546844ea5a..ca5139ee5074 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -472,7 +472,7 @@
hdd {
gpios = <&mcu_pio 1 0>;
- linux,default-trigger = "ide-disk";
+ linux,default-trigger = "disk-activity";
};
};
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index cf8542401a3c..90aed3ac2f69 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -92,7 +92,7 @@
dfsrr;
eeprom: at24@50 {
- compatible = "st-micro,24c256";
+ compatible = "st,24c256";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index daeacbdcf8b4..47c5fc64e433 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -416,7 +416,7 @@
gpios = <&qe_pio_e 18 0>;
flash {
- compatible = "stm,nand512-a";
+ compatible = "st,nand512-a";
};
};
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 2b4b6532d69c..e32613963ab0 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -496,7 +496,7 @@
hdd {
gpios = <&mcu_pio 1 0>;
- linux,default-trigger = "ide-disk";
+ linux,default-trigger = "disk-activity";
};
};
};
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 74b6a535a413..71842fcd621f 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -480,7 +480,7 @@
hdd {
gpios = <&mcu_pio 1 0>;
- linux,default-trigger = "ide-disk";
+ linux,default-trigger = "disk-activity";
};
};
};
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 3b5cbac85368..e442a29b2fe0 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -446,7 +446,7 @@
hdd {
gpios = <&mcu_pio 1 0>;
- linux,default-trigger = "ide-disk";
+ linux,default-trigger = "disk-activity";
};
};
};
diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts
index 871c16d1ad5e..0cec7244abe7 100644
--- a/arch/powerpc/boot/dts/pdm360ng.dts
+++ b/arch/powerpc/boot/dts/pdm360ng.dts
@@ -103,7 +103,7 @@
};
rtc@68 {
- compatible = "stm,m41t00";
+ compatible = "st,m41t00";
reg = <0x68>;
};
};
diff --git a/arch/powerpc/boot/dts/sam440ep.dts b/arch/powerpc/boot/dts/sam440ep.dts
index f0663be10421..088361cf4636 100644
--- a/arch/powerpc/boot/dts/sam440ep.dts
+++ b/arch/powerpc/boot/dts/sam440ep.dts
@@ -196,7 +196,7 @@
interrupt-parent = <&UIC0>;
interrupts = <2 4>;
rtc@68 {
- compatible = "stm,m41t80";
+ compatible = "st,m41t80";
reg = <0x68>;
};
};
diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts
index c409cbafb126..1f2952dce77d 100644
--- a/arch/powerpc/boot/dts/xcalibur1501.dts
+++ b/arch/powerpc/boot/dts/xcalibur1501.dts
@@ -238,7 +238,7 @@
};
rtc@68 {
- compatible = "stm,m41t00",
+ compatible = "st,m41t00",
"dallas,ds1338";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/xpedite5200.dts b/arch/powerpc/boot/dts/xpedite5200.dts
index 8fd7b7031357..5b10e56a1d8e 100644
--- a/arch/powerpc/boot/dts/xpedite5200.dts
+++ b/arch/powerpc/boot/dts/xpedite5200.dts
@@ -130,7 +130,7 @@
};
rtc@68 {
- compatible = "stm,m41t00",
+ compatible = "st,m41t00",
"dallas,ds1338";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
index 0baa8283d08c..646acfbef0dd 100644
--- a/arch/powerpc/boot/dts/xpedite5200_xmon.dts
+++ b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
@@ -134,7 +134,7 @@
};
rtc@68 {
- compatible = "stm,m41t00",
+ compatible = "st,m41t00",
"dallas,ds1338";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts
index 04cb410da48b..7bcc94ffe53d 100644
--- a/arch/powerpc/boot/dts/xpedite5301.dts
+++ b/arch/powerpc/boot/dts/xpedite5301.dts
@@ -231,7 +231,7 @@
};
rtc@68 {
- compatible = "stm,m41t00",
+ compatible = "st,m41t00",
"dallas,ds1338";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts
index 73f8620f1ce7..86df8bc6ebbd 100644
--- a/arch/powerpc/boot/dts/xpedite5330.dts
+++ b/arch/powerpc/boot/dts/xpedite5330.dts
@@ -267,7 +267,7 @@
};
rtc@68 {
- compatible = "stm,m41t00",
+ compatible = "st,m41t00",
"dallas,ds1338";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts
index cd0ea2b99362..b8ade094f932 100644
--- a/arch/powerpc/boot/dts/xpedite5370.dts
+++ b/arch/powerpc/boot/dts/xpedite5370.dts
@@ -229,7 +229,7 @@
};
rtc@68 {
- compatible = "stm,m41t00",
+ compatible = "st,m41t00",
"dallas,ds1338";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/motload-head.S b/arch/powerpc/boot/motload-head.S
new file mode 100644
index 000000000000..41cabb4b63fa
--- /dev/null
+++ b/arch/powerpc/boot/motload-head.S
@@ -0,0 +1,11 @@
+#include "ppc_asm.h"
+
+ .text
+ .globl _zimage_start
+_zimage_start:
+ mfmsr r10
+ rlwinm r10,r10,0,~(1<<15) /* Clear MSR_EE */
+ sync
+ mtmsr r10
+ isync
+ b _zimage_start_lib
diff --git a/arch/powerpc/boot/mvme7100.c b/arch/powerpc/boot/mvme7100.c
new file mode 100644
index 000000000000..8b0a932311af
--- /dev/null
+++ b/arch/powerpc/boot/mvme7100.c
@@ -0,0 +1,59 @@
+/*
+ * Motload compatibility for the Emerson/Artesyn MVME7100
+ *
+ * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * Author: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "ops.h"
+#include "stdio.h"
+#include "cuboot.h"
+
+#define TARGET_86xx
+#define TARGET_HAS_ETH1
+#define TARGET_HAS_ETH2
+#define TARGET_HAS_ETH3
+#include "ppcboot.h"
+
+static bd_t bd;
+
+BSS_STACK(16384);
+
+static void mvme7100_fixups(void)
+{
+ void *devp;
+ unsigned long busfreq = bd.bi_busfreq * 1000000;
+
+ dt_fixup_cpu_clocks(bd.bi_intfreq * 1000000, busfreq / 4, busfreq);
+
+ devp = finddevice("/soc@f1000000");
+ if (devp)
+ setprop(devp, "bus-frequency", &busfreq, sizeof(busfreq));
+
+ devp = finddevice("/soc/serial@4500");
+ if (devp)
+ setprop(devp, "clock-frequency", &busfreq, sizeof(busfreq));
+
+ dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
+
+ dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
+ dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
+ dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr);
+ dt_fixup_mac_address_by_alias("ethernet3", bd.bi_enet3addr);
+}
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ CUBOOT_INIT();
+ fdt_init(_dtb_start);
+ serial_console_init();
+ platform_ops.fixups = mvme7100_fixups;
+}
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
new file mode 100644
index 000000000000..ff2f1b97bc53
--- /dev/null
+++ b/arch/powerpc/boot/opal-calls.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "ppc_asm.h"
+#include "../include/asm/opal-api.h"
+
+ .text
+
+#define OPAL_CALL(name, token) \
+ .globl name; \
+name: \
+ li r0, token; \
+ b opal_call;
+
+opal_call:
+ mflr r11
+ std r11,16(r1)
+ mfcr r12
+ stw r12,8(r1)
+ mr r13,r2
+
+ /* Set opal return address */
+ ld r11,opal_return@got(r2)
+ mtlr r11
+ mfmsr r12
+
+ /* switch to BE when we enter OPAL */
+ li r11,MSR_LE
+ andc r12,r12,r11
+ mtspr SPRN_HSRR1,r12
+
+ /* load the opal call entry point and base */
+ ld r11,opal@got(r2)
+ ld r12,8(r11)
+ ld r2,0(r11)
+ mtspr SPRN_HSRR0,r12
+ hrfid
+
+opal_return:
+ FIXUP_ENDIAN
+ mr r2,r13;
+ lwz r11,8(r1);
+ ld r12,16(r1)
+ mtcr r11;
+ mtlr r12
+ blr
+
+OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
+OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
+OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE);
+OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS);
+OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH);
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
new file mode 100644
index 000000000000..1f37e1c1d6d8
--- /dev/null
+++ b/arch/powerpc/boot/opal.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "ops.h"
+#include "stdio.h"
+#include "io.h"
+#include <libfdt.h>
+#include "../include/asm/opal-api.h"
+
+#ifdef __powerpc64__
+
+/* Global OPAL struct used by opal-calls.S */
+struct opal {
+ u64 base;
+ u64 entry;
+} opal;
+
+static u32 opal_con_id;
+
+int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
+int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
+int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
+int64_t opal_console_flush(uint64_t term_number);
+int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+
+static int opal_con_open(void)
+{
+ return 0;
+}
+
+static void opal_con_putc(unsigned char c)
+{
+ int64_t rc;
+ uint64_t olen, len;
+
+ do {
+ rc = opal_console_write_buffer_space(opal_con_id, &olen);
+ len = be64_to_cpu(olen);
+ if (rc)
+ return;
+ opal_poll_events(NULL);
+ } while (len < 1);
+
+
+ olen = cpu_to_be64(1);
+ opal_console_write(opal_con_id, &olen, &c);
+}
+
+static void opal_con_close(void)
+{
+ opal_console_flush(opal_con_id);
+}
+
+static void opal_init(void)
+{
+ void *opal_node;
+
+ opal_node = finddevice("/ibm,opal");
+ if (!opal_node)
+ return;
+ if (getprop(opal_node, "opal-base-address", &opal.base, sizeof(u64)) < 0)
+ return;
+ opal.base = be64_to_cpu(opal.base);
+ if (getprop(opal_node, "opal-entry-address", &opal.entry, sizeof(u64)) < 0)
+ return;
+ opal.entry = be64_to_cpu(opal.entry);
+}
+
+int opal_console_init(void *devp, struct serial_console_data *scdp)
+{
+ opal_init();
+
+ if (devp) {
+ int n = getprop(devp, "reg", &opal_con_id, sizeof(u32));
+ if (n != sizeof(u32))
+ return -1;
+ opal_con_id = be32_to_cpu(opal_con_id);
+ } else
+ opal_con_id = 0;
+
+ scdp->open = opal_con_open;
+ scdp->putc = opal_con_putc;
+ scdp->close = opal_con_close;
+
+ return 0;
+}
+#else
+int opal_console_init(void *devp, struct serial_console_data *scdp)
+{
+ return -1;
+}
+#endif /* __powerpc64__ */
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 5e75e1c5518e..e19b64ef977a 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -89,6 +89,7 @@ int mpsc_console_init(void *devp, struct serial_console_data *scdp);
int cpm_console_init(void *devp, struct serial_console_data *scdp);
int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp);
int uartlite_console_init(void *devp, struct serial_console_data *scdp);
+int opal_console_init(void *devp, struct serial_console_data *scdp);
void *simple_alloc_init(char *base, unsigned long heap_size,
unsigned long granularity, unsigned long max_allocs);
extern void flush_cache(void *, unsigned long);
diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h
index 35ea60c1f070..b03373d8b386 100644
--- a/arch/powerpc/boot/ppc_asm.h
+++ b/arch/powerpc/boot/ppc_asm.h
@@ -61,6 +61,10 @@
#define SPRN_TBRL 268
#define SPRN_TBRU 269
+#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
+#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+
+#define MSR_LE 0x0000000000000001
#define FIXUP_ENDIAN \
tdi 0, 0, 0x48; /* Reverse endian of b . + 8 */ \
diff --git a/arch/powerpc/boot/ppcboot.h b/arch/powerpc/boot/ppcboot.h
index 6ae6f9063952..453df429d5d0 100644
--- a/arch/powerpc/boot/ppcboot.h
+++ b/arch/powerpc/boot/ppcboot.h
@@ -43,7 +43,7 @@ typedef struct bd_info {
unsigned long bi_sramstart; /* start of SRAM memory */
unsigned long bi_sramsize; /* size of SRAM memory */
#if defined(TARGET_8xx) || defined(TARGET_CPM2) || defined(TARGET_85xx) ||\
- defined(TARGET_83xx)
+ defined(TARGET_83xx) || defined(TARGET_86xx)
unsigned long bi_immr_base; /* base of IMMR register */
#endif
#if defined(TARGET_PPC_MPC52xx)
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 167ee9433de6..e04c1e4063ae 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -132,6 +132,8 @@ int serial_console_init(void)
else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") ||
dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a"))
rc = uartlite_console_init(devp, &serial_cd);
+ else if (dt_is_compatible(devp, "ibm,opal-console-raw"))
+ rc = opal_console_init(devp, &serial_cd);
/* Add other serial console driver calls here */
diff --git a/arch/powerpc/boot/types.h b/arch/powerpc/boot/types.h
index 31393d17a9c1..85565a89bcc2 100644
--- a/arch/powerpc/boot/types.h
+++ b/arch/powerpc/boot/types.h
@@ -12,6 +12,16 @@ typedef short s16;
typedef int s32;
typedef long long s64;
+/* required for opal-api.h */
+typedef u8 uint8_t;
+typedef u16 uint16_t;
+typedef u32 uint32_t;
+typedef u64 uint64_t;
+typedef s8 int8_t;
+typedef s16 int16_t;
+typedef s32 int32_t;
+typedef s64 int64_t;
+
#define min(x,y) ({ \
typeof(x) _x = (x); \
typeof(y) _y = (y); \
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6a19fcef5596..6681ec3625c9 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -302,6 +302,11 @@ mvme5100)
platformo="$object/fixed-head.o $object/mvme5100.o"
binary=y
;;
+mvme7100)
+ platformo="$object/motload-head.o $object/mvme7100.o"
+ link_address='0x4000000'
+ binary=y
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 9110a5cb1bb7..3438ed99c088 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 790366652ba3..36c44c0b560c 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 01bd71bac027..ad2156c6e2fc 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index e2036b7c7edb..28adb782ec51 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -32,8 +32,6 @@ CONFIG_SCSI_SAS_ATTRS=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -44,7 +42,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_AVERAGE=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index efd51701fb4d..a00f434c4d47 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index 5ded3dcdf60a..e500e6a12b3e 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig
index bcb0c4d854db..65dc084a154c 100644
--- a/arch/powerpc/configs/40x/virtex_defconfig
+++ b/arch/powerpc/configs/40x/virtex_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index 37c838f26e53..567f99bd64a3 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -18,7 +18,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index ea4ef02a0578..143b2fbddb46 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -110,10 +109,9 @@ CONFIG_MMC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_VFAT_FS=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 95494209c124..6bba1a55b827 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index a046f08413fd..477d99fefd9a 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index a326b773ac05..6b77aea79b6c 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -49,7 +49,7 @@ CONFIG_SENSORS_AD7414=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index d939e71fff7d..c8e6f048a122 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 5aa312a158dd..3799a26de6f4 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -30,7 +30,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -71,10 +70,9 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index 5909e016c37d..c265f54ab9e5 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -19,7 +19,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index 57499d25c877..bb6bd6d90821 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -43,7 +42,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
-CONFIG_I2O=y
CONFIG_NETDEVICES=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 5d52185d8f5a..060f2edddb71 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -76,8 +75,7 @@ CONFIG_LOGO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 0ad3e449526e..115a6b2be18b 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -56,10 +55,9 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_THERMAL=y
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index a042335971da..b999048c4ae6 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index 91c2aff9bd55..b8c9ee45d0a2 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 7fddf3fe275c..a4bb048448da 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -41,7 +40,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
-CONFIG_I2O=y
CONFIG_NETDEVICES=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 6928012f3813..63302fbd184d 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -85,9 +84,8 @@ CONFIG_RTC_DRV_M41T80_WDT=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_REISERFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index c294369cc39f..b3792fd8111d 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index e779228d6cd6..ff6f86241418 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig
index 53d0300b3390..ce052064bcbb 100644
--- a/arch/powerpc/configs/44x/virtex5_defconfig
+++ b/arch/powerpc/configs/44x/virtex5_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index ee434375fc24..ab932488e68b 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -23,7 +23,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_VLAN_8021Q=y
@@ -73,9 +72,7 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 19fad0e0016e..c1faac800806 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -61,8 +60,7 @@ CONFIG_USB_STORAGE=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 5f40ba92a39a..9493b02ac660 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -53,8 +52,7 @@ CONFIG_I2C_MPC=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 909e185a88d1..fe8126bc1655 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -73,8 +72,7 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 649a01a0044d..1554de6968ca 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -34,7 +34,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -74,8 +73,7 @@ CONFIG_RTC_DRV_PCF8563=m
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=850
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index efab8388ea92..b8b316b884aa 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -75,8 +74,7 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index bcdfb07921fc..b60cac088a7b 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -63,8 +62,7 @@ CONFIG_USB_EHCI_FSL=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index 11a959283149..9547dcdd6489 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -28,7 +28,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_TIPC=y
CONFIG_BRIDGE=m
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index b47a41f77836..80aa844c1428 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -79,8 +78,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index e28c83f320c1..d89d13bc6901 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -77,8 +76,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index e84d35b848c0..e789518a2881 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -52,8 +51,7 @@ CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index ae145f410590..917a49ca2bd1 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -64,8 +63,7 @@ CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SPI=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index 87fc15bce407..00f636e95cc8 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -75,8 +74,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 9a2ff25a2e98..a539d44d1dba 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -66,8 +65,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index e44edc575549..9f0ddc830c82 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -51,8 +50,7 @@ CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index 94a7d85f1603..ceed4c1f0ab5 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -57,8 +56,7 @@ CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index 761ed8ea0729..a6819bf3ef5e 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -65,8 +64,7 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index bcf1b48cc9e6..4bd1992e4d98 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -50,8 +49,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index f0f0ebf75125..2d4bb63882b8 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -24,7 +24,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -77,8 +76,7 @@ CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index d2e4d82de14d..b3380dbd1925 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -65,9 +64,7 @@ CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
CONFIG_USB_STORAGE=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index b0939dd9ad6f..c79283be5680 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -165,10 +165,8 @@ CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/85xx/kmp204x_defconfig b/arch/powerpc/configs/85xx/kmp204x_defconfig
index e94d3eb4a8c1..aaaaa609cd24 100644
--- a/arch/powerpc/configs/85xx/kmp204x_defconfig
+++ b/arch/powerpc/configs/85xx/kmp204x_defconfig
@@ -64,7 +64,6 @@ CONFIG_IP_PIMSM_V2=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_TIPC=y
@@ -189,7 +188,7 @@ CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_UIO=y
CONFIG_STAGING=y
-CONFIG_CLK_PPC_CORENET=y
+CONFIG_CLK_QORIQ=y
CONFIG_EXT2_FS=y
CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index 6f753a71fe5d..bd814dfb0bbd 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -49,8 +48,7 @@ CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index e38c373f2edf..32af10def641 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -41,8 +40,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index 48fc8e3a7be0..a52b2170ee33 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -44,8 +43,7 @@ CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index ecb0c3bf8796..002bb48abaa3 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -46,8 +45,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index 72b7ccfbe2c2..97ae02377cf3 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index 0ad7bd5ee6b6..13579cb30539 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
CONFIG_MTD=y
@@ -79,8 +78,7 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=y
CONFIG_RTC_CLASS=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index b45190556c0c..384926f3ce1d 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -17,7 +17,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
@@ -53,8 +52,7 @@ CONFIG_AGP=m
CONFIG_DRM=m
CONFIG_SOUND=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=m
CONFIG_UDF_FS=m
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 4daaf2943b44..908f3885f4a5 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -50,8 +49,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index bb402b3cf786..f47e57610b7c 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -52,8 +51,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index 685d0fb132d6..42f5d0a7698e 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -28,7 +28,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index 02a931d4e954..71552b7929cd 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -52,8 +51,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index 633d5b759a36..25aac973d6d7 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -52,8 +51,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 858b539d004b..dbd961de251e 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -54,7 +54,6 @@ CONFIG_IP_PIMSM_V2=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -124,8 +123,7 @@ CONFIG_RTC_DRV_CMOS=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config
index f91f8895fc93..d3dd6b8865c0 100644
--- a/arch/powerpc/configs/86xx-hw.config
+++ b/arch/powerpc/configs/86xx-hw.config
@@ -74,9 +74,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_NR_UARTS=5
CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=5
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250=y
CONFIG_SERIO_LIBPS2=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index d89ff40d39b7..6a3f825452e9 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -24,7 +24,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 84f1b4140579..8b83ce8a01e7 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -29,7 +29,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
@@ -106,7 +105,6 @@ CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=m
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index 340685caa7b8..7c9d95370150 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -306,15 +306,13 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_AMSO1100=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
CONFIG_INFINIBAND_SRP=m
CONFIG_DMADEVICES=y
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=m
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_AUTOFS4_FS=m
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index db328e618bb9..7b6f30dece34 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -184,7 +184,7 @@ CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_CELL=y
CONFIG_UIO=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index 253a9f200097..ac9a50da2dc6 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -110,7 +110,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=m
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 7c137041f1d6..3403b85f9d81 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -50,9 +49,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index ee96be889dac..95411aeeeb8d 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -26,7 +26,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 41e4d359524d..1a61e81ab0cd 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -33,8 +33,7 @@ CONFIG_DUMMY=y
CONFIG_EFS_FS=m
CONFIG_EXPERT=y
CONFIG_EXT2_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_FB=y
CONFIG_FHANDLE=y
CONFIG_FIXED_PHY=y
@@ -55,7 +54,6 @@ CONFIG_IKCONFIG=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1d9ad8500909..3b2511c090d8 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -216,14 +216,13 @@ CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_APPLEDISPLAY=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index 6c6c60f1aba4..c0eec4a5df4e 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
@@ -76,9 +75,7 @@ CONFIG_SND_SEQUENCER_OSS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=y
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index 5e0f2551e5c7..e56e80090529 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -52,7 +51,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index 62ae92956d05..b413c19d7031 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -109,8 +108,7 @@ CONFIG_USB_SERIAL_FTDI_SIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RS5C372=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_XFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index ac9666f8abf1..27abfab31219 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -35,7 +35,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
@@ -102,9 +101,7 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_TI=m
CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_FS_DAX=y
CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 666922c5b572..197acaa026eb 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_TIPC=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index d16d6c5cb282..0b4854cf26cb 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
@@ -53,7 +52,7 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_BLK_DEV_XIP=y
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_SCSI=y
@@ -113,10 +112,9 @@ CONFIG_RTC_DRV_MPC5121=y
CONFIG_DMADEVICES=y
CONFIG_MPC512X_DMA=y
CONFIG_MPC512x_LPBFIFO=y
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 9fd041bfd778..88336d0df0d6 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -114,8 +113,7 @@ CONFIG_RTC_DRV_PCF8563=m
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index e2647d5bb605..d933326b4cf9 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -49,8 +48,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index 825b052176af..4cb0f617c0d6 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -56,8 +55,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 671e220a9a98..6574477fd726 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -37,7 +37,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_ESP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -101,8 +100,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 321412c5dae4..998454471a48 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -24,7 +24,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_NETDEVICES=y
@@ -37,8 +36,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/mpc86xx_basic_defconfig b/arch/powerpc/configs/mpc86xx_basic_defconfig
index 33af5c5de105..3283f0586e11 100644
--- a/arch/powerpc/configs/mpc86xx_basic_defconfig
+++ b/arch/powerpc/configs/mpc86xx_basic_defconfig
@@ -8,3 +8,4 @@ CONFIG_GEF_SBC610=y
CONFIG_MPC8610_HPCD=y
CONFIG_MPC8641_HPCN=y
CONFIG_SBC8641D=y
+CONFIG_MVME7100=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 2a10f98d4ee5..91f53f1bec5d 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -25,7 +25,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index 525a2cb500a7..139add95a16a 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -92,8 +91,7 @@ CONFIG_I2C_MPC=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
CONFIG_XFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 8f94782eb907..76f4edd441d3 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -152,8 +152,7 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index ea8705ffcd76..e5a674d4a716 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -40,7 +40,6 @@ CONFIG_INET_AH=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -158,7 +157,7 @@ CONFIG_ADB=y
CONFIG_ADB_CUDA=y
CONFIG_ADB_PMU=y
CONFIG_ADB_PMU_LED=y
-CONFIG_ADB_PMU_LED_IDE=y
+CONFIG_ADB_PMU_LED_DISK=y
CONFIG_PMAC_APM_EMU=m
CONFIG_PMAC_MEDIABAY=y
CONFIG_PMAC_BACKLIGHT=y
@@ -281,10 +280,8 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_APPLEDISPLAY=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 045031048f8d..dce352e9153b 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -80,6 +80,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
CONFIG_MTD_POWERNV_FLASH=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -181,6 +182,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_JSM=m
CONFIG_VIRTIO_CONSOLE=m
+CONFIG_POWERNV_OP_PANEL=m
CONFIG_IPMI_HANDLER=y
CONFIG_IPMI_DEVICE_INTERFACE=y
CONFIG_IPMI_POWERNV=y
@@ -233,9 +235,9 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index a43bf6ea10fb..370c0bbcff71 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
@@ -67,8 +66,7 @@ CONFIG_THERMAL=y
CONFIG_FB=m
CONFIG_FB_XILINX=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index bbc7f76d52c8..2766e8f590bc 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -35,7 +35,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_BRIDGE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -89,8 +88,7 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index b041fb607376..0a8d250cb97e 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -247,7 +247,6 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_EHCA=m
CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
@@ -262,14 +261,11 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index ddf9773458cf..fd2edd650c20 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -178,15 +178,11 @@ CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 99ccbebabfd3..8fbf49801233 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -442,7 +442,7 @@ CONFIG_ADB=y
CONFIG_ADB_CUDA=y
CONFIG_ADB_PMU=y
CONFIG_ADB_PMU_LED=y
-CONFIG_ADB_PMU_LED_IDE=y
+CONFIG_ADB_PMU_LED_DISK=y
CONFIG_PMAC_APM_EMU=y
CONFIG_PMAC_MEDIABAY=y
CONFIG_PMAC_BACKLIGHT=y
@@ -512,7 +512,6 @@ CONFIG_E1000E=m
CONFIG_IGB=m
CONFIG_IXGB=m
CONFIG_IXGBE=m
-CONFIG_IP1000=m
CONFIG_MV643XX_ETH=m
CONFIG_SKGE=m
CONFIG_SKY2=m
@@ -1029,14 +1028,14 @@ CONFIG_UIO_CIF=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=m
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=m
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=m
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_JBD2_DEBUG=y
CONFIG_REISERFS_FS=m
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index 3e336ee8bb4a..50b2bad51d0a 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -40,7 +39,6 @@ CONFIG_MTD_CFI_I4=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_FS_ENET=y
@@ -59,8 +57,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index c40046074f8b..ee0ec5a682fc 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -51,7 +51,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 36871a4bfa54..654aeffc57ef 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -53,6 +53,7 @@ CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_PPC_64K_PAGES=y
@@ -223,7 +224,6 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_EHCA=m
CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
@@ -233,14 +233,11 @@ CONFIG_INFINIBAND_SRP=m
CONFIG_INFINIBAND_ISER=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index b5db7dffe86d..e9122b15e5fd 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -72,8 +71,7 @@ CONFIG_USB_STORAGE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_XFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 4c973c5321c6..78fddf24b5d3 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -29,7 +29,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 34eaf528fa87..dcdd51b57783 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_BT=y
@@ -96,9 +95,7 @@ CONFIG_MMC_SDHCI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 9c221b69c181..7998c177f0a2 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
+obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
+crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
diff --git a/arch/powerpc/crypto/aes-spe-regs.h b/arch/powerpc/crypto/aes-spe-regs.h
index 30d217b399c3..2cc3a2caadae 100644
--- a/arch/powerpc/crypto/aes-spe-regs.h
+++ b/arch/powerpc/crypto/aes-spe-regs.h
@@ -18,7 +18,7 @@
#define rLN r7 /* length of data to be processed */
#define rIP r8 /* pointer to IV (CBC/CTR/XTS modes) */
#define rKT r9 /* pointer to tweak key (XTS mode) */
-#define rT0 r11 /* pointers to en-/decrpytion tables */
+#define rT0 r11 /* pointers to en-/decryption tables */
#define rT1 r10
#define rD0 r9 /* data */
#define rD1 r14
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_asm.S b/arch/powerpc/crypto/crc32c-vpmsum_asm.S
new file mode 100644
index 000000000000..dc640b212299
--- /dev/null
+++ b/arch/powerpc/crypto/crc32c-vpmsum_asm.S
@@ -0,0 +1,1553 @@
+/*
+ * Calculate the checksum of data that is 16 byte aligned and a multiple of
+ * 16 bytes.
+ *
+ * The first step is to reduce it to 1024 bits. We do this in 8 parallel
+ * chunks in order to mask the latency of the vpmsum instructions. If we
+ * have more than 32 kB of data to checksum we repeat this step multiple
+ * times, passing in the previous 1024 bits.
+ *
+ * The next step is to reduce the 1024 bits to 64 bits. This step adds
+ * 32 bits of 0s to the end - this matches what a CRC does. We just
+ * calculate constants that land the data in this 32 bits.
+ *
+ * We then use fixed point Barrett reduction to compute a mod n over GF(2)
+ * for n = CRC using POWER8 instructions. We use x = 32.
+ *
+ * http://en.wikipedia.org/wiki/Barrett_reduction
+ *
+ * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+
+ .section .rodata
+.balign 16
+
+.byteswap_constant:
+ /* byte reverse permute constant */
+ .octa 0x0F0E0D0C0B0A09080706050403020100
+
+#define MAX_SIZE 32768
+.constants:
+
+	/* Reduce 262144 bits to 1024 bits */
+ /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */
+ .octa 0x00000000b6ca9e20000000009c37c408
+
+ /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */
+ .octa 0x00000000350249a800000001b51df26c
+
+ /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */
+ .octa 0x00000001862dac54000000000724b9d0
+
+ /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */
+ .octa 0x00000001d87fb48c00000001c00532fe
+
+ /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */
+ .octa 0x00000001f39b699e00000000f05a9362
+
+ /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */
+ .octa 0x0000000101da11b400000001e1007970
+
+ /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */
+ .octa 0x00000001cab571e000000000a57366ee
+
+ /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */
+ .octa 0x00000000c7020cfe0000000192011284
+
+ /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */
+ .octa 0x00000000cdaed1ae0000000162716d9a
+
+ /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */
+ .octa 0x00000001e804effc00000000cd97ecde
+
+ /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */
+ .octa 0x0000000077c3ea3a0000000058812bc0
+
+ /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */
+ .octa 0x0000000068df31b40000000088b8c12e
+
+ /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */
+ .octa 0x00000000b059b6c200000001230b234c
+
+ /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */
+ .octa 0x0000000145fb8ed800000001120b416e
+
+ /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */
+ .octa 0x00000000cbc0916800000001974aecb0
+
+ /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */
+ .octa 0x000000005ceeedc2000000008ee3f226
+
+ /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */
+ .octa 0x0000000047d74e8600000001089aba9a
+
+ /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */
+ .octa 0x00000001407e9e220000000065113872
+
+ /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */
+ .octa 0x00000001da967bda000000005c07ec10
+
+ /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */
+ .octa 0x000000006c8983680000000187590924
+
+ /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */
+ .octa 0x00000000f2d14c9800000000e35da7c6
+
+ /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */
+ .octa 0x00000001993c6ad4000000000415855a
+
+ /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */
+ .octa 0x000000014683d1ac0000000073617758
+
+ /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */
+ .octa 0x00000001a7c93e6c0000000176021d28
+
+ /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */
+ .octa 0x000000010211e90a00000001c358fd0a
+
+ /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */
+ .octa 0x000000001119403e00000001ff7a2c18
+
+ /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */
+ .octa 0x000000001c3261aa00000000f2d9f7e4
+
+ /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */
+ .octa 0x000000014e37a634000000016cf1f9c8
+
+ /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */
+ .octa 0x0000000073786c0c000000010af9279a
+
+ /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */
+ .octa 0x000000011dc037f80000000004f101e8
+
+ /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */
+ .octa 0x0000000031433dfc0000000070bcf184
+
+ /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */
+ .octa 0x000000009cde8348000000000a8de642
+
+ /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */
+ .octa 0x0000000038d3c2a60000000062ea130c
+
+ /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */
+ .octa 0x000000011b25f26000000001eb31cbb2
+
+ /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */
+ .octa 0x000000001629e6f00000000170783448
+
+ /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */
+ .octa 0x0000000160838b4c00000001a684b4c6
+
+ /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */
+ .octa 0x000000007a44011c00000000253ca5b4
+
+ /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */
+ .octa 0x00000000226f417a0000000057b4b1e2
+
+ /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */
+ .octa 0x0000000045eb2eb400000000b6bd084c
+
+ /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */
+ .octa 0x000000014459d70c0000000123c2d592
+
+ /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */
+ .octa 0x00000001d406ed8200000000159dafce
+
+ /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */
+ .octa 0x0000000160c8e1a80000000127e1a64e
+
+ /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */
+ .octa 0x0000000027ba80980000000056860754
+
+ /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */
+ .octa 0x000000006d92d01800000001e661aae8
+
+ /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */
+ .octa 0x000000012ed7e3f200000000f82c6166
+
+ /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */
+ .octa 0x000000002dc8778800000000c4f9c7ae
+
+ /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */
+ .octa 0x0000000018240bb80000000074203d20
+
+ /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */
+ .octa 0x000000001ad381580000000198173052
+
+ /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */
+ .octa 0x00000001396b78f200000001ce8aba54
+
+ /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */
+ .octa 0x000000011a68133400000001850d5d94
+
+ /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */
+ .octa 0x000000012104732e00000001d609239c
+
+ /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */
+ .octa 0x00000000a140d90c000000001595f048
+
+ /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */
+ .octa 0x00000001b7215eda0000000042ccee08
+
+ /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */
+ .octa 0x00000001aaf1df3c000000010a389d74
+
+ /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */
+ .octa 0x0000000029d15b8a000000012a840da6
+
+ /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */
+ .octa 0x00000000f1a96922000000001d181c0c
+
+ /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */
+ .octa 0x00000001ac80d03c0000000068b7d1f6
+
+ /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */
+ .octa 0x000000000f11d56a000000005b0f14fc
+
+ /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */
+ .octa 0x00000001f1c022a20000000179e9e730
+
+ /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */
+ .octa 0x0000000173d00ae200000001ce1368d6
+
+ /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */
+ .octa 0x00000001d4ffe4ac0000000112c3a84c
+
+ /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */
+ .octa 0x000000016edc5ae400000000de940fee
+
+ /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */
+ .octa 0x00000001f1a0214000000000fe896b7e
+
+ /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */
+ .octa 0x00000000ca0b28a000000001f797431c
+
+ /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */
+ .octa 0x00000001928e30a20000000053e989ba
+
+ /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */
+ .octa 0x0000000097b1b002000000003920cd16
+
+ /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */
+ .octa 0x00000000b15bf90600000001e6f579b8
+
+ /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */
+ .octa 0x00000000411c5d52000000007493cb0a
+
+ /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */
+ .octa 0x00000001c36f330000000001bdd376d8
+
+ /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */
+ .octa 0x00000001119227e0000000016badfee6
+
+ /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */
+ .octa 0x00000000114d47020000000071de5c58
+
+ /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */
+ .octa 0x00000000458b5b9800000000453f317c
+
+ /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */
+ .octa 0x000000012e31fb8e0000000121675cce
+
+ /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */
+ .octa 0x000000005cf619d800000001f409ee92
+
+ /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */
+ .octa 0x0000000063f4d8b200000000f36b9c88
+
+ /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */
+ .octa 0x000000004138dc8a0000000036b398f4
+
+ /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */
+ .octa 0x00000001d29ee8e000000001748f9adc
+
+ /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */
+ .octa 0x000000006a08ace800000001be94ec00
+
+ /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */
+ .octa 0x0000000127d4201000000000b74370d6
+
+ /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */
+ .octa 0x0000000019d76b6200000001174d0b98
+
+ /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */
+ .octa 0x00000001b1471f6e00000000befc06a4
+
+ /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */
+ .octa 0x00000001f64c19cc00000001ae125288
+
+ /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */
+ .octa 0x00000000003c0ea00000000095c19b34
+
+ /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */
+ .octa 0x000000014d73abf600000001a78496f2
+
+ /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */
+ .octa 0x00000001620eb84400000001ac5390a0
+
+ /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */
+ .octa 0x0000000147655048000000002a80ed6e
+
+ /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */
+ .octa 0x0000000067b5077e00000001fa9b0128
+
+ /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */
+ .octa 0x0000000010ffe20600000001ea94929e
+
+ /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */
+ .octa 0x000000000fee8f1e0000000125f4305c
+
+ /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */
+ .octa 0x00000001da26fbae00000001471e2002
+
+ /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */
+ .octa 0x00000001b3a8bd880000000132d2253a
+
+ /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */
+ .octa 0x00000000e8f3898e00000000f26b3592
+
+ /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */
+ .octa 0x00000000b0d0d28c00000000bc8b67b0
+
+ /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */
+ .octa 0x0000000030f2a798000000013a826ef2
+
+ /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */
+ .octa 0x000000000fba10020000000081482c84
+
+ /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */
+ .octa 0x00000000bdb9bd7200000000e77307c2
+
+ /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */
+ .octa 0x0000000075d3bf5a00000000d4a07ec8
+
+ /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */
+ .octa 0x00000000ef1f98a00000000017102100
+
+ /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */
+ .octa 0x00000000689c760200000000db406486
+
+ /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */
+ .octa 0x000000016d5fa5fe0000000192db7f88
+
+ /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */
+ .octa 0x00000001d0d2b9ca000000018bf67b1e
+
+ /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */
+ .octa 0x0000000041e7b470000000007c09163e
+
+ /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */
+ .octa 0x00000001cbb6495e000000000adac060
+
+ /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */
+ .octa 0x000000010052a0b000000000bd8316ae
+
+ /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */
+ .octa 0x00000001d8effb5c000000019f09ab54
+
+ /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */
+ .octa 0x00000001d969853c0000000125155542
+
+ /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */
+ .octa 0x00000000523ccce2000000018fdb5882
+
+ /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */
+ .octa 0x000000001e2436bc00000000e794b3f4
+
+ /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */
+ .octa 0x00000000ddd1c3a2000000016f9bb022
+
+ /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */
+ .octa 0x0000000019fcfe3800000000290c9978
+
+ /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */
+ .octa 0x00000001ce95db640000000083c0f350
+
+ /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */
+ .octa 0x00000000af5828060000000173ea6628
+
+ /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */
+ .octa 0x00000001006388f600000001c8b4e00a
+
+ /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */
+ .octa 0x0000000179eca00a00000000de95d6aa
+
+ /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */
+ .octa 0x0000000122410a6a000000010b7f7248
+
+ /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */
+ .octa 0x000000004288e87c00000001326e3a06
+
+ /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */
+ .octa 0x000000016c5490da00000000bb62c2e6
+
+ /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */
+ .octa 0x00000000d1c71f6e0000000156a4b2c2
+
+ /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */
+ .octa 0x00000001b4ce08a6000000011dfe763a
+
+ /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */
+ .octa 0x00000001466ba60c000000007bcca8e2
+
+ /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */
+ .octa 0x00000001f6c488a40000000186118faa
+
+ /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */
+ .octa 0x000000013bfb06820000000111a65a88
+
+ /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */
+ .octa 0x00000000690e9e54000000003565e1c4
+
+ /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */
+ .octa 0x00000000281346b6000000012ed02a82
+
+ /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */
+ .octa 0x000000015646402400000000c486ecfc
+
+ /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */
+ .octa 0x000000016063a8dc0000000001b951b2
+
+ /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */
+ .octa 0x0000000116a663620000000048143916
+
+ /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */
+ .octa 0x000000017e8aa4d200000001dc2ae124
+
+ /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */
+ .octa 0x00000001728eb10c00000001416c58d6
+
+ /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */
+ .octa 0x00000001b08fd7fa00000000a479744a
+
+ /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */
+ .octa 0x00000001092a16e80000000096ca3a26
+
+ /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */
+ .octa 0x00000000a505637c00000000ff223d4e
+
+ /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */
+ .octa 0x00000000d94869b2000000010e84da42
+
+ /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */
+ .octa 0x00000001c8b203ae00000001b61ba3d0
+
+ /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */
+ .octa 0x000000005704aea000000000680f2de8
+
+ /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */
+ .octa 0x000000012e295fa2000000008772a9a8
+
+ /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */
+ .octa 0x000000011d0908bc0000000155f295bc
+
+ /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */
+ .octa 0x0000000193ed97ea00000000595f9282
+
+ /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */
+ .octa 0x000000013a0f1c520000000164b1c25a
+
+ /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */
+ .octa 0x000000010c2c40c000000000fbd67c50
+
+ /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */
+ .octa 0x00000000ff6fac3e0000000096076268
+
+ /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */
+ .octa 0x000000017b3609c000000001d288e4cc
+
+ /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */
+ .octa 0x0000000088c8c92200000001eaac1bdc
+
+ /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */
+ .octa 0x00000001751baae600000001f1ea39e2
+
+ /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */
+ .octa 0x000000010795297200000001eb6506fc
+
+ /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */
+ .octa 0x0000000162b00abe000000010f806ffe
+
+ /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */
+ .octa 0x000000000d7b404c000000010408481e
+
+ /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */
+ .octa 0x00000000763b13d40000000188260534
+
+ /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */
+ .octa 0x00000000f6dc22d80000000058fc73e0
+
+ /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */
+ .octa 0x000000007daae06000000000391c59b8
+
+ /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */
+ .octa 0x000000013359ab7c000000018b638400
+
+ /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */
+ .octa 0x000000008add438a000000011738f5c4
+
+ /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */
+ .octa 0x00000001edbefdea000000008cf7c6da
+
+ /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */
+ .octa 0x000000004104e0f800000001ef97fb16
+
+ /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */
+ .octa 0x00000000b48a82220000000102130e20
+
+ /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */
+ .octa 0x00000001bcb4684400000000db968898
+
+ /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */
+ .octa 0x000000013293ce0a00000000b5047b5e
+
+ /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */
+ .octa 0x00000001710d0844000000010b90fdb2
+
+ /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */
+ .octa 0x0000000117907f6e000000004834a32e
+
+ /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */
+ .octa 0x0000000087ddf93e0000000059c8f2b0
+
+ /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */
+ .octa 0x000000005970e9b00000000122cec508
+
+ /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */
+ .octa 0x0000000185b2b7d0000000000a330cda
+
+ /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */
+ .octa 0x00000001dcee0efc000000014a47148c
+
+ /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */
+ .octa 0x0000000030da27220000000042c61cb8
+
+ /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */
+ .octa 0x000000012f925a180000000012fe6960
+
+ /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */
+ .octa 0x00000000dd2e357c00000000dbda2c20
+
+ /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */
+ .octa 0x00000000071c80de000000011122410c
+
+ /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */
+ .octa 0x000000011513140a00000000977b2070
+
+ /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */
+ .octa 0x00000001df876e8e000000014050438e
+
+ /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */
+ .octa 0x000000015f81d6ce0000000147c840e8
+
+ /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */
+ .octa 0x000000019dd94dbe00000001cc7c88ce
+
+ /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */
+ .octa 0x00000001373d206e00000001476b35a4
+
+ /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */
+ .octa 0x00000000668ccade000000013d52d508
+
+ /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */
+ .octa 0x00000001b192d268000000008e4be32e
+
+ /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */
+ .octa 0x00000000e30f3a7800000000024120fe
+
+ /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */
+ .octa 0x000000010ef1f7bc00000000ddecddb4
+
+ /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */
+ .octa 0x00000001f5ac738000000000d4d403bc
+
+ /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */
+ .octa 0x000000011822ea7000000001734b89aa
+
+ /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */
+ .octa 0x00000000c3a33848000000010e7a58d6
+
+ /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */
+ .octa 0x00000001bd151c2400000001f9f04e9c
+
+ /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */
+ .octa 0x0000000056002d7600000000b692225e
+
+ /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */
+ .octa 0x000000014657c4f4000000019b8d3f3e
+
+ /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */
+ .octa 0x0000000113742d7c00000001a874f11e
+
+ /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */
+ .octa 0x000000019c5920ba000000010d5a4254
+
+ /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */
+ .octa 0x000000005216d2d600000000bbb2f5d6
+
+ /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */
+ .octa 0x0000000136f5ad8a0000000179cc0e36
+
+ /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */
+ .octa 0x000000018b07beb600000001dca1da4a
+
+ /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */
+ .octa 0x00000000db1e93b000000000feb1a192
+
+ /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */
+ .octa 0x000000000b96fa3a00000000d1eeedd6
+
+ /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */
+ .octa 0x00000001d9968af0000000008fad9bb4
+
+ /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */
+ .octa 0x000000000e4a77a200000001884938e4
+
+ /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */
+ .octa 0x00000000508c2ac800000001bc2e9bc0
+
+ /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */
+ .octa 0x0000000021572a8000000001f9658a68
+
+ /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */
+ .octa 0x00000001b859daf2000000001b9224fc
+
+ /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */
+ .octa 0x000000016f7884740000000055b2fb84
+
+ /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */
+ .octa 0x00000001b438810e000000018b090348
+
+ /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */
+ .octa 0x0000000095ddc6f2000000011ccbd5ea
+
+ /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */
+ .octa 0x00000001d977c20c0000000007ae47f8
+
+ /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */
+ .octa 0x00000000ebedb99a0000000172acbec0
+
+ /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */
+ .octa 0x00000001df9e9e9200000001c6e3ff20
+
+ /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */
+ .octa 0x00000001a4a3f95200000000e1b38744
+
+ /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */
+ .octa 0x00000000e2f5122000000000791585b2
+
+ /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */
+ .octa 0x000000004aa01f3e00000000ac53b894
+
+ /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */
+ .octa 0x00000000b3e90a5800000001ed5f2cf4
+
+ /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */
+ .octa 0x000000000c9ca2aa00000001df48b2e0
+
+ /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */
+ .octa 0x000000015168231600000000049c1c62
+
+ /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */
+ .octa 0x0000000036fce78c000000017c460c12
+
+ /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */
+ .octa 0x000000009037dc10000000015be4da7e
+
+ /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */
+ .octa 0x00000000d3298582000000010f38f668
+
+ /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */
+ .octa 0x00000001b42e8ad60000000039f40a00
+
+ /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */
+ .octa 0x00000000142a983800000000bd4c10c4
+
+ /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */
+ .octa 0x0000000109c7f1900000000042db1d98
+
+ /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */
+ .octa 0x0000000056ff931000000001c905bae6
+
+ /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */
+ .octa 0x00000001594513aa00000000069d40ea
+
+ /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */
+ .octa 0x00000001e3b5b1e8000000008e4fbad0
+
+ /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */
+ .octa 0x000000011dd5fc080000000047bedd46
+
+ /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */
+ .octa 0x00000001675f0cc20000000026396bf8
+
+ /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */
+ .octa 0x00000000d1c8dd4400000000379beb92
+
+ /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */
+ .octa 0x0000000115ebd3d8000000000abae54a
+
+ /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */
+ .octa 0x00000001ecbd0dac0000000007e6a128
+
+ /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */
+ .octa 0x00000000cdf67af2000000000ade29d2
+
+ /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */
+ .octa 0x000000004c01ff4c00000000f974c45c
+
+ /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */
+ .octa 0x00000000f2d8657e00000000e77ac60a
+
+ /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */
+ .octa 0x000000006bae74c40000000145895816
+
+ /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */
+ .octa 0x0000000152af8aa00000000038e362be
+
+ /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */
+ .octa 0x0000000004663802000000007f991a64
+
+ /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */
+ .octa 0x00000001ab2f5afc00000000fa366d3a
+
+ /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */
+ .octa 0x0000000074a4ebd400000001a2bb34f0
+
+ /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */
+ .octa 0x00000001d7ab3a4c0000000028a9981e
+
+ /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */
+ .octa 0x00000001a8da60c600000001dbc672be
+
+ /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */
+ .octa 0x000000013cf6382000000000b04d77f6
+
+ /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */
+ .octa 0x00000000bec12e1e0000000124400d96
+
+ /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */
+ .octa 0x00000001c6368010000000014ca4b414
+
+ /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */
+ .octa 0x00000001e6e78758000000012fe2c938
+
+ /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */
+ .octa 0x000000008d7f2b3c00000001faed01e6
+
+ /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */
+ .octa 0x000000016b4a156e000000007e80ecfe
+
+ /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */
+ .octa 0x00000001c63cfeb60000000098daee94
+
+ /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */
+ .octa 0x000000015f902670000000010a04edea
+
+ /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */
+ .octa 0x00000001cd5de11e00000001c00b4524
+
+ /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */
+ .octa 0x000000001acaec540000000170296550
+
+ /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */
+ .octa 0x000000002bd0ca780000000181afaa48
+
+ /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */
+ .octa 0x0000000032d63d5c0000000185a31ffa
+
+ /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */
+ .octa 0x000000001c6d4e4c000000002469f608
+
+ /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */
+ .octa 0x0000000106a60b92000000006980102a
+
+ /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */
+ .octa 0x00000000d3855e120000000111ea9ca8
+
+ /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */
+ .octa 0x00000000e312563600000001bd1d29ce
+
+ /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */
+ .octa 0x000000009e8f7ea400000001b34b9580
+
+ /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */
+ .octa 0x00000001c82e562c000000003076054e
+
+ /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */
+ .octa 0x00000000ca9f09ce000000012a608ea4
+
+ /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */
+ .octa 0x00000000c63764e600000000784d05fe
+
+ /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */
+ .octa 0x0000000168d2e49e000000016ef0d82a
+
+ /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */
+ .octa 0x00000000e986c1480000000075bda454
+
+ /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */
+ .octa 0x00000000cfb65894000000003dc0a1c4
+
+ /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */
+ .octa 0x0000000111cadee400000000e9a5d8be
+
+ /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */
+ .octa 0x0000000171fb63ce00000001609bc4b4
+
+.short_constants:
+
+ /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */
+ /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */
+ .octa 0x7fec2963e5bf80485cf015c388e56f72
+
+ /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */
+ .octa 0x38e888d4844752a9963a18920246e2e6
+
+ /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */
+ .octa 0x42316c00730206ad419a441956993a31
+
+ /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */
+ .octa 0x543d5c543e65ddf9924752ba2b830011
+
+ /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */
+ .octa 0x78e87aaf56767c9255bd7f9518e4a304
+
+ /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */
+ .octa 0x8f68fcec1903da7f6d76739fe0553f1e
+
+ /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */
+ .octa 0x3f4840246791d588c133722b1fe0b5c3
+
+ /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */
+ .octa 0x34c96751b04de25a64b67ee0e55ef1f3
+
+ /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */
+ .octa 0x156c8e180b4a395b069db049b8fdb1e7
+
+ /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */
+ .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e
+
+ /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */
+ .octa 0x041d37768cd75659817cdc5119b29a35
+
+ /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */
+ .octa 0x3a0777818cfaa9651ce9d94b36c41f1c
+
+ /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */
+ .octa 0x0e148e8252377a554f256efcb82be955
+
+ /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */
+ .octa 0x9c25531d19e65ddeec1631edb2dea967
+
+ /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */
+ .octa 0x790606ff9957c0a65d27e147510ac59a
+
+ /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */
+ .octa 0x82f63b786ea2d55ca66805eb18b8ea18
+
+
+.barrett_constants:
+ /* 33 bit reflected Barrett constant m - (4^32)/n */
+ .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */
+ /* 33 bit reflected Barrett constant n */
+ .octa 0x00000000000000000000000105ec76f1
+
+ .text
+
+#if defined(__BIG_ENDIAN__)
+#define BYTESWAP_DATA
+#else
+#undef BYTESWAP_DATA
+#endif
+
+#define off16 r25
+#define off32 r26
+#define off48 r27
+#define off64 r28
+#define off80 r29
+#define off96 r30
+#define off112 r31
+
+#define const1 v24
+#define const2 v25
+
+#define byteswap v26
+#define mask_32bit v27
+#define mask_64bit v28
+#define zeroes v29
+
+#ifdef BYTESWAP_DATA
+#define VPERM(A, B, C, D) vperm A, B, C, D
+#else
+#define VPERM(A, B, C, D)
+#endif
+
+/* unsigned int __crc32c_vpmsum(unsigned int crc, void *p, unsigned long len) */
+FUNC_START(__crc32c_vpmsum)
+ std r31,-8(r1)
+ std r30,-16(r1)
+ std r29,-24(r1)
+ std r28,-32(r1)
+ std r27,-40(r1)
+ std r26,-48(r1)
+ std r25,-56(r1)
+
+ li off16,16
+ li off32,32
+ li off48,48
+ li off64,64
+ li off80,80
+ li off96,96
+ li off112,112
+ li r0,0
+
+	/* Enough room for saving 10 non-volatile VMX registers */
+ subi r6,r1,56+10*16
+ subi r7,r1,56+2*16
+
+ stvx v20,0,r6
+ stvx v21,off16,r6
+ stvx v22,off32,r6
+ stvx v23,off48,r6
+ stvx v24,off64,r6
+ stvx v25,off80,r6
+ stvx v26,off96,r6
+ stvx v27,off112,r6
+ stvx v28,0,r7
+ stvx v29,off16,r7
+
+ mr r10,r3
+
+ vxor zeroes,zeroes,zeroes
+ vspltisw v0,-1
+
+ vsldoi mask_32bit,zeroes,v0,4
+ vsldoi mask_64bit,zeroes,v0,8
+
+ /* Get the initial value into v8 */
+ vxor v8,v8,v8
+ MTVRD(v8, R3)
+ vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */
+
+#ifdef BYTESWAP_DATA
+ addis r3,r2,.byteswap_constant@toc@ha
+ addi r3,r3,.byteswap_constant@toc@l
+
+ lvx byteswap,0,r3
+ addi r3,r3,16
+#endif
+
+ cmpdi r5,256
+ blt .Lshort
+
+ rldicr r6,r5,0,56
+
+ /* Checksum in blocks of MAX_SIZE */
+1: lis r7,MAX_SIZE@h
+ ori r7,r7,MAX_SIZE@l
+ mr r9,r7
+ cmpd r6,r7
+ bgt 2f
+ mr r7,r6
+2: subf r6,r7,r6
+
+ /* our main loop does 128 bytes at a time */
+ srdi r7,r7,7
+
+ /*
+ * Work out the offset into the constants table to start at. Each
+ * constant is 16 bytes, and it is used against 128 bytes of input
+ * data - 128 / 16 = 8
+ */
+ sldi r8,r7,4
+ srdi r9,r9,3
+ subf r8,r8,r9
+
+ /* We reduce our final 128 bytes in a separate step */
+ addi r7,r7,-1
+ mtctr r7
+
+ addis r3,r2,.constants@toc@ha
+ addi r3,r3,.constants@toc@l
+
+ /* Find the start of our constants */
+ add r3,r3,r8
+
+ /* zero v0-v7 which will contain our checksums */
+ vxor v0,v0,v0
+ vxor v1,v1,v1
+ vxor v2,v2,v2
+ vxor v3,v3,v3
+ vxor v4,v4,v4
+ vxor v5,v5,v5
+ vxor v6,v6,v6
+ vxor v7,v7,v7
+
+ lvx const1,0,r3
+
+ /*
+ * If we are looping back to consume more data we use the values
+ * already in v16-v23.
+ */
+ cmpdi r0,1
+ beq 2f
+
+ /* First warm up pass */
+ lvx v16,0,r4
+ lvx v17,off16,r4
+ VPERM(v16,v16,v16,byteswap)
+ VPERM(v17,v17,v17,byteswap)
+ lvx v18,off32,r4
+ lvx v19,off48,r4
+ VPERM(v18,v18,v18,byteswap)
+ VPERM(v19,v19,v19,byteswap)
+ lvx v20,off64,r4
+ lvx v21,off80,r4
+ VPERM(v20,v20,v20,byteswap)
+ VPERM(v21,v21,v21,byteswap)
+ lvx v22,off96,r4
+ lvx v23,off112,r4
+ VPERM(v22,v22,v22,byteswap)
+ VPERM(v23,v23,v23,byteswap)
+ addi r4,r4,8*16
+
+ /* xor in initial value */
+ vxor v16,v16,v8
+
+2: bdz .Lfirst_warm_up_done
+
+ addi r3,r3,16
+ lvx const2,0,r3
+
+ /* Second warm up pass */
+ VPMSUMD(v8,v16,const1)
+ lvx v16,0,r4
+ VPERM(v16,v16,v16,byteswap)
+ ori r2,r2,0
+
+ VPMSUMD(v9,v17,const1)
+ lvx v17,off16,r4
+ VPERM(v17,v17,v17,byteswap)
+ ori r2,r2,0
+
+ VPMSUMD(v10,v18,const1)
+ lvx v18,off32,r4
+ VPERM(v18,v18,v18,byteswap)
+ ori r2,r2,0
+
+ VPMSUMD(v11,v19,const1)
+ lvx v19,off48,r4
+ VPERM(v19,v19,v19,byteswap)
+ ori r2,r2,0
+
+ VPMSUMD(v12,v20,const1)
+ lvx v20,off64,r4
+ VPERM(v20,v20,v20,byteswap)
+ ori r2,r2,0
+
+ VPMSUMD(v13,v21,const1)
+ lvx v21,off80,r4
+ VPERM(v21,v21,v21,byteswap)
+ ori r2,r2,0
+
+ VPMSUMD(v14,v22,const1)
+ lvx v22,off96,r4
+ VPERM(v22,v22,v22,byteswap)
+ ori r2,r2,0
+
+ VPMSUMD(v15,v23,const1)
+ lvx v23,off112,r4
+ VPERM(v23,v23,v23,byteswap)
+
+ addi r4,r4,8*16
+
+ bdz .Lfirst_cool_down
+
+ /*
+ * main loop. We modulo schedule it such that it takes three iterations
+ * to complete - first iteration load, second iteration vpmsum, third
+ * iteration xor.
+ */
+ .balign 16
+4: lvx const1,0,r3
+ addi r3,r3,16
+ ori r2,r2,0
+
+ vxor v0,v0,v8
+ VPMSUMD(v8,v16,const2)
+ lvx v16,0,r4
+ VPERM(v16,v16,v16,byteswap)
+ ori r2,r2,0
+
+ vxor v1,v1,v9
+ VPMSUMD(v9,v17,const2)
+ lvx v17,off16,r4
+ VPERM(v17,v17,v17,byteswap)
+ ori r2,r2,0
+
+ vxor v2,v2,v10
+ VPMSUMD(v10,v18,const2)
+ lvx v18,off32,r4
+ VPERM(v18,v18,v18,byteswap)
+ ori r2,r2,0
+
+ vxor v3,v3,v11
+ VPMSUMD(v11,v19,const2)
+ lvx v19,off48,r4
+ VPERM(v19,v19,v19,byteswap)
+ lvx const2,0,r3
+ ori r2,r2,0
+
+ vxor v4,v4,v12
+ VPMSUMD(v12,v20,const1)
+ lvx v20,off64,r4
+ VPERM(v20,v20,v20,byteswap)
+ ori r2,r2,0
+
+ vxor v5,v5,v13
+ VPMSUMD(v13,v21,const1)
+ lvx v21,off80,r4
+ VPERM(v21,v21,v21,byteswap)
+ ori r2,r2,0
+
+ vxor v6,v6,v14
+ VPMSUMD(v14,v22,const1)
+ lvx v22,off96,r4
+ VPERM(v22,v22,v22,byteswap)
+ ori r2,r2,0
+
+ vxor v7,v7,v15
+ VPMSUMD(v15,v23,const1)
+ lvx v23,off112,r4
+ VPERM(v23,v23,v23,byteswap)
+
+ addi r4,r4,8*16
+
+ bdnz 4b
+
+.Lfirst_cool_down:
+ /* First cool down pass */
+ lvx const1,0,r3
+ addi r3,r3,16
+
+ vxor v0,v0,v8
+ VPMSUMD(v8,v16,const1)
+ ori r2,r2,0
+
+ vxor v1,v1,v9
+ VPMSUMD(v9,v17,const1)
+ ori r2,r2,0
+
+ vxor v2,v2,v10
+ VPMSUMD(v10,v18,const1)
+ ori r2,r2,0
+
+ vxor v3,v3,v11
+ VPMSUMD(v11,v19,const1)
+ ori r2,r2,0
+
+ vxor v4,v4,v12
+ VPMSUMD(v12,v20,const1)
+ ori r2,r2,0
+
+ vxor v5,v5,v13
+ VPMSUMD(v13,v21,const1)
+ ori r2,r2,0
+
+ vxor v6,v6,v14
+ VPMSUMD(v14,v22,const1)
+ ori r2,r2,0
+
+ vxor v7,v7,v15
+ VPMSUMD(v15,v23,const1)
+ ori r2,r2,0
+
+.Lsecond_cool_down:
+ /* Second cool down pass */
+ vxor v0,v0,v8
+ vxor v1,v1,v9
+ vxor v2,v2,v10
+ vxor v3,v3,v11
+ vxor v4,v4,v12
+ vxor v5,v5,v13
+ vxor v6,v6,v14
+ vxor v7,v7,v15
+
+ /*
+ * vpmsumd produces a 96 bit result in the least significant bits
+ * of the register. Since we are bit reflected we have to shift it
+ * left 32 bits so it occupies the least significant bits in the
+ * bit reflected domain.
+ */
+ vsldoi v0,v0,zeroes,4
+ vsldoi v1,v1,zeroes,4
+ vsldoi v2,v2,zeroes,4
+ vsldoi v3,v3,zeroes,4
+ vsldoi v4,v4,zeroes,4
+ vsldoi v5,v5,zeroes,4
+ vsldoi v6,v6,zeroes,4
+ vsldoi v7,v7,zeroes,4
+
+ /* xor with last 1024 bits */
+ lvx v8,0,r4
+ lvx v9,off16,r4
+ VPERM(v8,v8,v8,byteswap)
+ VPERM(v9,v9,v9,byteswap)
+ lvx v10,off32,r4
+ lvx v11,off48,r4
+ VPERM(v10,v10,v10,byteswap)
+ VPERM(v11,v11,v11,byteswap)
+ lvx v12,off64,r4
+ lvx v13,off80,r4
+ VPERM(v12,v12,v12,byteswap)
+ VPERM(v13,v13,v13,byteswap)
+ lvx v14,off96,r4
+ lvx v15,off112,r4
+ VPERM(v14,v14,v14,byteswap)
+ VPERM(v15,v15,v15,byteswap)
+
+ addi r4,r4,8*16
+
+ vxor v16,v0,v8
+ vxor v17,v1,v9
+ vxor v18,v2,v10
+ vxor v19,v3,v11
+ vxor v20,v4,v12
+ vxor v21,v5,v13
+ vxor v22,v6,v14
+ vxor v23,v7,v15
+
+ li r0,1
+ cmpdi r6,0
+ addi r6,r6,128
+ bne 1b
+
+ /* Work out how many bytes we have left */
+ andi. r5,r5,127
+
+ /* Calculate where in the constant table we need to start */
+ subfic r6,r5,128
+ add r3,r3,r6
+
+ /* How many 16 byte chunks are in the tail */
+ srdi r7,r5,4
+ mtctr r7
+
+ /*
+ * Reduce the previously calculated 1024 bits to 64 bits, shifting
+ * 32 bits to include the trailing 32 bits of zeros
+ */
+ lvx v0,0,r3
+ lvx v1,off16,r3
+ lvx v2,off32,r3
+ lvx v3,off48,r3
+ lvx v4,off64,r3
+ lvx v5,off80,r3
+ lvx v6,off96,r3
+ lvx v7,off112,r3
+ addi r3,r3,8*16
+
+ VPMSUMW(v0,v16,v0)
+ VPMSUMW(v1,v17,v1)
+ VPMSUMW(v2,v18,v2)
+ VPMSUMW(v3,v19,v3)
+ VPMSUMW(v4,v20,v4)
+ VPMSUMW(v5,v21,v5)
+ VPMSUMW(v6,v22,v6)
+ VPMSUMW(v7,v23,v7)
+
+ /* Now reduce the tail (0 - 112 bytes) */
+ cmpdi r7,0
+ beq 1f
+
+ lvx v16,0,r4
+ lvx v17,0,r3
+ VPERM(v16,v16,v16,byteswap)
+ VPMSUMW(v16,v16,v17)
+ vxor v0,v0,v16
+ bdz 1f
+
+ lvx v16,off16,r4
+ lvx v17,off16,r3
+ VPERM(v16,v16,v16,byteswap)
+ VPMSUMW(v16,v16,v17)
+ vxor v0,v0,v16
+ bdz 1f
+
+ lvx v16,off32,r4
+ lvx v17,off32,r3
+ VPERM(v16,v16,v16,byteswap)
+ VPMSUMW(v16,v16,v17)
+ vxor v0,v0,v16
+ bdz 1f
+
+ lvx v16,off48,r4
+ lvx v17,off48,r3
+ VPERM(v16,v16,v16,byteswap)
+ VPMSUMW(v16,v16,v17)
+ vxor v0,v0,v16
+ bdz 1f
+
+ lvx v16,off64,r4
+ lvx v17,off64,r3
+ VPERM(v16,v16,v16,byteswap)
+ VPMSUMW(v16,v16,v17)
+ vxor v0,v0,v16
+ bdz 1f
+
+ lvx v16,off80,r4
+ lvx v17,off80,r3
+ VPERM(v16,v16,v16,byteswap)
+ VPMSUMW(v16,v16,v17)
+ vxor v0,v0,v16
+ bdz 1f
+
+ lvx v16,off96,r4
+ lvx v17,off96,r3
+ VPERM(v16,v16,v16,byteswap)
+ VPMSUMW(v16,v16,v17)
+ vxor v0,v0,v16
+
+ /* Now xor all the parallel chunks together */
+1: vxor v0,v0,v1
+ vxor v2,v2,v3
+ vxor v4,v4,v5
+ vxor v6,v6,v7
+
+ vxor v0,v0,v2
+ vxor v4,v4,v6
+
+ vxor v0,v0,v4
+
+.Lbarrett_reduction:
+ /* Barrett constants */
+ addis r3,r2,.barrett_constants@toc@ha
+ addi r3,r3,.barrett_constants@toc@l
+
+ lvx const1,0,r3
+ lvx const2,off16,r3
+
+ vsldoi v1,v0,v0,8
+ vxor v0,v0,v1 /* xor two 64 bit results together */
+
+ /* shift left one bit */
+ vspltisb v1,1
+ vsl v0,v0,v1
+
+ vand v0,v0,mask_64bit
+
+ /*
+ * The reflected version of Barrett reduction. Instead of bit
+ * reflecting our data (which is expensive to do), we bit reflect our
+ * constants and our algorithm, which means the intermediate data in
+ * our vector registers goes from 0-63 instead of 63-0. We can reflect
+ * the algorithm because we don't carry in mod 2 arithmetic.
+ */
+ vand v1,v0,mask_32bit /* bottom 32 bits of a */
+ VPMSUMD(v1,v1,const1) /* ma */
+ vand v1,v1,mask_32bit /* bottom 32bits of ma */
+ VPMSUMD(v1,v1,const2) /* qn */
+ vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
+
+ /*
+ * Since we are bit reflected, the result (ie the low 32 bits) is in
+ * the high 32 bits. We just need to shift it left 4 bytes
+ * V0 [ 0 1 X 3 ]
+ * V0 [ 0 X 2 3 ]
+ */
+	vsldoi	v0,v0,zeroes,4		/* shift result into top 64 bits */
+
+ /* Get it into r3 */
+ MFVRD(R3, v0)
+
+.Lout:
+ subi r6,r1,56+10*16
+ subi r7,r1,56+2*16
+
+ lvx v20,0,r6
+ lvx v21,off16,r6
+ lvx v22,off32,r6
+ lvx v23,off48,r6
+ lvx v24,off64,r6
+ lvx v25,off80,r6
+ lvx v26,off96,r6
+ lvx v27,off112,r6
+ lvx v28,0,r7
+ lvx v29,off16,r7
+
+ ld r31,-8(r1)
+ ld r30,-16(r1)
+ ld r29,-24(r1)
+ ld r28,-32(r1)
+ ld r27,-40(r1)
+ ld r26,-48(r1)
+ ld r25,-56(r1)
+
+ blr
+
+.Lfirst_warm_up_done:
+ lvx const1,0,r3
+ addi r3,r3,16
+
+ VPMSUMD(v8,v16,const1)
+ VPMSUMD(v9,v17,const1)
+ VPMSUMD(v10,v18,const1)
+ VPMSUMD(v11,v19,const1)
+ VPMSUMD(v12,v20,const1)
+ VPMSUMD(v13,v21,const1)
+ VPMSUMD(v14,v22,const1)
+ VPMSUMD(v15,v23,const1)
+
+ b .Lsecond_cool_down
+
+.Lshort:
+ cmpdi r5,0
+ beq .Lzero
+
+ addis r3,r2,.short_constants@toc@ha
+ addi r3,r3,.short_constants@toc@l
+
+ /* Calculate where in the constant table we need to start */
+ subfic r6,r5,256
+ add r3,r3,r6
+
+ /* How many 16 byte chunks? */
+ srdi r7,r5,4
+ mtctr r7
+
+ vxor v19,v19,v19
+ vxor v20,v20,v20
+
+ lvx v0,0,r4
+ lvx v16,0,r3
+ VPERM(v0,v0,v16,byteswap)
+ vxor v0,v0,v8 /* xor in initial value */
+ VPMSUMW(v0,v0,v16)
+ bdz .Lv0
+
+ lvx v1,off16,r4
+ lvx v17,off16,r3
+ VPERM(v1,v1,v17,byteswap)
+ VPMSUMW(v1,v1,v17)
+ bdz .Lv1
+
+ lvx v2,off32,r4
+ lvx v16,off32,r3
+ VPERM(v2,v2,v16,byteswap)
+ VPMSUMW(v2,v2,v16)
+ bdz .Lv2
+
+ lvx v3,off48,r4
+ lvx v17,off48,r3
+ VPERM(v3,v3,v17,byteswap)
+ VPMSUMW(v3,v3,v17)
+ bdz .Lv3
+
+ lvx v4,off64,r4
+ lvx v16,off64,r3
+ VPERM(v4,v4,v16,byteswap)
+ VPMSUMW(v4,v4,v16)
+ bdz .Lv4
+
+ lvx v5,off80,r4
+ lvx v17,off80,r3
+ VPERM(v5,v5,v17,byteswap)
+ VPMSUMW(v5,v5,v17)
+ bdz .Lv5
+
+ lvx v6,off96,r4
+ lvx v16,off96,r3
+ VPERM(v6,v6,v16,byteswap)
+ VPMSUMW(v6,v6,v16)
+ bdz .Lv6
+
+ lvx v7,off112,r4
+ lvx v17,off112,r3
+ VPERM(v7,v7,v17,byteswap)
+ VPMSUMW(v7,v7,v17)
+ bdz .Lv7
+
+ addi r3,r3,128
+ addi r4,r4,128
+
+ lvx v8,0,r4
+ lvx v16,0,r3
+ VPERM(v8,v8,v16,byteswap)
+ VPMSUMW(v8,v8,v16)
+ bdz .Lv8
+
+ lvx v9,off16,r4
+ lvx v17,off16,r3
+ VPERM(v9,v9,v17,byteswap)
+ VPMSUMW(v9,v9,v17)
+ bdz .Lv9
+
+ lvx v10,off32,r4
+ lvx v16,off32,r3
+ VPERM(v10,v10,v16,byteswap)
+ VPMSUMW(v10,v10,v16)
+ bdz .Lv10
+
+ lvx v11,off48,r4
+ lvx v17,off48,r3
+ VPERM(v11,v11,v17,byteswap)
+ VPMSUMW(v11,v11,v17)
+ bdz .Lv11
+
+ lvx v12,off64,r4
+ lvx v16,off64,r3
+ VPERM(v12,v12,v16,byteswap)
+ VPMSUMW(v12,v12,v16)
+ bdz .Lv12
+
+ lvx v13,off80,r4
+ lvx v17,off80,r3
+ VPERM(v13,v13,v17,byteswap)
+ VPMSUMW(v13,v13,v17)
+ bdz .Lv13
+
+ lvx v14,off96,r4
+ lvx v16,off96,r3
+ VPERM(v14,v14,v16,byteswap)
+ VPMSUMW(v14,v14,v16)
+ bdz .Lv14
+
+ lvx v15,off112,r4
+ lvx v17,off112,r3
+ VPERM(v15,v15,v17,byteswap)
+ VPMSUMW(v15,v15,v17)
+
+.Lv15: vxor v19,v19,v15
+.Lv14: vxor v20,v20,v14
+.Lv13: vxor v19,v19,v13
+.Lv12: vxor v20,v20,v12
+.Lv11: vxor v19,v19,v11
+.Lv10: vxor v20,v20,v10
+.Lv9: vxor v19,v19,v9
+.Lv8: vxor v20,v20,v8
+.Lv7: vxor v19,v19,v7
+.Lv6: vxor v20,v20,v6
+.Lv5: vxor v19,v19,v5
+.Lv4: vxor v20,v20,v4
+.Lv3: vxor v19,v19,v3
+.Lv2: vxor v20,v20,v2
+.Lv1: vxor v19,v19,v1
+.Lv0: vxor v20,v20,v0
+
+ vxor v0,v19,v20
+
+ b .Lbarrett_reduction
+
+.Lzero:
+ mr r3,r10
+ b .Lout
+
+FUNC_END(__crc32c_vpmsum)
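
For reference, __crc32c_vpmsum() above accelerates the same reflected CRC32C (Castagnoli polynomial 0x1EDC6F41, reflected form 0x82f63b78) that the kernel's generic __crc32c_le() computes. A minimal bitwise software sketch of that CRC, useful for cross-checking the vector code (the name crc32c_sw is illustrative and not part of this patch):

    #include <stddef.h>
    #include <stdint.h>

    /* Bitwise reflected CRC32C; no init/final inversion, matching __crc32c_le(). */
    static uint32_t crc32c_sw(uint32_t crc, const unsigned char *p, size_t len)
    {
            int i;

            while (len--) {
                    crc ^= *p++;
                    for (i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78 : 0);
            }
            return crc;
    }

The glue code below falls back to the kernel's __crc32c_le(), which computes the same function, for short buffers and for unaligned head or trailing bytes.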
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
new file mode 100644
index 000000000000..bfe3d37a24ef
--- /dev/null
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -0,0 +1,167 @@
+#include <linux/crc32.h>
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/switch_to.h>
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+#define VMX_ALIGN 16
+#define VMX_ALIGN_MASK (VMX_ALIGN-1)
+
+#define VECTOR_BREAKPOINT 512
+
+u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len);
+
+static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
+{
+ unsigned int prealign;
+ unsigned int tail;
+
+ if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+ return __crc32c_le(crc, p, len);
+
+ if ((unsigned long)p & VMX_ALIGN_MASK) {
+ prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
+ crc = __crc32c_le(crc, p, prealign);
+ len -= prealign;
+ p += prealign;
+ }
+
+ if (len & ~VMX_ALIGN_MASK) {
+ pagefault_disable();
+ enable_kernel_altivec();
+ crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+ pagefault_enable();
+ }
+
+ tail = len & VMX_ALIGN_MASK;
+ if (tail) {
+ p += len & ~VMX_ALIGN_MASK;
+ crc = __crc32c_le(crc, p, tail);
+ }
+
+ return crc;
+}
+
+static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = 0;
+
+ return 0;
+}
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy.
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *mctx = crypto_shash_ctx(hash);
+
+ if (keylen != sizeof(u32)) {
+ crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ *mctx = le32_to_cpup((__le32 *)key);
+ return 0;
+}
+
+static int crc32c_vpmsum_init(struct shash_desc *desc)
+{
+ u32 *mctx = crypto_shash_ctx(desc->tfm);
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *crcp = *mctx;
+
+ return 0;
+}
+
+static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *crcp = crc32c_vpmsum(*crcp, data, len);
+
+ return 0;
+}
+
+static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ *(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len));
+
+ return 0;
+}
+
+static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *(__le32 *)out = ~cpu_to_le32p(crcp);
+
+ return 0;
+}
+
+static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len,
+ out);
+}
+
+static struct shash_alg alg = {
+ .setkey = crc32c_vpmsum_setkey,
+ .init = crc32c_vpmsum_init,
+ .update = crc32c_vpmsum_update,
+ .final = crc32c_vpmsum_final,
+ .finup = crc32c_vpmsum_finup,
+ .digest = crc32c_vpmsum_digest,
+ .descsize = sizeof(u32),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32c",
+ .cra_driver_name = "crc32c-vpmsum",
+ .cra_priority = 200,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(u32),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32c_vpmsum_cra_init,
+ }
+};
+
+static int __init crc32c_vpmsum_mod_init(void)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ return crypto_register_shash(&alg);
+}
+
+static void __exit crc32c_vpmsum_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(crc32c_vpmsum_mod_init);
+module_exit(crc32c_vpmsum_mod_fini);
+
+MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
+MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-vpmsum");
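
Once this shash is registered, in-kernel users that request "crc32c" (for example via libcrc32c) pick it up automatically because its cra_priority of 200 beats the generic implementation. A hedged sketch of direct use through the synchronous hash API, assuming a 4.x-era shash_desc that still has a flags field; the helper name crc32c_of is illustrative:

    #include <crypto/hash.h>
    #include <linux/err.h>

    /* Checksum a buffer with whichever "crc32c" implementation won selection. */
    static int crc32c_of(const void *data, unsigned int len, u32 *out)
    {
            struct crypto_shash *tfm;
            int ret;

            tfm = crypto_alloc_shash("crc32c", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    desc->flags = 0;
                    ret = crypto_shash_digest(desc, data, len, (u8 *)out);
            }

            crypto_free_shash(tfm);
            return ret;
    }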
diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h
new file mode 100644
index 000000000000..c133246df467
--- /dev/null
+++ b/arch/powerpc/include/asm/accounting.h
@@ -0,0 +1,24 @@
+/*
+ * Common time accounting prototypes and such for all ppc machines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __POWERPC_ACCOUNTING_H
+#define __POWERPC_ACCOUNTING_H
+
+/* Stuff for accurate time accounting */
+struct cpu_accounting_data {
+ unsigned long user_time; /* accumulated usermode TB ticks */
+ unsigned long system_time; /* accumulated system TB ticks */
+ unsigned long user_time_scaled; /* accumulated usermode SPURR ticks */
+ unsigned long starttime; /* TB value snapshot */
+ unsigned long starttime_user; /* TB value on exit to usermode */
+ unsigned long startspurr; /* SPURR value snapshot */
+ unsigned long utime_sspurr; /* ->user_time when ->startspurr set */
+};
+
+#endif
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index dc85dcb891cf..cee3aa087653 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -36,11 +36,13 @@
#define PPC_MIN_STKFRM 112
#ifdef __BIG_ENDIAN__
+#define LHZX_BE stringify_in_c(lhzx)
#define LWZX_BE stringify_in_c(lwzx)
#define LDX_BE stringify_in_c(ldx)
#define STWX_BE stringify_in_c(stwx)
#define STDX_BE stringify_in_c(stdx)
#else
+#define LHZX_BE stringify_in_c(lhbrx)
#define LWZX_BE stringify_in_c(lwbrx)
#define LDX_BE stringify_in_c(ldbrx)
#define STWX_BE stringify_in_c(stwbrx)
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..e71b9097594c
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
+#define _ASM_POWERPC_ASM_PROTOTYPES_H
+/*
+ * This file is for prototypes of C functions that are only called
+ * from asm, and any associated variables.
+ *
+ * Copyright 2016, Daniel Axtens, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <linux/kprobes.h>
+
+/* SMP */
+extern struct thread_info *current_set[NR_CPUS];
+extern struct thread_info *secondary_ti;
+void start_secondary(void *unused);
+
+/* kexec */
+struct paca_struct;
+struct kimage;
+extern struct paca_struct kexec_paca;
+void kexec_copy_flush(struct kimage *image);
+
+/* pseries hcall tracing */
+extern struct static_key hcall_tracepoint_key;
+void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
+void __trace_hcall_exit(long opcode, unsigned long retval,
+ unsigned long *retbuf);
+/* OPAL tracing */
+#ifdef HAVE_JUMP_LABEL
+extern struct static_key opal_tracepoint_key;
+#endif
+
+void __trace_opal_entry(unsigned long opcode, unsigned long *args);
+void __trace_opal_exit(long opcode, unsigned long retval);
+
+/* VMX copying */
+int enter_vmx_usercopy(void);
+int exit_vmx_usercopy(void);
+int enter_vmx_copy(void);
+void * exit_vmx_copy(void *dest);
+
+/* Traps */
+long machine_check_early(struct pt_regs *regs);
+long hmi_exception_realmode(struct pt_regs *regs);
+void SMIException(struct pt_regs *regs);
+void handle_hmi_exception(struct pt_regs *regs);
+void instruction_breakpoint_exception(struct pt_regs *regs);
+void RunModeException(struct pt_regs *regs);
+void __kprobes single_step_exception(struct pt_regs *regs);
+void __kprobes program_check_exception(struct pt_regs *regs);
+void alignment_exception(struct pt_regs *regs);
+void StackOverflow(struct pt_regs *regs);
+void nonrecoverable_exception(struct pt_regs *regs);
+void kernel_fp_unavailable_exception(struct pt_regs *regs);
+void altivec_unavailable_exception(struct pt_regs *regs);
+void vsx_unavailable_exception(struct pt_regs *regs);
+void fp_unavailable_tm(struct pt_regs *regs);
+void altivec_unavailable_tm(struct pt_regs *regs);
+void vsx_unavailable_tm(struct pt_regs *regs);
+void facility_unavailable_exception(struct pt_regs *regs);
+void TAUException(struct pt_regs *regs);
+void altivec_assist_exception(struct pt_regs *regs);
+void unrecoverable_exception(struct pt_regs *regs);
+void kernel_bad_stack(struct pt_regs *regs);
+void system_reset_exception(struct pt_regs *regs);
+void machine_check_exception(struct pt_regs *regs);
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs);
+
+#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index ae0751ef8788..f08d567e0ca4 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -78,21 +78,53 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
return t; \
}
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
+static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+{ \
+ int res, t; \
+ \
+ __asm__ __volatile__( \
+"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
+ #asm_op " %1,%3,%0\n" \
+ PPC405_ERR77(0, %4) \
+" stwcx. %1,0,%4\n" \
+" bne- 1b\n" \
+ : "=&r" (res), "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ \
+ return res; \
+}
+
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
- ATOMIC_OP_RETURN_RELAXED(op, asm_op)
+ ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op)
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
-ATOMIC_OP(and, and)
-ATOMIC_OP(or, or)
-ATOMIC_OP(xor, xor)
-
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
@@ -329,20 +361,53 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
return t; \
}
+#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
+static inline long \
+atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
+{ \
+ long res, t; \
+ \
+ __asm__ __volatile__( \
+"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
+ #asm_op " %1,%3,%0\n" \
+" stdcx. %1,0,%4\n" \
+" bne- 1b\n" \
+ : "=&r" (res), "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ \
+ return res; \
+}
+
#define ATOMIC64_OPS(op, asm_op) \
ATOMIC64_OP(op, asm_op) \
- ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
+ ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
+ ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(or, or)
-ATOMIC64_OP(xor, xor)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC64_OPS(and, and)
+ATOMIC64_OPS(or, or)
+ATOMIC64_OPS(xor, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
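
The new atomic_fetch_*_relaxed() variants return the value the counter held before the operation and, being _relaxed, imply no memory ordering of their own. A minimal, hypothetical usage sketch:

    #include <linux/atomic.h>

    static atomic_t nr_events = ATOMIC_INIT(0);

    static int record_event(void)
    {
            /* old is the pre-increment value; the counter becomes old + 1. */
            int old = atomic_fetch_add_relaxed(1, &nr_events);

            /* Relaxed ordering is fine for statistics, not for publishing data. */
            return old;
    }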
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index a2350194fc76..8e21bb492dca 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- tlb_flush_pgtable(tlb, address);
pgtable_page_dtor(table);
pgtable_free_tlb(tlb, page_address(table), 0);
}
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 290157e8d5b2..5eaf86ac143d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -88,6 +88,7 @@
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
#define HPTE_R_G ASM_CONST(0x0000000000000008)
#define HPTE_R_M ASM_CONST(0x0000000000000010)
@@ -123,6 +124,45 @@
#ifndef __ASSEMBLY__
+struct mmu_hash_ops {
+ void (*hpte_invalidate)(unsigned long slot,
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, int local);
+ long (*hpte_updatepp)(unsigned long slot,
+ unsigned long newpp,
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, unsigned long flags);
+ void (*hpte_updateboltedpp)(unsigned long newpp,
+ unsigned long ea,
+ int psize, int ssize);
+ long (*hpte_insert)(unsigned long hpte_group,
+ unsigned long vpn,
+ unsigned long prpn,
+ unsigned long rflags,
+ unsigned long vflags,
+ int psize, int apsize,
+ int ssize);
+ long (*hpte_remove)(unsigned long hpte_group);
+ int (*hpte_removebolted)(unsigned long ea,
+ int psize, int ssize);
+ void (*flush_hash_range)(unsigned long number, int local);
+ void (*hugepage_invalidate)(unsigned long vsid,
+ unsigned long addr,
+ unsigned char *hpte_slot_array,
+ int psize, int ssize, int local);
+ /*
+ * Special for kexec.
+ * To be called in real mode with interrupts disabled. No locks are
+ * taken as such, so concurrent access on pre-POWER5 hardware could result
+ * in a deadlock.
+ * The linear mapping is destroyed as well.
+ */
+ void (*hpte_clear_all)(void);
+};
+extern struct mmu_hash_ops mmu_hash_ops;
+
struct hash_pte {
__be64 v;
__be64 r;
@@ -351,10 +391,13 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
+#ifdef CONFIG_PPC_PSERIES
+void hpte_init_pseries(void);
+#else
+static inline void hpte_init_pseries(void) { }
+#endif
+
extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_beat(void);
-extern void hpte_init_beat_v3(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
@@ -434,7 +477,7 @@ extern void slb_set_size(u16 size);
* function. Used in slb_allocate() and do_stab_bolted. The function
* computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
*
- * rt = register continaing the proto-VSID and into which the
+ * rt = register containing the proto-VSID and into which the
* VSID will be stored
* rx = scratch register (clobbered)
*
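
The mmu_hash_ops structure introduced above gathers the hash-MMU HPTE callbacks that previously hung off the machdep_calls structure (see the machdep.h hunk later in this series). A hedged sketch of how a backend might populate it at init time; the example_* names are invented for illustration:

static long example_hpte_remove(unsigned long hpte_group)
{
	return -1;	/* placeholder: report no HPTE slot freed */
}

static void __init example_hpte_init(void)
{
	/* hpte_init_native()/hpte_init_pseries() would install their own callbacks here */
	mmu_hash_ops.hpte_remove = example_hpte_remove;
	/* ...and assign the remaining hpte_* and flush callbacks the same way */
}
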
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 5854263d4d6e..d4eda6420523 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -23,7 +23,12 @@ struct mmu_psize_def {
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+#ifdef CONFIG_PPC_RADIX_MMU
#define radix_enabled() mmu_has_feature(MMU_FTR_RADIX)
+#else
+#define radix_enabled() (0)
+#endif
+
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 488279edb1f0..cd5e7aa8cc34 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
pgtable_cache[(shift) - 1]; \
})
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
return (pgd_t *)__get_free_page(PGALLOC_GFP);
#else
struct page *page;
- page = alloc_pages(PGALLOC_GFP, 4);
+ page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
if (!page)
return NULL;
return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
+ /*
+ * By now all the pud entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
+ /*
+ * By now all the pmd entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- tlb_flush_pgtable(tlb, address);
+ /*
+ * By now all the pte entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, table, 0);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index 71e9abced493..9db83b4e017d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -11,7 +11,7 @@ static inline int pmd_huge(pmd_t pmd)
* leaf pte for huge page
*/
if (radix_enabled())
- return !!(pmd_val(pmd) & _PAGE_PTE);
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
return 0;
}
@@ -21,7 +21,7 @@ static inline int pud_huge(pud_t pud)
* leaf pte for huge page
*/
if (radix_enabled())
- return !!(pud_val(pud) & _PAGE_PTE);
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
return 0;
}
@@ -31,7 +31,7 @@ static inline int pgd_huge(pgd_t pgd)
* leaf pte for huge page
*/
if (radix_enabled())
- return !!(pgd_val(pgd) & _PAGE_PTE);
+ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
return 0;
}
#define pgd_huge pgd_huge
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index cb2d0a5fa3f8..0d2845b44763 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -15,7 +15,7 @@ static inline int pmd_huge(pmd_t pmd)
/*
* leaf pte for huge page
*/
- return !!(pmd_val(pmd) & _PAGE_PTE);
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
static inline int pud_huge(pud_t pud)
@@ -23,7 +23,7 @@ static inline int pud_huge(pud_t pud)
/*
* leaf pte for huge page
*/
- return !!(pud_val(pud) & _PAGE_PTE);
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
static inline int pgd_huge(pgd_t pgd)
@@ -31,7 +31,7 @@ static inline int pgd_huge(pgd_t pgd)
/*
* leaf pte for huge page
*/
- return !!(pgd_val(pgd) & _PAGE_PTE);
+ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
}
#define pgd_huge pgd_huge
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 88a5ecaa157b..263bf39ced40 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
#define KERN_VIRT_SIZE __kernel_virt_size
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */
#include <asm/book3s/64/hash.h>
@@ -317,7 +318,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+ if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
@@ -335,8 +336,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
-
- if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
+ if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
@@ -345,7 +345,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
+ if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
@@ -364,17 +364,35 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
{
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
-static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_WRITE);}
-static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
-static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
-static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+
+static inline int pte_write(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
+}
+
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
- return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
}
+
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
@@ -394,14 +412,14 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
*/
static inline int pte_protnone(pte_t pte)
{
- return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
- (_PAGE_PRESENT | _PAGE_PRIVILEGED);
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED);
}
#endif /* CONFIG_NUMA_BALANCING */
static inline int pte_present(pte_t pte)
{
- return !!(pte_val(pte) & _PAGE_PRESENT);
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
}
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -473,7 +491,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
static inline bool pte_user(pte_t pte)
{
- return !(pte_val(pte) & _PAGE_PRIVILEGED);
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
}
/* Encode and de-code a swap entry */
@@ -516,10 +534,12 @@ static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
+
static inline bool pte_swp_soft_dirty(pte_t pte)
{
- return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}
+
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
@@ -625,8 +645,16 @@ static inline void pmd_clear(pmd_t *pmdp)
*pmdp = __pmd(0);
}
-#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (!pmd_none(pmd))
+static inline int pmd_none(pmd_t pmd)
+{
+ return !pmd_raw(pmd);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+
+ return !pmd_none(pmd);
+}
static inline int pmd_bad(pmd_t pmd)
{
@@ -645,19 +673,26 @@ static inline void pud_clear(pud_t *pudp)
*pudp = __pud(0);
}
-#define pud_none(pud) (!pud_val(pud))
-#define pud_present(pud) (pud_val(pud) != 0)
+static inline int pud_none(pud_t pud)
+{
+ return !pud_raw(pud);
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return !pud_none(pud);
+}
extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
- return __pte(pud_val(pud));
+ return __pte_raw(pud_raw(pud));
}
static inline pud_t pte_pud(pte_t pte)
{
- return __pud(pte_val(pte));
+ return __pud_raw(pte_raw(pte));
}
#define pud_write(pud) pte_write(pud_pte(pud))
@@ -680,17 +715,24 @@ static inline void pgd_clear(pgd_t *pgdp)
*pgdp = __pgd(0);
}
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_present(pgd) (!pgd_none(pgd))
+static inline int pgd_none(pgd_t pgd)
+{
+ return !pgd_raw(pgd);
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+ return !pgd_none(pgd);
+}
static inline pte_t pgd_pte(pgd_t pgd)
{
- return __pte(pgd_val(pgd));
+ return __pte_raw(pgd_raw(pgd));
}
static inline pgd_t pte_pgd(pte_t pte)
{
- return __pgd(pte_val(pte));
+ return __pgd_raw(pte_raw(pte));
}
static inline int pgd_bad(pgd_t pgd)
@@ -782,12 +824,12 @@ struct page *realmode_pfn_to_page(unsigned long pfn);
static inline pte_t pmd_pte(pmd_t pmd)
{
- return __pte(pmd_val(pmd));
+ return __pte_raw(pmd_raw(pmd));
}
static inline pmd_t pte_pmd(pte_t pte)
{
- return __pmd(pte_val(pte));
+ return __pmd_raw(pte_raw(pte));
}
static inline pte_t *pmdp_ptep(pmd_t *pmd)
@@ -848,7 +890,7 @@ pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
static inline int pmd_large(pmd_t pmd)
{
- return !!(pmd_val(pmd) & _PAGE_PTE);
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
@@ -864,7 +906,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+ if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
return 0;
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
@@ -875,7 +917,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
- if ((pmd_val(*pmdp) & _PAGE_WRITE) == 0)
+ if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 937d4e247ac3..df294224e280 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+ unsigned long rts_field;
+ /*
+ * we support 52 bits, hence 52-31 = 21, 0b10101
+ * RTS encoding details
+ * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+ * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+ */
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
+ return rts_field;
+}
#endif /* __ASSEMBLY__ */
#endif
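
The RTS packing in radix__get_tree_size() can be checked with plain shifts (userspace, 0-based bit numbering assumed): supporting 52 bits gives RTS = 52 - 31 = 21 = 0b10101, whose low three bits land at bit 5 and whose top two bits land at bit 61.

#include <stdio.h>

int main(void)
{
	unsigned long rts = 52 - 31;			/* 21 = 0b10101 */
	unsigned long field = ((rts & 0x7UL) << 5) |	/* low 3 bits  -> bits 5-7  */
			      ((rts >> 3) << 61);	/* high 2 bits -> bits 61-62 */

	printf("0x%lx\n", field);	/* 0x40000000000000a0 == (0x5UL << 5) | (0x2UL << 61) */
	return 0;
}
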
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 13ef38828dfe..00703e7e4c94 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -18,16 +18,21 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__tlb_flush(struct mmu_gather *tlb);
#ifdef CONFIG_SMP
extern void radix__flush_tlb_mm(struct mm_struct *mm);
extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
#else
#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
#define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr)
#endif
-
+extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
+ unsigned long page_size);
+extern void radix__flush_tlb_lpid(unsigned long lpid);
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index d98424ae356c..96e5769b18b0 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+ /*
+ * Flush the page table walk cache on freeing a page table. We already
+ * have marked the upper/higher level page table entry none by now.
+ * So it is safe to flush PWC here.
+ */
+ if (!radix_enabled())
+ return;
+ radix__flush_tlb_pwc(tlb, address);
+}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/pgalloc.h b/arch/powerpc/include/asm/book3s/pgalloc.h
index 54f591e9572e..c0a69ae92256 100644
--- a/arch/powerpc/include/asm/book3s/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/pgalloc.h
@@ -4,11 +4,6 @@
#include <linux/mm.h>
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
- unsigned long address)
-{
-
-}
#ifdef CONFIG_PPC64
#include <asm/book3s/64/pgalloc.h>
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 994c60a857ce..2015b072422c 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -49,8 +49,7 @@ void __patch_exception(int exc, unsigned long addr);
static inline unsigned long ppc_function_entry(void *func)
{
-#if defined(CONFIG_PPC64)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
u32 *insn = func;
/*
@@ -75,14 +74,13 @@ static inline unsigned long ppc_function_entry(void *func)
return (unsigned long)(insn + 2);
else
return (unsigned long)func;
-#else
+#elif defined(PPC64_ELF_ABI_v1)
/*
* On PPC64 ABIv1 the function pointer actually points to the
* function's descriptor. The first entry in the descriptor is the
* address of the function text.
*/
return ((func_descr_t *)func)->entry;
-#endif
#else
return (unsigned long)func;
#endif
@@ -90,7 +88,7 @@ static inline unsigned long ppc_function_entry(void *func)
static inline unsigned long ppc_global_function_entry(void *func)
{
-#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
/* PPC64 ABIv2 the global entry point is at the address */
return (unsigned long)func;
#else
@@ -106,7 +104,7 @@ static inline unsigned long ppc_global_function_entry(void *func)
*/
/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET 24
#else
#define R2_STACK_OFFSET 40
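
For reference, the descriptor dereferenced in the ABIv1 branch of ppc_function_entry() has roughly this layout (field names are illustrative; the kernel's real definition is func_descr_t):

struct example_func_descr {
	unsigned long entry;	/* address of the function text, what ->entry reads above */
	unsigned long toc;	/* TOC (r2) value the callee expects */
	unsigned long env;	/* environment pointer, unused by C code */
};
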
diff --git a/arch/powerpc/include/asm/cpufeature.h b/arch/powerpc/include/asm/cpufeature.h
new file mode 100644
index 000000000000..19e6290699ea
--- /dev/null
+++ b/arch/powerpc/include/asm/cpufeature.h
@@ -0,0 +1,40 @@
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(), see asm/cputable.h for powerpc CPU features.
+ *
+ * Copyright 2016 Alastair D'Silva, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_POWERPC_CPUFEATURE_H
+#define __ASM_POWERPC_CPUFEATURE_H
+
+#include <asm/cputable.h>
+
+/* Keep these in step with powerpc/include/asm/cputable.h */
+#define MAX_CPU_FEATURES (2 * 32)
+
+/*
+ * Currently we don't have a need for any of the feature bits defined in
+ * cpu_user_features. When we do, they should be defined such as:
+ *
+ * #define PPC_MODULE_FEATURE_32 (ilog2(PPC_FEATURE_32))
+ */
+
+#define PPC_MODULE_FEATURE_VEC_CRYPTO (32 + ilog2(PPC_FEATURE2_VEC_CRYPTO))
+
+#define cpu_feature(x) (x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ if (num < 32)
+ return !!(cur_cpu_spec->cpu_user_features & 1UL << num);
+ else
+ return !!(cur_cpu_spec->cpu_user_features2 & 1UL << (num - 32));
+}
+
+#endif /* __ASM_POWERPC_CPUFEATURE_H */
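
The new header exists so module_cpu_feature_match() can key module autoloading off a CPU feature bit. A hedged sketch of a consumer; the module name and init body are invented for illustration:

#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init example_vec_crypto_init(void)
{
	/* register a vector-crypto-accelerated implementation here */
	return 0;
}

static void __exit example_vec_crypto_exit(void)
{
}

module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, example_vec_crypto_init);
module_exit(example_vec_crypto_exit);
MODULE_LICENSE("GPL");
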
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index d2f99ca1e3a6..3d7fc06532a1 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -13,6 +13,8 @@
#ifndef __ASSEMBLY__
extern u32 pnv_fastsleep_workaround_at_entry[];
extern u32 pnv_fastsleep_workaround_at_exit[];
+
+extern u64 pnv_first_deep_stop_state;
#endif
#endif
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index e2452550bcb1..2dfd4fc41f3e 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -90,11 +90,10 @@ static inline void setup_cputime_one_jiffy(void)
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
u64 ct;
- u64 sec;
+ u64 sec = jif;
/* have to be a little careful about overflow */
- ct = jif % HZ;
- sec = jif / HZ;
+ ct = do_div(sec, HZ);
if (ct) {
ct *= tb_ticks_per_sec;
do_div(ct, HZ);
@@ -230,7 +229,16 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
+/*
+ * PPC64 stores the accounting data in the PACA, which is independent of the
+ * task, while PPC32 keeps it in struct thread_info; therefore, at task switch
+ * the accounting data has to be populated in the new task
+ */
+#ifdef CONFIG_PPC64
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+#else
+void arch_vtime_task_switch(struct task_struct *tsk);
+#endif
#endif /* __KERNEL__ */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
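
The jiffies64_to_cputime64() change relies on do_div(x, base) dividing x in place and returning the remainder, so one call replaces the separate '%' and '/'. A userspace analogue of that contract (HZ of 100 assumed for the example):

#include <stdio.h>
#include <stdint.h>

static uint32_t example_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;		/* quotient stays in *n, remainder is returned */
	return rem;
}

int main(void)
{
	uint64_t sec = 12345;				/* plays the role of 'jif' */
	uint32_t ct = example_do_div(&sec, 100);

	printf("sec=%llu ct=%u\n", (unsigned long long)sec, ct);	/* sec=123 ct=45 */
	return 0;
}
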
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index fb9f376ae27b..8e37b71674f4 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -57,7 +57,7 @@ struct pci_dn;
/*
* The struct is used to trace PE related EEH functionality.
* In theory, there will have one instance of the struct to
- * be created against particular PE. In nature, PEs corelate
+ * be created against particular PE. In nature, PEs correlate
* to each other. the struct has to reflect that hierarchy in
* order to easily pick up those affected PEs when one particular
* PE has EEH errors.
@@ -274,7 +274,7 @@ void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
-void *eeh_dev_init(struct pci_dn *pdn, void *data);
+struct eeh_dev *eeh_dev_init(struct pci_dn *pdn);
void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
int eeh_init(void);
int __init eeh_ops_register(struct eeh_ops *ops);
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 93ae809fe5ea..bed66e5743b3 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -287,7 +287,7 @@ do_kvm_##n: \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r10,GPR1(r1); /* save r1 in stackframe */ \
beq 4f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r9, r10); \
+ ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
SAVE_PPR(area, r9, r10); \
4: EXCEPTION_PROLOG_COMMON_2(area) \
EXCEPTION_PROLOG_COMMON_3(n) \
@@ -403,6 +403,8 @@ label##_relon_hv: \
#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe60 PACA_IRQ_HMI
#define SOFTEN_VALUE_0xe62 PACA_IRQ_HMI
+#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
+#define SOFTEN_VALUE_0xea2 PACA_IRQ_EE
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 9a67a38bf7b9..57fec8ac7b92 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -184,4 +184,8 @@ label##3: \
FTR_ENTRY_OFFSET label##1b-label##3b; \
.popsection;
+#ifndef __ASSEMBLY__
+void apply_feature_fixups(void);
+#endif
+
#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index b0629249778b..1e0b5a5d660a 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -126,6 +126,12 @@ extern int fwnmi_active;
extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
+#ifdef CONFIG_PPC_PSERIES
+void pseries_probe_fw_features(void);
+#else
+static inline void pseries_probe_fw_features(void) { };
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 90f604bbcd19..4508b322f2cd 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -51,6 +51,13 @@ enum fixed_addresses {
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
+#ifdef CONFIG_PPC_8xx
+ /* For IMMR we need an aligned 512K area */
+#define FIX_IMMR_SIZE (512 * 1024 / PAGE_SIZE)
+ FIX_IMMR_START,
+ FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+ FIX_IMMR_SIZE,
+#endif
/* FIX_PCIE_MCFG, */
__end_of_fixed_addresses
};
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 50ca7585abe2..686c5f70eb84 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -1,6 +1,8 @@
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE
+#include <asm/types.h>
+
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
@@ -65,8 +67,8 @@ struct dyn_arch_ftrace {
#endif
#endif
-#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__)
+#ifdef PPC64_ELF_ABI_v1
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
@@ -79,6 +81,6 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
return !strcmp(sym + 4, name + 3);
}
#endif
-#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */
+#endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_FTRACE */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0bc9c284aa10..708edebcf147 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -431,17 +431,6 @@ static inline unsigned long cmo_get_page_size(void)
{
return CMO_PageSize;
}
-
-extern long pSeries_enable_reloc_on_exc(void);
-extern long pSeries_disable_reloc_on_exc(void);
-
-extern long pseries_big_endian_exceptions(void);
-
-#else
-
-#define pSeries_enable_reloc_on_exc() do {} while (0)
-#define pSeries_disable_reloc_on_exc() do {} while (0)
-
#endif /* CONFIG_PPC_PSERIES */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b59ac27a6b7d..c7d82ff62a33 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -130,6 +130,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
extern bool prep_irq_for_idle(void);
+extern void force_external_irq_replay(void);
+
#else /* CONFIG_PPC64 */
#define SET_MSR_EE(x) mtmsr(x)
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7b87bab09564..f49a72a9062d 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -273,7 +273,6 @@ extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
-extern void alloc_dart_table(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_save(void)
{
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 039b583db029..2c9759bdb63b 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -40,8 +40,7 @@ struct kprobe;
typedef ppc_opcode_t kprobe_opcode_t;
#define MAX_INSN_SIZE 1
-#ifdef CONFIG_PPC64
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
/* PPC64 ABIv2 needs local entry point */
#define kprobe_lookup_name(name, addr) \
{ \
@@ -49,7 +48,7 @@ typedef ppc_opcode_t kprobe_opcode_t;
if (addr) \
addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
}
-#else
+#elif defined(PPC64_ELF_ABI_v1)
/*
* 64bit powerpc ABIv1 uses function descriptors:
* - Check for the dot variant of the symbol first.
@@ -92,8 +91,7 @@ typedef ppc_opcode_t kprobe_opcode_t;
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
} \
}
-#endif /* defined(_CALL_ELF) && _CALL_ELF == 2 */
-#endif /* CONFIG_PPC64 */
+#endif
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 72b6225aca73..d318d432caa9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -162,7 +162,7 @@ struct kvmppc_book3s_shadow_vcpu {
/* Values for kvm_state */
#define KVM_HWTHREAD_IN_KERNEL 0
-#define KVM_HWTHREAD_IN_NAP 1
+#define KVM_HWTHREAD_IN_IDLE 1
#define KVM_HWTHREAD_IN_KVM 2
#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
index e3ad5c72724a..0cf5e21179fc 100644
--- a/arch/powerpc/include/asm/linkage.h
+++ b/arch/powerpc/include/asm/linkage.h
@@ -1,8 +1,9 @@
#ifndef _ASM_POWERPC_LINKAGE_H
#define _ASM_POWERPC_LINKAGE_H
-#ifdef CONFIG_PPC64
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#include <asm/types.h>
+
+#ifdef PPC64_ELF_ABI_v1
#define cond_syscall(x) \
asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
"\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
@@ -10,6 +11,5 @@
asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
"\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
#endif
-#endif
#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 6bdcd0da9e21..76f5398e7152 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -34,42 +34,6 @@ struct pci_host_bridge;
struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
- void (*hpte_invalidate)(unsigned long slot,
- unsigned long vpn,
- int bpsize, int apsize,
- int ssize, int local);
- long (*hpte_updatepp)(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int bpsize, int apsize,
- int ssize, unsigned long flags);
- void (*hpte_updateboltedpp)(unsigned long newpp,
- unsigned long ea,
- int psize, int ssize);
- long (*hpte_insert)(unsigned long hpte_group,
- unsigned long vpn,
- unsigned long prpn,
- unsigned long rflags,
- unsigned long vflags,
- int psize, int apsize,
- int ssize);
- long (*hpte_remove)(unsigned long hpte_group);
- int (*hpte_removebolted)(unsigned long ea,
- int psize, int ssize);
- void (*flush_hash_range)(unsigned long number, int local);
- void (*hugepage_invalidate)(unsigned long vsid,
- unsigned long addr,
- unsigned char *hpte_slot_array,
- int psize, int ssize, int local);
- /*
- * Special for kexec.
- * To be called in real mode with interrupts disabled. No locks are
- * taken as such, concurrent access on pre POWER5 hardware could result
- * in a deadlock.
- * The linear mapping is destroyed as well.
- */
- void (*hpte_clear_all)(void);
-
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token);
@@ -89,7 +53,6 @@ struct machdep_calls {
int (*probe)(void);
void (*setup_arch)(void); /* Optional, may be NULL */
- void (*init_early)(void);
/* Optional, may be NULL. */
void (*show_cpuinfo)(struct seq_file *m);
void (*show_percpuinfo)(struct seq_file *m, int i);
@@ -111,8 +74,8 @@ struct machdep_calls {
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
- void (*restart)(char *cmd);
- void (*halt)(void);
+ void __noreturn (*restart)(char *cmd);
+ void __noreturn (*halt)(void);
void (*panic)(char *str);
void (*cpu_die)(void);
@@ -256,7 +219,8 @@ struct machdep_calls {
#ifdef CONFIG_ARCH_RANDOM
int (*get_random_seed)(unsigned long *v);
#endif
- int (*update_partition_table)(u64);
+ int (*register_process_table)(unsigned long base, unsigned long page_size,
+ unsigned long tbl_size);
};
extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 0a566f15f985..3e0e4927811c 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -169,6 +169,9 @@ typedef struct {
unsigned int active;
unsigned long vdso_base;
} mm_context_t;
+
+#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC_4K_PAGES)
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e53ebebff474..54471228f7b8 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -24,6 +24,11 @@
/*
* This is individual features
*/
+/*
+ * We need to clear the top 16 bits of the VA (from the remaining 64 bits) in
+ * tlbie* instructions
+ */
+#define MMU_FTR_TLBIE_CROP_VA ASM_CONST(0x00008000)
/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
@@ -97,7 +102,7 @@
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
-#define MMU_FTRS_PPC970 MMU_FTRS_POWER4
+#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
@@ -124,7 +129,7 @@ enum {
MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
- MMU_FTR_1T_SEGMENT |
+ MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_RADIX |
#endif
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 0acc7c7c28d1..e94cede14522 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -275,7 +275,7 @@ extern int mpc5200_psc_ac97_gpio_reset(int psc_number);
extern void mpc52xx_map_common_devices(void);
extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
-extern void mpc52xx_restart(char *cmd);
+extern void __noreturn mpc52xx_restart(char *cmd);
/* mpc52xx_gpt.c */
struct mpc52xx_gpt_priv;
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 127ab23e1f6c..078155fa1189 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -124,7 +124,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
+ if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
return 1;
return 0;
}
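
The added atomic_read() turns the trylock fastpath into a test-before-cmpxchg: a mutex that is visibly held no longer forces the cacheline exclusive just to fail the exchange. The same shape in portable C (GCC builtins, illustrative only):

#include <stdbool.h>

static bool example_trylock(int *count)
{
	int expected = 1;

	if (__atomic_load_n(count, __ATOMIC_RELAXED) != 1)
		return false;	/* already locked: skip the cmpxchg entirely */

	return __atomic_compare_exchange_n(count, &expected, 0, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
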
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
index fdab41c654ef..0656ff81e5b0 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -32,7 +32,7 @@
* - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
*
* Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
- * TLB2 storage attibute fields. Those are:
+ * TLB2 storage attribute fields. Those are:
*
* TLB2:
* 0...10 11 12 13 14 15 16...31
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0c12a3bfe2ab..897d2e1c8a9b 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -172,7 +171,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- pte_fragment_fre((unsigned long *)pte, 1);
+ pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 570fb30be21c..908324574f77 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -23,6 +23,7 @@
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE PTE_FRAG_SIZE
#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (0)
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 9bb8ddf0be37..0e2e57bcab50 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -158,17 +158,33 @@
#define OPAL_LEDS_SET_INDICATOR 115
#define OPAL_CEC_REBOOT2 116
#define OPAL_CONSOLE_FLUSH 117
-#define OPAL_LAST 117
+#define OPAL_GET_DEVICE_TREE 118
+#define OPAL_PCI_GET_PRESENCE_STATE 119
+#define OPAL_PCI_GET_POWER_STATE 120
+#define OPAL_PCI_SET_POWER_STATE 121
+#define OPAL_INT_GET_XIRR 122
+#define OPAL_INT_SET_CPPR 123
+#define OPAL_INT_EOI 124
+#define OPAL_INT_SET_MFRR 125
+#define OPAL_PCI_TCE_KILL 126
+#define OPAL_LAST 126
/* Device tree flags */
-/* Flags set in power-mgmt nodes in device tree if
- * respective idle states are supported in the platform.
+/*
+ * Flags set in power-mgmt nodes in device tree describing
+ * idle states that are supported in the platform.
*/
+
+#define OPAL_PM_TIMEBASE_STOP 0x00000002
+#define OPAL_PM_LOSE_HYP_CONTEXT 0x00002000
+#define OPAL_PM_LOSE_FULL_CONTEXT 0x00004000
#define OPAL_PM_NAP_ENABLED 0x00010000
#define OPAL_PM_SLEEP_ENABLED 0x00020000
#define OPAL_PM_WINKLE_ENABLED 0x00040000
#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
+#define OPAL_PM_STOP_INST_FAST 0x00100000
+#define OPAL_PM_STOP_INST_DEEP 0x00200000
/*
* OPAL_CONFIG_CPU_IDLE_STATE parameters
@@ -344,6 +360,18 @@ enum OpalPciResetState {
OPAL_ASSERT_RESET = 1
};
+enum OpalPciSlotPresence {
+ OPAL_PCI_SLOT_EMPTY = 0,
+ OPAL_PCI_SLOT_PRESENT = 1
+};
+
+enum OpalPciSlotPower {
+ OPAL_PCI_SLOT_POWER_OFF = 0,
+ OPAL_PCI_SLOT_POWER_ON = 1,
+ OPAL_PCI_SLOT_OFFLINE = 2,
+ OPAL_PCI_SLOT_ONLINE = 3
+};
+
enum OpalSlotLedType {
OPAL_SLOT_LED_TYPE_ID = 0, /* IDENTIFY LED */
OPAL_SLOT_LED_TYPE_FAULT = 1, /* FAULT LED */
@@ -802,7 +830,7 @@ struct opal_sg_entry {
};
/*
- * Candiate image SG list.
+ * Candidate image SG list.
*
* length = VER | length
*/
@@ -825,6 +853,7 @@ enum {
OPAL_PHB_CAPI_MODE_CAPI = 1,
OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
+ OPAL_PHB_CAPI_MODE_DMA = 4,
};
/* OPAL I2C request */
@@ -852,7 +881,7 @@ struct opal_i2c_request {
* with individual elements being 16 bits wide to fetch the system
* wide EPOW status. Each element in the buffer will contain the
* EPOW status in its bit representation for a particular EPOW sub
- * class as defiend here. So multiple detailed EPOW status bits
+ * class as defined here. So multiple detailed EPOW status bits
* specific for any sub class can be represented in a single buffer
* element as its bit representation.
*/
@@ -891,6 +920,13 @@ enum {
OPAL_REBOOT_PLATFORM_ERROR = 1,
};
+/* Argument to OPAL_PCI_TCE_KILL */
+enum {
+ OPAL_PCI_TCE_KILL_PAGES,
+ OPAL_PCI_TCE_KILL_PE,
+ OPAL_PCI_TCE_KILL_ALL,
+};
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9d86c6651716..ee05bd203630 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -131,7 +131,7 @@ int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number, uint16_t
int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
uint16_t dma_window_number, uint64_t pci_start_addr,
uint64_t pci_mem_size);
-int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state);
+int64_t opal_pci_reset(uint64_t id, uint8_t reset_scope, uint8_t assert_state);
int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer,
uint64_t diag_buffer_len);
@@ -148,7 +148,7 @@ int64_t opal_get_dpo_status(__be64 *dpo_timeout);
int64_t opal_set_system_attention_led(uint8_t led_action);
int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
__be16 *pci_error_type, __be16 *severity);
-int64_t opal_pci_poll(uint64_t phb_id);
+int64_t opal_pci_poll(uint64_t id);
int64_t opal_return_cpu(void);
int64_t opal_check_token(uint64_t token);
int64_t opal_reinit_cpus(uint64_t flags);
@@ -178,6 +178,8 @@ int64_t opal_dump_ack(uint32_t dump_id);
int64_t opal_dump_resend_notification(void);
int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_write_oppanel_async(uint64_t token, oppanel_line_t *lines,
+ uint64_t num_lines);
int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
int64_t opal_sync_host_reboot(void);
int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
@@ -209,12 +211,30 @@ int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf,
uint64_t size, uint64_t token);
int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size,
uint64_t token);
+int64_t opal_get_device_tree(uint32_t phandle, uint64_t buf, uint64_t len);
+int64_t opal_pci_get_presence_state(uint64_t id, uint64_t data);
+int64_t opal_pci_get_power_state(uint64_t id, uint64_t data);
+int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
+ uint64_t data);
+int64_t opal_pci_poll2(uint64_t id, uint64_t data);
+
+int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_int_set_cppr(uint8_t cppr);
+int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
+ uint32_t pe_num, uint32_t tce_size,
+ uint64_t dma_addr, uint32_t npages);
+int64_t opal_rm_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
+ uint32_t pe_num, uint32_t tce_size,
+ uint64_t dma_addr, uint32_t npages);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
+extern void opal_configure_cores(void);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
@@ -276,6 +296,16 @@ extern int opal_error_code(int rc);
ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count);
+static inline int opal_get_async_rc(struct opal_msg msg)
+{
+ if (msg.msg_type != OPAL_MSG_ASYNC_COMP)
+ return OPAL_PARAMETER;
+ else
+ return be64_to_cpu(msg.params[1]);
+}
+
+void opal_wake_poller(void);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_OPAL_H */
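
opal_get_async_rc() belongs to the asynchronous OPAL call pattern: wait for the completion message tied to the call's token, then pull the return code out of params[1]. A hedged sketch of the decode step (opal_async_wait_response() is an assumed helper from the async-token machinery, not part of this header):

static int example_wait_for_opal(uint64_t token)
{
	struct opal_msg msg;
	int rc;

	rc = opal_async_wait_response(token, &msg);	/* assumed helper */
	if (rc)
		return rc;

	return opal_error_code(opal_get_async_rc(msg));
}
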
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 546540b91095..ad171e979ab0 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -25,6 +25,7 @@
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_asm.h>
#endif
+#include <asm/accounting.h>
register struct paca_struct *local_paca asm("r13");
@@ -184,13 +185,7 @@ struct paca_struct {
#endif
/* Stuff for accurate time accounting */
- u64 user_time; /* accumulated usermode TB ticks */
- u64 system_time; /* accumulated system TB ticks */
- u64 user_time_scaled; /* accumulated usermode SPURR ticks */
- u64 starttime; /* TB value snapshot */
- u64 starttime_user; /* TB value on exit to usermode */
- u64 startspurr; /* SPURR value snapshot */
- u64 utime_sspurr; /* ->user_time when ->startspurr set */
+ struct cpu_accounting_data accounting;
u64 stolen_time; /* TB ticks taken by hypervisor */
u64 dtl_ridx; /* read index in dispatch log */
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
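
The accounting fields removed above move wholesale into the new struct cpu_accounting_data, pulled in through asm/accounting.h. Its likely shape, reconstructed here from the deleted fields with the member widths assumed:

struct cpu_accounting_data {
	unsigned long user_time;		/* accumulated usermode TB ticks */
	unsigned long system_time;		/* accumulated system TB ticks */
	unsigned long user_time_scaled;		/* accumulated usermode SPURR ticks */
	unsigned long starttime;		/* TB value snapshot */
	unsigned long starttime_user;		/* TB value on exit to usermode */
	unsigned long startspurr;		/* SPURR value snapshot */
	unsigned long utime_sspurr;		/* ->user_time when ->startspurr set */
};
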
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 51db3a37bced..56398e7e6100 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -96,7 +96,7 @@ extern unsigned int HPAGE_SHIFT;
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;
-#ifdef CONFIG_RELOCATABLE_PPC32
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif
@@ -139,9 +139,9 @@ extern long long virt_phys_offset;
* determine MEMORY_START until then. However we can determine PHYSICAL_START
* from information at hand (program counter, TLB lookup).
*
- * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
+ * On BookE with RELOCATABLE && PPC32
*
- * With RELOCATABLE_PPC32, we support loading the kernel at any physical
+ * With RELOCATABLE && PPC32, we support loading the kernel at any physical
* address without any restriction on the page alignment.
*
* We find the runtime address of _stext and relocate ourselves based on
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 467c0b05b6fb..b5e88e4a171a 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -33,6 +33,8 @@ struct pci_controller_ops {
/* Called during PCI resource reassignment */
resource_size_t (*window_alignment)(struct pci_bus *bus,
unsigned long type);
+ void (*setup_bridge)(struct pci_bus *bus,
+ unsigned long type);
void (*reset_secondary_bus)(struct pci_dev *pdev);
#ifdef CONFIG_PCI_MSI
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index e2bf208605b1..49c0a5a80efa 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -6,6 +6,7 @@
/* PTE level */
typedef struct { __be64 pte; } pte_t;
#define __pte(x) ((pte_t) { cpu_to_be64(x) })
+#define __pte_raw(x) ((pte_t) { (x) })
static inline unsigned long pte_val(pte_t x)
{
return be64_to_cpu(x.pte);
@@ -20,6 +21,7 @@ static inline __be64 pte_raw(pte_t x)
#ifdef CONFIG_PPC64
typedef struct { __be64 pmd; } pmd_t;
#define __pmd(x) ((pmd_t) { cpu_to_be64(x) })
+#define __pmd_raw(x) ((pmd_t) { (x) })
static inline unsigned long pmd_val(pmd_t x)
{
return be64_to_cpu(x.pmd);
@@ -37,21 +39,34 @@ static inline __be64 pmd_raw(pmd_t x)
#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
typedef struct { __be64 pud; } pud_t;
#define __pud(x) ((pud_t) { cpu_to_be64(x) })
+#define __pud_raw(x) ((pud_t) { (x) })
static inline unsigned long pud_val(pud_t x)
{
return be64_to_cpu(x.pud);
}
+
+static inline __be64 pud_raw(pud_t x)
+{
+ return x.pud;
+}
+
#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */
/* PGD level */
typedef struct { __be64 pgd; } pgd_t;
#define __pgd(x) ((pgd_t) { cpu_to_be64(x) })
+#define __pgd_raw(x) ((pgd_t) { (x) })
static inline unsigned long pgd_val(pgd_t x)
{
return be64_to_cpu(x.pgd);
}
+static inline __be64 pgd_raw(pgd_t x)
+{
+ return x.pgd;
+}
+
/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
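
The __pte_raw()/pte_raw() pairs added here let callers such as pte_write() compare the stored big-endian word against a constant that is byte-swapped once at compile time, instead of swapping every loaded entry. The same idea in a small userspace example (htobe64() standing in for cpu_to_be64()):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define EXAMPLE_PAGE_WRITE	0x0000000000000002ull

int main(void)
{
	uint64_t stored_be = htobe64(0x3ull);	/* the "pte" is kept big-endian in memory */

	/* like pte_raw(pte) & cpu_to_be64(_PAGE_WRITE): only the constant is converted */
	int writable = !!(stored_be & htobe64(EXAMPLE_PAGE_WRITE));

	printf("writable=%d\n", writable);	/* prints writable=1 */
	return 0;
}
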
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index ee09e99097f0..9bd87f269d6d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -71,10 +71,8 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *shift)
{
- if (!arch_irqs_disabled()) {
- pr_info("%s called with irq enabled\n", __func__);
- dump_stack();
- }
+ VM_WARN(!arch_irqs_disabled(),
+ "%s called with irq enabled\n", __func__);
return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
}
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index 925697968946..e08e829261b6 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -210,7 +210,7 @@ static inline long pmac_call_feature(int selector, struct device_node* node,
/* PMAC_FTR_SOUND_CHIP_ENABLE (struct device_node* node, 0, int value)
* enable/disable the sound chip, whatever it is and provided it can
- * acually be controlled
+ * actually be controlled
*/
#define PMAC_FTR_SOUND_CHIP_ENABLE PMAC_FTR_DEF(9)
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 6f77f71ee964..0cbd8134ce81 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -11,7 +11,20 @@
#define _ASM_PNV_PCI_H
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#include <misc/cxl-base.h>
+#include <asm/opal-api.h>
+
+#define PCI_SLOT_ID_PREFIX 0x8000000000000000
+#define PCI_SLOT_ID(phb_id, bdfn) \
+ (PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
+
+extern int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id);
+extern int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len);
+extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
+ struct opal_msg *msg);
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
@@ -26,6 +39,40 @@ int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev, int num);
void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev);
+
+/* Support for the cxl kernel api on the real PHB (instead of vPHB) */
+int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable);
+bool pnv_pci_on_cxl_phb(struct pci_dev *dev);
+struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose);
+void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu);
+
#endif
+struct pnv_php_slot {
+ struct hotplug_slot slot;
+ struct hotplug_slot_info slot_info;
+ uint64_t id;
+ char *name;
+ int slot_no;
+ struct kref kref;
+#define PNV_PHP_STATE_INITIALIZED 0
+#define PNV_PHP_STATE_REGISTERED 1
+#define PNV_PHP_STATE_POPULATED 2
+#define PNV_PHP_STATE_OFFLINE 3
+ int state;
+ struct device_node *dn;
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+ bool power_state_check;
+ void *fdt;
+ void *dt;
+ struct of_changeset ocs;
+ struct pnv_php_slot *parent;
+ struct list_head children;
+ struct list_head link;
+};
+extern struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn);
+extern int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
+ uint8_t state);
+
#endif
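
The PCI_SLOT_ID() packing above sets bit 63 to flag a slot ID, carries the bus/device/function number from bit 16 upward, and keeps the PHB id in the low bits. A worked example with invented values:

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_SLOT_ID_PREFIX	0x8000000000000000ull
#define EXAMPLE_SLOT_ID(phb_id, bdfn) \
	(EXAMPLE_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))

int main(void)
{
	/* bus 1, device 0, function 0 -> bdfn 0x0100, on PHB id 3 */
	printf("0x%llx\n", (unsigned long long)EXAMPLE_SLOT_ID(3, 0x0100));
	/* prints 0x8000000001000003 */
	return 0;
}
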
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 1d035c1cc889..127ebf5862b4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -131,6 +131,8 @@
/* sorted alphabetically */
#define PPC_INST_BHRBE 0x7c00025c
#define PPC_INST_CLRBHRB 0x7c00035c
+#define PPC_INST_COPY 0x7c00060c
+#define PPC_INST_COPY_FIRST 0x7c20060c
#define PPC_INST_CP_ABORT 0x7c00068c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
@@ -142,9 +144,11 @@
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
+#define PPC_INST_STDCX 0x7c0001ad
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
+#define PPC_INST_STWCX 0x7c00012d
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
@@ -159,6 +163,8 @@
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
+#define PPC_INST_PASTE 0x7c00070c
+#define PPC_INST_PASTE_LAST 0x7c20070d
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_POPCNTD 0x7c0003f4
@@ -174,7 +180,10 @@
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MFVSRD 0x7c000066
+#define PPC_INST_MTVSRD 0x7c000166
#define PPC_INST_SLBFEE 0x7c0007a7
+#define PPC_INST_SLBIA 0x7c0003e4
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -184,10 +193,13 @@
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_STXVD2X 0x7c000798
#define PPC_INST_TLBIE 0x7c000264
+#define PPC_INST_TLBIEL 0x7c000224
#define PPC_INST_TLBILX 0x7c000024
#define PPC_INST_WAIT 0x7c00007c
#define PPC_INST_TLBIVAX 0x7c000624
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
+#define PPC_INST_VPMSUMW 0x10000488
+#define PPC_INST_VPMSUMD 0x100004c8
#define PPC_INST_XXLOR 0xf0000510
#define PPC_INST_XXSWAPD 0xf0000250
#define PPC_INST_XVCPSGNDP 0xf0000780
@@ -199,6 +211,8 @@
#define PPC_INST_SLEEP 0x4c0003a4
#define PPC_INST_WINKLE 0x4c0003e4
+#define PPC_INST_STOP 0x4c0002e4
+
/* A2 specific instructions */
#define PPC_INST_ERATWE 0x7c0001a6
#define PPC_INST_ERATRE 0x7c000166
@@ -211,8 +225,11 @@
#define PPC_INST_LBZ 0x88000000
#define PPC_INST_LD 0xe8000000
#define PPC_INST_LHZ 0xa0000000
-#define PPC_INST_LHBRX 0x7c00062c
#define PPC_INST_LWZ 0x80000000
+#define PPC_INST_LHBRX 0x7c00062c
+#define PPC_INST_LDBRX 0x7c000428
+#define PPC_INST_STB 0x98000000
+#define PPC_INST_STH 0xb0000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_STW 0x90000000
@@ -221,22 +238,34 @@
#define PPC_INST_MTLR 0x7c0803a6
#define PPC_INST_CMPWI 0x2c000000
#define PPC_INST_CMPDI 0x2c200000
+#define PPC_INST_CMPW 0x7c000000
+#define PPC_INST_CMPD 0x7c200000
#define PPC_INST_CMPLW 0x7c000040
+#define PPC_INST_CMPLD 0x7c200040
#define PPC_INST_CMPLWI 0x28000000
+#define PPC_INST_CMPLDI 0x28200000
#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
#define PPC_INST_SUB 0x7c000050
#define PPC_INST_BLR 0x4e800020
#define PPC_INST_BLRL 0x4e800021
+#define PPC_INST_MULLD 0x7c0001d2
#define PPC_INST_MULLW 0x7c0001d6
#define PPC_INST_MULHWU 0x7c000016
#define PPC_INST_MULLI 0x1c000000
#define PPC_INST_DIVWU 0x7c000396
+#define PPC_INST_DIVD 0x7c0003d2
#define PPC_INST_RLWINM 0x54000000
+#define PPC_INST_RLWIMI 0x50000000
+#define PPC_INST_RLDICL 0x78000000
#define PPC_INST_RLDICR 0x78000004
#define PPC_INST_SLW 0x7c000030
+#define PPC_INST_SLD 0x7c000036
#define PPC_INST_SRW 0x7c000430
+#define PPC_INST_SRD 0x7c000436
+#define PPC_INST_SRAD 0x7c000634
+#define PPC_INST_SRADI 0x7c000674
#define PPC_INST_AND 0x7c000038
#define PPC_INST_ANDDOT 0x7c000039
#define PPC_INST_OR 0x7c000378
@@ -247,6 +276,7 @@
#define PPC_INST_XORI 0x68000000
#define PPC_INST_XORIS 0x6c000000
#define PPC_INST_NEG 0x7c0000d0
+#define PPC_INST_EXTSW 0x7c0007b4
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BRANCH_COND 0x40800000
#define PPC_INST_LBZCIX 0x7c0006aa
@@ -257,6 +287,9 @@
#define ___PPC_RB(b) (((b) & 0x1f) << 11)
#define ___PPC_RS(s) (((s) & 0x1f) << 21)
#define ___PPC_RT(t) ___PPC_RS(t)
+#define ___PPC_R(r) (((r) & 0x1) << 16)
+#define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
+#define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
#define __PPC_RA(a) ___PPC_RA(__REG_##a)
#define __PPC_RA0(a) ___PPC_RA(__REGA0_##a)
#define __PPC_RB(b) ___PPC_RB(__REG_##b)
@@ -272,6 +305,8 @@
#define __PPC_SH(s) __PPC_WS(s)
#define __PPC_MB(s) (((s) & 0x1f) << 6)
#define __PPC_ME(s) (((s) & 0x1f) << 1)
+#define __PPC_MB64(s) (__PPC_MB(s) | ((s) & 0x20))
+#define __PPC_ME64(s) __PPC_MB64(s)
#define __PPC_BI(s) (((s) & 0x1f) << 16)
#define __PPC_CT(t) (((t) & 0x0f) << 21)
@@ -321,6 +356,16 @@
__PPC_WC(w))
#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \
___PPC_RB(a) | ___PPC_RS(lp))
+#define PPC_TLBIE_5(rb,rs,ric,prs,r) \
+ stringify_in_c(.long PPC_INST_TLBIE | \
+ ___PPC_RB(rb) | ___PPC_RS(rs) | \
+ ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
+#define PPC_TLBIEL(rb,rs,ric,prs,r) \
+ stringify_in_c(.long PPC_INST_TLBIEL | \
+ ___PPC_RB(rb) | ___PPC_RS(rs) | \
+ ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
#define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
__PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
@@ -359,6 +404,14 @@
VSX_XX1((s), a, b))
#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
VSX_XX1((s), a, b))
+#define MFVRD(a, t) stringify_in_c(.long PPC_INST_MFVSRD | \
+ VSX_XX1((t)+32, a, R0))
+#define MTVRD(t, a) stringify_in_c(.long PPC_INST_MTVSRD | \
+ VSX_XX1((t)+32, a, R0))
+#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMW | \
+ VSX_XX3((t), a, b))
+#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMD | \
+ VSX_XX3((t), a, b))
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
VSX_XX3((t), a, b))
#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \
@@ -370,6 +423,8 @@
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
#define PPC_WINKLE stringify_in_c(.long PPC_INST_WINKLE)
+#define PPC_STOP stringify_in_c(.long PPC_INST_STOP)
+
/* BHRB instructions */
#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \
@@ -400,5 +455,7 @@
___PPC_RA(a) | \
___PPC_RB(b))
+#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \
+ ((IH & 0x7) << 21))
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
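
The new PPC_TLBIE_5/PPC_TLBIEL macros build the full 32-bit instruction word by OR-ing the field encoders (___PPC_RB, ___PPC_RS, ___PPC_RIC, ___PPC_PRS, ___PPC_R) into the base opcode. A minimal user-space sketch of that composition, with the field helpers copied from the definitions above and arbitrary example operands (not taken from any kernel call site):

#include <stdio.h>
#include <stdint.h>

#define INST_TLBIE 0x7c000264u                 /* PPC_INST_TLBIE */
#define RB(b)      (((b) & 0x1f) << 11)        /* ___PPC_RB */
#define RS(s)      (((s) & 0x1f) << 21)        /* ___PPC_RS */
#define R(r)       (((r) & 0x1) << 16)         /* ___PPC_R */
#define PRS(p)     (((p) & 0x1) << 17)         /* ___PPC_PRS */
#define RIC(x)     (((x) & 0x3) << 18)         /* ___PPC_RIC */

int main(void)
{
        /* tlbie with rb=r4, rs=r5, ric=2, prs=1, r=1 -- example values only */
        uint32_t word = INST_TLBIE | RB(4) | RS(5) | RIC(2) | PRS(1) | R(1);

        printf("0x%08x\n", (unsigned int)word);  /* prints 0x7cab2264 */
        return 0;
}
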
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 8753e4eb9ab5..0f73de069f19 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -39,8 +39,6 @@ void *pci_traverse_device_nodes(struct device_node *start,
void *traverse_pci_dn(struct pci_dn *root,
void *(*fn)(struct pci_dn *, void *),
void *data);
-
-extern void pci_devs_phb_init(void);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
/* From rtas_pci.h */
diff --git a/arch/powerpc/include/asm/ppc4xx.h b/arch/powerpc/include/asm/ppc4xx.h
index 033039a80c42..610a5119ad8c 100644
--- a/arch/powerpc/include/asm/ppc4xx.h
+++ b/arch/powerpc/include/asm/ppc4xx.h
@@ -13,6 +13,6 @@
#ifndef __ASM_POWERPC_PPC4xx_H__
#define __ASM_POWERPC_PPC4xx_H__
-extern void ppc4xx_reset_system(char *cmd);
+extern void __noreturn ppc4xx_reset_system(char *cmd);
#endif /* __ASM_POWERPC_PPC4xx_H__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 2b31632376a5..d5d5b5e348f2 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -24,27 +24,27 @@
*/
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
-#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
-#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
+#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
MFTB(ra); /* get timebase */ \
- ld rb,PACA_STARTTIME_USER(r13); \
- std ra,PACA_STARTTIME(r13); \
+ PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \
+ PPC_STL ra, ACCOUNT_STARTTIME(ptr); \
subf rb,rb,ra; /* subtract start value */ \
- ld ra,PACA_USER_TIME(r13); \
+ PPC_LL ra, ACCOUNT_USER_TIME(ptr); \
add ra,ra,rb; /* add on to user time */ \
- std ra,PACA_USER_TIME(r13); \
+ PPC_STL ra, ACCOUNT_USER_TIME(ptr); \
-#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
+#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \
MFTB(ra); /* get timebase */ \
- ld rb,PACA_STARTTIME(r13); \
- std ra,PACA_STARTTIME_USER(r13); \
+ PPC_LL rb, ACCOUNT_STARTTIME(ptr); \
+ PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \
subf rb,rb,ra; /* subtract start value */ \
- ld ra,PACA_SYSTEM_TIME(r13); \
+ PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \
add ra,ra,rb; /* add on to system time */ \
- std ra,PACA_SYSTEM_TIME(r13)
+ PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr)
#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME \
@@ -189,7 +189,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define __STK_REG(i) (112 + ((i)-14)*8)
#define STK_REG(i) __STK_REG(__REG_##i)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define STK_GOT 24
#define __STK_PARAM(i) (32 + ((i)-3)*8)
#else
@@ -198,7 +198,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif
#define STK_PARAM(i) __STK_PARAM(__REG_##i)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define _GLOBAL(name) \
.section ".text"; \
@@ -286,6 +286,9 @@ n:
#endif
+#define FUNC_START(name) _GLOBAL(name)
+#define FUNC_END(name)
+
/*
* LOAD_REG_IMMEDIATE(rn, expr)
* Loads the value of the constant expression 'expr' into register 'rn'
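
The ACCOUNT_CPU_USER_ENTRY/EXIT rework above replaces the hard-coded PACA fields with a caller-supplied base pointer and PPC_LL/PPC_STL, so the same macros work whether the accounting data lives in the paca (64-bit) or in thread_info (32-bit, wired up later in this patch). In C terms the entry-side macro performs the following delta accounting; the struct and function here are an illustrative sketch only, the real layout is whatever asm-offsets.c generates for the accounting.* fields below:

/* Illustrative sketch, not kernel code. */
struct cpu_accounting_sketch {
        unsigned long starttime;        /* timebase when we entered the kernel */
        unsigned long starttime_user;   /* timebase when we last returned to user */
        unsigned long user_time;
        unsigned long system_time;
};

static void account_cpu_user_entry(struct cpu_accounting_sketch *acct,
                                   unsigned long tb_now)
{
        /* everything since the last return to userspace was user time */
        acct->user_time += tb_now - acct->starttime_user;
        /* kernel time starts accumulating from now */
        acct->starttime = tb_now;
}
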
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 009fab130cd8..68e3bf57b027 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -224,7 +224,7 @@ struct thread_struct {
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_PPC64
unsigned long start_tb; /* Start purr when proc switched in */
- unsigned long accum_tb; /* Total accumilated purr for process */
+ unsigned long accum_tb; /* Total accumulated purr for process */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *ptrace_bps[HBP_NUM];
/*
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long mmcr2;
unsigned mmcr0;
unsigned used_ebb;
+ unsigned long lmrr;
+ unsigned long lmser;
#endif
};
@@ -347,6 +349,7 @@ struct thread_struct {
.fs = KERNEL_DS, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
+ .fscr = FSCR_TAR | FSCR_EBB \
}
#endif
@@ -457,6 +460,8 @@ extern int powersave_nap; /* set if nap mode can be used in idle loop */
extern unsigned long power7_nap(int check_irq);
extern unsigned long power7_sleep(void);
extern unsigned long power7_winkle(void);
+extern unsigned long power9_idle_stop(unsigned long stop_level);
+
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index a1bc7e758422..a19f831a4cc9 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -526,4 +526,6 @@ void ps3_sync_irq(int node);
u32 ps3_get_hw_thread_id(int cpu);
u64 ps3_get_spe_id(void *arg);
+void ps3_early_mm_init(void);
+
#endif
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 0427b0b53d2d..a1dc784d70e8 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -104,7 +104,7 @@
#define PS3AV_CMD_AV_INPUTLEN_16 0x02
#define PS3AV_CMD_AV_INPUTLEN_20 0x0a
#define PS3AV_CMD_AV_INPUTLEN_24 0x0b
-/* alayout */
+/* av_layout */
#define PS3AV_CMD_AV_LAYOUT_32 (1 << 0)
#define PS3AV_CMD_AV_LAYOUT_44 (1 << 1)
#define PS3AV_CMD_AV_LAYOUT_48 (1 << 2)
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 2eeaf80d41b7..4ba26dd259fd 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -96,7 +96,7 @@ static inline bool pte_user(pte_t pte)
#define PTE_RPN_SHIFT (PAGE_SHIFT)
#endif
-/* The mask convered by the RPN must be a ULL on 32-bit platforms with
+/* The mask covered by the RPN must be a ULL on 32-bit platforms with
* 64-bit PTEs
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index c0c61fa9cd9e..e4923686e43a 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -47,7 +47,7 @@
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define STACK_FRAME_MIN_SIZE 32
#else
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c1e82e968506..40f3615bf940 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -145,6 +145,15 @@
#define MSR_64BIT 0
#endif
+/* Power Management - Processor Stop Status and Control Register Fields */
+#define PSSCR_RL_MASK 0x0000000F /* Requested Level */
+#define PSSCR_MTL_MASK 0x000000F0 /* Maximum Transition Level */
+#define PSSCR_TR_MASK 0x00000300 /* Transition State */
+#define PSSCR_PSLL_MASK 0x000F0000 /* Power-Saving Level Limit */
+#define PSSCR_EC 0x00100000 /* Exit Criterion */
+#define PSSCR_ESL 0x00200000 /* Enable State Loss */
+#define PSSCR_SD 0x00400000 /* Status Disable */
+
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
@@ -268,6 +277,7 @@
#define DSISR_KEYFAULT 0x00200000 /* Key fault */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
+#define SPRN_CIR 0x11B /* Chip Information Register (hyper, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
@@ -282,15 +292,19 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
#define SPRN_LDBAR 0x352 /* LD Base Address Register */
#define SPRN_PMICR 0x354 /* Power Management Idle Control Reg */
#define SPRN_PMSR 0x355 /* Power Management Status Reg */
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
+#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
@@ -300,10 +314,12 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_LM __MASK(FSCR_LM_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_LM __MASK(FSCR_LM_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
@@ -314,40 +330,43 @@
#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
-#define LPCR_VPM0 (1ul << (63-0))
-#define LPCR_VPM1 (1ul << (63-1))
-#define LPCR_ISL (1ul << (63-2))
-#define LPCR_VC_SH (63-2)
-#define LPCR_DPFD_SH (63-11)
-#define LPCR_DPFD (7ul << LPCR_DPFD_SH)
-#define LPCR_VRMASD (0x1ful << (63-16))
-#define LPCR_VRMA_L (1ul << (63-12))
-#define LPCR_VRMA_LP0 (1ul << (63-15))
-#define LPCR_VRMA_LP1 (1ul << (63-16))
-#define LPCR_VRMASD_SH (63-16)
-#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
-#define LPCR_RMLS_SH (63-37)
-#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
-#define LPCR_AIL 0x01800000 /* Alternate interrupt location */
-#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
-#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
-#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */
-#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */
-#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */
-#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */
-#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
-#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
-#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
-#define LPCR_MER 0x00000800 /* Mediated External Exception */
-#define LPCR_MER_SH 11
-#define LPCR_TC 0x00000200 /* Translation control */
-#define LPCR_LPES 0x0000000c
-#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
-#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
-#define LPCR_LPES_SH 2
-#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
-#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
-#define LPCR_UPRT 0x00400000 /* Use Process Table (ISA 3) */
+#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
+#define LPCR_VPM1 ASM_CONST(0x4000000000000000)
+#define LPCR_ISL ASM_CONST(0x2000000000000000)
+#define LPCR_VC_SH 61
+#define LPCR_DPFD_SH 52
+#define LPCR_DPFD (ASM_CONST(7) << LPCR_DPFD_SH)
+#define LPCR_VRMASD_SH 47
+#define LPCR_VRMASD (ASM_CONST(1) << LPCR_VRMASD_SH)
+#define LPCR_VRMA_L ASM_CONST(0x0008000000000000)
+#define LPCR_VRMA_LP0 ASM_CONST(0x0001000000000000)
+#define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
+#define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
+#define LPCR_RMLS_SH 26
+#define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
+#define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
+#define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
+#define LPCR_AIL_3 ASM_CONST(0x0000000001800000) /* MMU on exception offset 0xc00...4xxx */
+#define LPCR_ONL ASM_CONST(0x0000000000040000) /* online - PURR/SPURR count */
+#define LPCR_LD ASM_CONST(0x0000000000020000) /* large decrementer */
+#define LPCR_PECE ASM_CONST(0x000000000001f000) /* powersave exit cause enable */
+#define LPCR_PECEDP ASM_CONST(0x0000000000010000) /* directed priv dbells cause exit */
+#define LPCR_PECEDH ASM_CONST(0x0000000000008000) /* directed hyp dbells cause exit */
+#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
+#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
+#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
+#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
+#define LPCR_MER_SH 11
+#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
+#define LPCR_LPES 0x0000000c
+#define LPCR_LPES0 ASM_CONST(0x0000000000000008) /* LPAR Env selector 0 */
+#define LPCR_LPES1 ASM_CONST(0x0000000000000004) /* LPAR Env selector 1 */
+#define LPCR_LPES_SH 2
+#define LPCR_RMI ASM_CONST(0x0000000000000002) /* real mode is cache inhibit */
+#define LPCR_HVICE ASM_CONST(0x0000000000000002) /* P9: HV interrupt enable */
+#define LPCR_HDICE ASM_CONST(0x0000000000000001) /* Hyp Decr enable (HV,PR,EE) */
+#define LPCR_UPRT ASM_CONST(0x0000000000400000) /* Use Process Table (ISA 3) */
+#define LPCR_HR ASM_CONST(0x0000000000100000)
#ifndef SPRN_LPID
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
#endif
@@ -717,7 +736,7 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
-#define SPRN_MMCR2 769
+#define SPRN_MMCR2 785
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -754,13 +773,13 @@
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
-#define SPRN_SIAR 780
-#define SPRN_SDAR 781
#define SPRN_SIER 784
#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_SIAR 796
+#define SPRN_SDAR 797
#define SPRN_TACR 888
#define SPRN_TCSCR 889
#define SPRN_CSIGR 890
@@ -1288,6 +1307,7 @@ static inline unsigned long mfvtb (void)
asm volatile("mfspr %0, %1" : "=r" (rval) : \
"i" (SPRN_TBRU)); rval;})
#endif
+#define mftb() mftbl()
#endif /* !__powerpc64__ */
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
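
The LPCR_* rework above moves every field to a full 64-bit ASM_CONST() value, so the bits in the upper word (VPM0, ISL, DPFD, ...) and the newly added low bits (LD, HVICE, UPRT, HR) follow one style and stay usable from both C and assembly. ASM_CONST() is defined in asm/asm-compat.h; roughly (a sketch, check that header for the exact suffix):

/* Sketch of the ASM_CONST idiom. */
#ifdef __ASSEMBLY__
#define ASM_CONST(x)    x               /* assembler sees a bare literal */
#else
#define __ASM_CONST(x)  x##UL
#define ASM_CONST(x)    __ASM_CONST(x)  /* C sees an unsigned long literal */
#endif

/* usable from C:        if (lpcr & LPCR_LD) ...                        */
/* and from assembly:    LOAD_REG_IMMEDIATE(r5, LPCR_LD); or r3,r3,r5   */
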
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 51400baa8d48..9c23baa10b81 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -339,9 +339,9 @@ extern int rtas_service_present(const char *service);
extern int rtas_call(int token, int, int, int *, ...);
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
int nret, ...);
-extern void rtas_restart(char *cmd);
+extern void __noreturn rtas_restart(char *cmd);
extern void rtas_power_off(void);
-extern void rtas_halt(void);
+extern void __noreturn rtas_halt(void);
extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
extern int rtas_get_sensor_fast(int sensor, int index, int *state);
@@ -351,7 +351,6 @@ extern bool rtas_indicator_present(int token, int *maxindex);
extern int rtas_set_indicator(int indicator, int index, int new_value);
extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);
-extern void rtas_initialize(void);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
extern int rtas_online_cpus_mask(cpumask_var_t cpus);
@@ -460,9 +459,11 @@ static inline int page_is_rtas_user_buf(unsigned long pfn)
/* Not the best place to put pSeries_coalesce_init, will be fixed when we
* move some of the rtas suspend-me stuff to pseries */
extern void pSeries_coalesce_init(void);
+void rtas_initialize(void);
#else
static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
static inline void pSeries_coalesce_init(void) { }
+static inline void rtas_initialize(void) { };
#endif
extern int call_rtas(const char *, int, int, unsigned long *, ...);
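
rtas_restart() and rtas_halt() never return to their caller, so they are now declared __noreturn; the compiler can then discard unreachable code after the call and stops warning about missing returns in callers. A small illustration with a hypothetical caller (not from this patch):

/* Hypothetical caller showing the effect of a __noreturn callee. */
extern void __attribute__((noreturn)) halt_machine(void);

int shutdown_path(void)
{
        halt_machine();
        /* no "control reaches end of non-void function" warning:
         * the compiler knows execution never gets here */
}
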
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index abf5866e08c6..7dc006b58369 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -62,7 +62,7 @@ static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
#endif
}
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
#undef dereference_function_descriptor
static inline void *dereference_function_descriptor(void *ptr)
{
@@ -73,7 +73,7 @@ static inline void *dereference_function_descriptor(void *ptr)
ptr = p;
return ptr;
}
-#endif
+#endif /* PPC64_ELF_ABI_v1 */
#endif
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e9d384cbd021..654d64c9f3ac 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -26,6 +26,18 @@ void initmem_init(void);
void setup_panic(void);
#define ARCH_PANIC_TIMEOUT 180
+#ifdef CONFIG_PPC_PSERIES
+extern void pseries_enable_reloc_on_exc(void);
+extern void pseries_disable_reloc_on_exc(void);
+extern void pseries_big_endian_exceptions(void);
+extern void pseries_little_endian_exceptions(void);
+#else
+static inline void pseries_enable_reloc_on_exc(void) {}
+static inline void pseries_disable_reloc_on_exc(void) {}
+static inline void pseries_big_endian_exceptions(void) {}
+static inline void pseries_little_endian_exceptions(void) {}
+#endif /* CONFIG_PPC_PSERIES */
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index e1afd4c4f695..0d02c11dc331 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -160,9 +160,6 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
{
paca[cpu].hw_cpu_id = phys;
}
-
-extern void smp_release_cpus(void);
-
#else
/* 32-bit */
#ifndef CONFIG_SMP
@@ -179,6 +176,12 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
#endif /* !CONFIG_SMP */
#endif /* !CONFIG_PPC64 */
+#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC))
+extern void smp_release_cpus(void);
+#else
+static inline void smp_release_cpus(void) { };
+#endif
+
extern int smt_enabled_at_boot;
extern void smp_mpic_probe(void);
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index f280dd11243f..09f98e861869 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -185,7 +185,7 @@
* x = processor mask
* y = op. point index
* z = processor freq. step index
- * I haven't yet decyphered result codes
+ * I haven't yet deciphered result codes
*
*/
#define SMU_CMD_POWER_COMMAND 0xaa
@@ -471,13 +471,6 @@ extern int smu_get_rtc_time(struct rtc_time *time, int spinwait);
extern int smu_set_rtc_time(struct rtc_time *time, int spinwait);
/*
- * SMU command buffer absolute address, exported by pmac_setup,
- * this is allocated very early during boot.
- */
-extern unsigned long smu_cmdbuf_abs;
-
-
-/*
* Kernel asynchronous i2c interface
*/
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 523673d7583c..fa37fe93bc02 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -162,12 +162,38 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
lock->slock = 0;
}
-#ifdef CONFIG_PPC64
-extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
-#else
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#endif
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lock_val;
+
+ smp_mb();
+
+ /*
+ * Atomically load and store back the lock value (unchanged). This
+ * ensures that our observation of the lock value is ordered with
+ * respect to other lock operations.
+ */
+ __asm__ __volatile__(
+"1: " PPC_LWARX(%0, 0, %2, 0) "\n"
+" stwcx. %0, 0, %2\n"
+" bne- 1b\n"
+ : "=&r" (lock_val), "+m" (*lock)
+ : "r" (lock)
+ : "cr0", "xer");
+
+ if (arch_spin_value_unlocked(lock_val))
+ goto out;
+
+ while (lock->slock) {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __spin_yield(lock);
+ }
+ HMT_medium();
+
+out:
+ smp_mb();
+}
/*
* Read-write spinlocks, allowing multiple readers
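
The open-coded arch_spin_unlock_wait() above first performs a dummy atomic read-modify-write of the lock word (lwarx/stwcx. storing the value back unchanged) so that its observation of the lock is ordered against concurrent lock/unlock operations, then spins at low thread priority (yielding to the hypervisor on shared processors) until the holder releases. The same shape expressed with C11 atomics, purely as an illustration of the ordering idea and not a replacement for the assembly:

#include <stdatomic.h>

static void spin_unlock_wait_sketch(_Atomic unsigned int *slock)
{
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */

        /* RMW of the unchanged value orders this observation with
         * respect to other lock operations (the lwarx/stwcx. pair) */
        if (atomic_fetch_add(slock, 0) == 0)
                goto out;

        while (atomic_load(slock) != 0)
                ;       /* HMT_low()/__spin_yield() would go here */
out:
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
}
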
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index e40010abcaf1..da3cdffca440 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -3,12 +3,8 @@
#ifdef __KERNEL__
-#define __HAVE_ARCH_STRCPY
#define __HAVE_ARCH_STRNCPY
-#define __HAVE_ARCH_STRLEN
-#define __HAVE_ARCH_STRCMP
#define __HAVE_ARCH_STRNCMP
-#define __HAVE_ARCH_STRCAT
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMMOVE
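
Dropping __HAVE_ARCH_STRCPY, __HAVE_ARCH_STRLEN, __HAVE_ARCH_STRCMP and __HAVE_ARCH_STRCAT switches those functions to the generic C implementations, because lib/string.c only compiles its version of each routine when the architecture has not claimed it. The guard pattern there looks roughly like this:

#include <stddef.h>

/* Sketch of the lib/string.c fallback pattern. */
#ifndef __HAVE_ARCH_STRLEN
size_t strlen(const char *s)
{
        const char *sc;

        for (sc = s; *sc != '\0'; ++sc)
                /* nothing */;
        return sc - s;
}
#endif
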
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index c50868681f9e..78efe8d5d775 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,7 +13,6 @@
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
-extern void do_final_fixups(void);
static inline void eieio(void)
{
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
index 743f36b38e5d..12e362935160 100644
--- a/arch/powerpc/include/asm/tce.h
+++ b/arch/powerpc/include/asm/tce.h
@@ -31,9 +31,6 @@
*/
#define TCE_VB 0
#define TCE_PCI 1
-#define TCE_PCI_SWINV_CREATE 2
-#define TCE_PCI_SWINV_FREE 4
-#define TCE_PCI_SWINV_PAIR 8
/* TCE page size is 4096 bytes (1 << 12) */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8febc3f66d53..b21bb1f72314 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/stringify.h>
+#include <asm/accounting.h>
/*
* low level task data.
@@ -46,6 +47,9 @@ struct thread_info {
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
#endif
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32)
+ struct cpu_accounting_data accounting;
+#endif
/* low level flags - has atomic operations done on it */
unsigned long flags ____cacheline_aligned_in_smp;
};
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1092fdd7e737..09211640a0e0 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -146,7 +146,7 @@ static inline void set_tb(unsigned int upper, unsigned int lower)
* in auto-reload mode. The problem is PIT stops counting when it
* hits zero. If it would wrap, we could use it just like a decrementer.
*/
-static inline unsigned int get_dec(void)
+static inline u64 get_dec(void)
{
#if defined(CONFIG_40x)
return (mfspr(SPRN_PIT));
@@ -160,10 +160,10 @@ static inline unsigned int get_dec(void)
* in when the decrementer generates its interrupt: on the 1 to 0
* transition for Book E/4xx, but on the 0 to -1 transition for others.
*/
-static inline void set_dec(int val)
+static inline void set_dec(u64 val)
{
#if defined(CONFIG_40x)
- mtspr(SPRN_PIT, val);
+ mtspr(SPRN_PIT, (u32) val);
#else
#ifndef CONFIG_BOOKE
--val;
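
get_dec() and set_dec() are widened from unsigned int to u64 so that a larger-than-32-bit decrementer (the ISA 3.0 large decrementer, enabled by the LPCR_LD bit added in reg.h above) can be programmed directly; only the 40x PIT path truncates the value back to 32 bits. A hedged sketch of how timer code might clamp a 64-bit tick count to whatever width is actually implemented (the helper and its parameter are hypothetical, kernel context assumed for u64 and set_dec()):

/* Hypothetical helper: program the decrementer from a 64-bit tick count,
 * clamping to the largest value the hardware supports. */
static void set_dec_clamped(u64 ticks, u64 decrementer_max)
{
        set_dec(ticks < decrementer_max ? ticks : decrementer_max);
}
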
diff --git a/arch/powerpc/include/asm/tsi108.h b/arch/powerpc/include/asm/tsi108.h
index d531d9e173ef..c2a955bb0daa 100644
--- a/arch/powerpc/include/asm/tsi108.h
+++ b/arch/powerpc/include/asm/tsi108.h
@@ -77,7 +77,7 @@
* nodes if your board uses the Broadcom PHYs
*/
#define TSI108_PHY_MV88E 0 /* Marvel 88Exxxx PHY */
-#define TSI108_PHY_BCM54XX 1 /* Broardcom BCM54xx PHY */
+#define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */
/* Global variables */
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index bfb6ded38ffa..49a0678a53fa 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -15,6 +15,14 @@
#include <uapi/asm/types.h>
+#ifdef __powerpc64__
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define PPC64_ELF_ABI_v2
+#else
+#define PPC64_ELF_ABI_v1
+#endif
+#endif /* __powerpc64__ */
+
#ifndef __ASSEMBLY__
typedef __vector128 vector128;
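
Deriving PPC64_ELF_ABI_v1/PPC64_ELF_ABI_v2 once from _CALL_ELF lets every other site in this patch (ppc_asm.h, ptrace.h, sections.h, entry_64.S) test a single readable symbol instead of repeating the compiler-specific check. A converted site then reduces to the following shape (mirroring the ptrace.h hunk above):

#ifdef PPC64_ELF_ABI_v2
#define STACK_FRAME_MIN_SIZE    32
#else
#define STACK_FRAME_MIN_SIZE    STACK_FRAME_OVERHEAD
#endif
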
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 04ef3ae511da..f5f729c11578 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -42,6 +42,12 @@ extern int icp_hv_init(void);
static inline int icp_hv_init(void) { return -ENODEV; }
#endif
+#ifdef CONFIG_PPC_POWERNV
+extern int icp_opal_init(void);
+#else
+static inline int icp_opal_init(void) { return -ENODEV; }
+#endif
+
/* ICP ops */
struct icp_ops {
unsigned int (*get_irq)(void);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2da380fcc34c..fe4c075bcf50 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -42,12 +42,11 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
-obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
-obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
+obj-$(CONFIG_PPC_P7_NAP) += idle_book3s.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
@@ -87,7 +86,7 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
-obj-$(CONFIG_RELOCATABLE_PPC32) += reloc_32.o
+obj-$(CONFIG_RELOCATABLE) += reloc_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 8e7cb8e2b21a..c7097f933114 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -228,9 +228,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#else
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
-#endif
-
-#ifdef __LITTLE_ENDIAN__
+#else
#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
#endif
@@ -875,6 +873,20 @@ int fix_alignment(struct pt_regs *regs)
return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
}
#endif
+
+ /*
+ * ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
+ * check.
+ *
+ * Send a SIGBUS to the process that caused the fault.
+ *
+ * We do not emulate these because paste may contain additional metadata
+ * when pasting to a co-processor. Furthermore, paste_last is the
+ * synchronisation point for preceding copy/paste sequences.
+ */
+ if ((instruction & 0xfc0006fe) == PPC_INST_COPY)
+ return -EIO;
+
/* A size of 0 indicates an instruction we don't support, with
* the exception of DCBZ which is handled as a special case here
*/
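
The single compare (instruction & 0xfc0006fe) == PPC_INST_COPY is enough to catch copy, copy_first, paste and paste_last, because every bit in which the four encodings differ is cleared by the mask. A quick user-space check of that claim, using the opcode values added to ppc-opcode.h earlier in this patch:

#include <assert.h>

#define PPC_INST_COPY        0x7c00060c
#define PPC_INST_COPY_FIRST  0x7c20060c
#define PPC_INST_PASTE       0x7c00070c
#define PPC_INST_PASTE_LAST  0x7c20070d

int main(void)
{
        unsigned int insns[] = { PPC_INST_COPY, PPC_INST_COPY_FIRST,
                                 PPC_INST_PASTE, PPC_INST_PASTE_LAST };
        int i;

        for (i = 0; i < 4; i++)
                assert((insns[i] & 0xfc0006fe) == PPC_INST_COPY);
        return 0;
}
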
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9ea09551a2cd..b89d14c0352c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -68,17 +68,18 @@
#include "../mm/mmu_decl.h"
#endif
+#ifdef CONFIG_PPC_8xx
+#include <asm/fixmap.h>
+#endif
+
int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
- DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
- DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
- DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
@@ -132,17 +133,6 @@ int main(void)
DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- DEFINE(THREAD_TAR, offsetof(struct thread_struct, tar));
- DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
- DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
- DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
- DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
- DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
- DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
- DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
- DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
-#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
@@ -178,7 +168,6 @@ int main(void)
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
- DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -255,13 +244,28 @@ int main(void)
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default));
- DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
- DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
- DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
- DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+ DEFINE(ACCOUNT_STARTTIME,
+ offsetof(struct paca_struct, accounting.starttime));
+ DEFINE(ACCOUNT_STARTTIME_USER,
+ offsetof(struct paca_struct, accounting.starttime_user));
+ DEFINE(ACCOUNT_USER_TIME,
+ offsetof(struct paca_struct, accounting.user_time));
+ DEFINE(ACCOUNT_SYSTEM_TIME,
+ offsetof(struct paca_struct, accounting.system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
+#else /* CONFIG_PPC64 */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ DEFINE(ACCOUNT_STARTTIME,
+ offsetof(struct thread_info, accounting.starttime));
+ DEFINE(ACCOUNT_STARTTIME_USER,
+ offsetof(struct thread_info, accounting.starttime_user));
+ DEFINE(ACCOUNT_USER_TIME,
+ offsetof(struct thread_info, accounting.user_time));
+ DEFINE(ACCOUNT_SYSTEM_TIME,
+ offsetof(struct thread_info, accounting.system_time));
+#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -275,12 +279,6 @@ int main(void)
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
-
- /* hcall statistics */
- DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats));
- DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls));
- DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total));
- DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total));
#endif /* CONFIG_PPC64 */
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
@@ -298,23 +296,6 @@ int main(void)
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
#ifndef CONFIG_PPC64
DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
- DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
- DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
- DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
- DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
- DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
- DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
- DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
- DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
- DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
- DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
- DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
- DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
- DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
- DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
- DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
- DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
- DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
#endif /* CONFIG_PPC64 */
/*
* Note: these symbols include _ because they overlap with special
@@ -332,7 +313,6 @@ int main(void)
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
#ifndef CONFIG_PPC64
- DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
/*
* The PowerPC 400-class & Book-E processors have neither the DAR
* nor the DSISR SPRs. Hence, we overload them to hold the similar
@@ -369,8 +349,6 @@ int main(void)
DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif
- DEFINE(CLONE_VM, CLONE_VM);
- DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
#ifndef CONFIG_PPC64
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
@@ -380,7 +358,6 @@ int main(void)
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
- DEFINE(CPU_DOWN_FLUSH, offsetof(struct cpu_spec, cpu_down_flush));
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
@@ -395,7 +372,6 @@ int main(void)
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
- DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
@@ -517,7 +493,6 @@ int main(void)
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
- DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
@@ -528,7 +503,6 @@ int main(void)
DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu));
#endif
#ifdef CONFIG_PPC_BOOK3S
- DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
@@ -566,7 +540,6 @@ int main(void)
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
- DEFINE(VCPU_SHADOW_FSCR, offsetof(struct kvm_vcpu, arch.shadow_fscr));
DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
@@ -576,7 +549,6 @@ int main(void)
DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
- DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
@@ -693,7 +665,6 @@ int main(void)
DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr));
DEFINE(KVM_SPLIT_PMMAR, offsetof(struct kvm_split_mode, pmmar));
DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar));
- DEFINE(KVM_SPLIT_SIZE, offsetof(struct kvm_split_mode, subcore_size));
DEFINE(KVM_SPLIT_DO_NAP, offsetof(struct kvm_split_mode, do_nap));
DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped));
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
@@ -756,7 +727,6 @@ int main(void)
#ifdef CONFIG_KVM_BOOKE_HV
DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
- DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
@@ -783,5 +753,9 @@ int main(void)
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+#ifdef CONFIG_PPC_8xx
+ DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
+#endif
+
return 0;
}
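
asm-offsets.c is never linked into the kernel: it is compiled to assembly and the DEFINE() markers are scraped into the generated asm-offsets.h, which is how assembly code gets symbolic structure offsets such as the new ACCOUNT_STARTTIME used by the ACCOUNT_CPU_USER_* macros. The mechanism, roughly (the real macro lives in include/linux/kbuild.h; the offset value below is made up):

/* Sketch of the kbuild DEFINE() trick. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* DEFINE(ACCOUNT_STARTTIME,
 *        offsetof(struct paca_struct, accounting.starttime))
 * leaves a marker line resembling
 *        ->ACCOUNT_STARTTIME 40 offsetof(struct paca_struct, ...)
 * in the generated .s file, which the build postprocesses into
 *        #define ACCOUNT_STARTTIME 40
 */
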
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f8cd9fba4d35..c5e5a94d9892 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -156,7 +156,7 @@ setup_7410_workarounds:
blr
/* 740/750/7400/7410
- * Enable Store Gathering (SGE), Address Brodcast (ABE),
+ * Enable Store Gathering (SGE), Address Broadcast (ABE),
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
* Clear Instruction cache throttling (ICTC)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119fa8b0..52ff3f025437 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -51,6 +51,7 @@ _GLOBAL(__setup_cpu_power8)
mflr r11
bl __init_FSCR
bl __init_PMU
+ bl __init_PMU_ISA207
bl __init_hvmode_206
mtlr r11
beqlr
@@ -62,6 +63,7 @@ _GLOBAL(__setup_cpu_power8)
bl __init_HFSCR
bl __init_tlb_power8
bl __init_PMU_HV
+ bl __init_PMU_HV_ISA207
mtlr r11
blr
@@ -69,6 +71,7 @@ _GLOBAL(__restore_cpu_power8)
mflr r11
bl __init_FSCR
bl __init_PMU
+ bl __init_PMU_ISA207
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -81,12 +84,14 @@ _GLOBAL(__restore_cpu_power8)
bl __init_HFSCR
bl __init_tlb_power8
bl __init_PMU_HV
+ bl __init_PMU_HV_ISA207
mtlr r11
blr
_GLOBAL(__setup_cpu_power9)
mflr r11
bl __init_FSCR
+ bl __init_PMU
bl __init_hvmode_206
mtlr r11
beqlr
@@ -94,15 +99,18 @@ _GLOBAL(__setup_cpu_power9)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
+ ori r3, r3, LPCR_HVICE
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
+ bl __init_PMU_HV
mtlr r11
blr
_GLOBAL(__restore_cpu_power9)
mflr r11
bl __init_FSCR
+ bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -111,9 +119,11 @@ _GLOBAL(__restore_cpu_power9)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
+ ori r3, r3, LPCR_HVICE
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
+ bl __init_PMU_HV
mtlr r11
blr
@@ -208,14 +218,22 @@ __init_tlb_power9:
__init_PMU_HV:
li r5,0
mtspr SPRN_MMCRC,r5
+ blr
+
+__init_PMU_HV_ISA207:
+ li r5,0
mtspr SPRN_MMCRH,r5
blr
__init_PMU:
li r5,0
- mtspr SPRN_MMCRS,r5
mtspr SPRN_MMCRA,r5
mtspr SPRN_MMCR0,r5
mtspr SPRN_MMCR1,r5
mtspr SPRN_MMCR2,r5
blr
+
+__init_PMU_ISA207:
+ li r5,0
+ mtspr SPRN_MMCRS,r5
+ blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index eeeacf6235a3..d81f826d1029 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -137,7 +137,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTRS_POWER4,
+ .mmu_features = MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -152,7 +152,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTRS_POWER4,
+ .mmu_features = MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 2bb252c01f07..47b63de81f9b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,8 +48,8 @@ int crashing_cpu = -1;
static int time_to_dump;
#define CRASH_HANDLER_MAX 3
-/* NULL terminated list of shutdown handles */
-static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
+/* List of shutdown handles */
+static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
static DEFINE_SPINLOCK(crash_handlers_lock);
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
@@ -65,7 +65,7 @@ static int handle_fault(struct pt_regs *regs)
#ifdef CONFIG_SMP
static atomic_t cpus_in_crash;
-void crash_ipi_callback(struct pt_regs *regs)
+static void crash_ipi_callback(struct pt_regs *regs)
{
static cpumask_t cpus_state_saved = CPU_MASK_NONE;
@@ -288,9 +288,14 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
rc = 1;
} else {
/* Shift handles down */
- for (; crash_shutdown_handles[i]; i++)
+ for (; i < (CRASH_HANDLER_MAX - 1); i++)
crash_shutdown_handles[i] =
crash_shutdown_handles[i+1];
+ /*
+ * Reset last entry to NULL now that it has been shifted down,
+ * so that new handles can be added here.
+ */
+ crash_shutdown_handles[i] = NULL;
rc = 0;
}
@@ -346,7 +351,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
old_handler = __debugger_fault_handler;
__debugger_fault_handler = handle_fault;
crash_shutdown_cpu = smp_processor_id();
- for (i = 0; crash_shutdown_handles[i]; i++) {
+ for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
if (setjmp(crash_shutdown_buf) == 0) {
/*
* Insert syncs and delay to ensure
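
The crash_shutdown_handles[] array used to carry an extra NULL terminator slot; now that it is sized at exactly CRASH_HANDLER_MAX, the unregister path shifts the remaining entries down to the end of the array and explicitly clears the vacated last slot, and the iteration above bounds itself on CRASH_HANDLER_MAX as well as on NULL. The fixed removal logic in isolation:

#include <stddef.h>

#define CRASH_HANDLER_MAX 3

static void *handles[CRASH_HANDLER_MAX];

/* Sketch of the fixed unregister: shift the tail down, clear the last slot. */
static void remove_handle(int i)
{
        for (; i < CRASH_HANDLER_MAX - 1; i++)
                handles[i] = handles[i + 1];
        handles[i] = NULL;      /* freed slot can be reused by a later register */
}
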
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index ddbcfab7efdf..d4cc26618809 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -114,9 +114,9 @@ static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
while (n) {
struct pci_io_addr_range *piar;
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
- pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n",
+ pr_debug("PCI: %s addr range %d [%pap-%pap]: %s\n",
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
- piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
+ &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
cnt++;
n = rb_next(n);
}
@@ -159,8 +159,8 @@ eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
piar->flags = flags;
#ifdef DEBUG
- pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n",
- alo, ahi, pci_name(dev));
+ pr_debug("PIAR: insert range=[%pap:%pap] dev=%s\n",
+ &alo, &ahi, pci_name(dev));
#endif
rb_link_node(&piar->rb_node, parent, p);
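
The message changes above switch from %lx to %pap, the printk extension for phys_addr_t/resource_size_t; it prints the right width on both 32-bit (with 64-bit physical addresses) and 64-bit kernels, and it takes a pointer to the value rather than the value itself, which is why the call sites now pass &piar->addr_lo and friends. Usage shape (kernel context assumed):

/* %pa/%pap consume a pointer to a phys_addr_t / resource_size_t. */
static void show_range(resource_size_t lo, resource_size_t hi)
{
        pr_debug("addr range [%pap-%pap]\n", &lo, &hi);
}
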
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index 7815095fe3d8..d6b2ca70d14d 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -44,14 +44,13 @@
/**
* eeh_dev_init - Create EEH device according to OF node
* @pdn: PCI device node
- * @data: PHB
*
* It will create EEH device according to the given OF node. The function
 * might be called by PCI enumeration, DR, PHB hotplug.
*/
-void *eeh_dev_init(struct pci_dn *pdn, void *data)
+struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
{
- struct pci_controller *phb = data;
+ struct pci_controller *phb = pdn->phb;
struct eeh_dev *edev;
/* Allocate EEH device */
@@ -69,7 +68,7 @@ void *eeh_dev_init(struct pci_dn *pdn, void *data)
INIT_LIST_HEAD(&edev->list);
INIT_LIST_HEAD(&edev->rmv_list);
- return NULL;
+ return edev;
}
/**
@@ -81,16 +80,8 @@ void *eeh_dev_init(struct pci_dn *pdn, void *data)
*/
void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
{
- struct pci_dn *root = phb->pci_data;
-
/* EEH PE for PHB */
eeh_phb_pe_create(phb);
-
- /* EEH device for PHB */
- eeh_dev_init(root, phb);
-
- /* EEH devices for children OF nodes */
- traverse_pci_dn(root, eeh_dev_init, phb);
}
/**
@@ -106,8 +97,6 @@ static int __init eeh_dev_phb_init(void)
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
eeh_dev_phb_init_dynamic(phb);
- pr_info("EEH: devices created\n");
-
return 0;
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 2714a3b81d24..5f36e8a70daa 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -139,7 +139,7 @@ static void eeh_enable_irq(struct pci_dev *dev)
* into it.
*
 * That's just wrong. The warning in the core code is
- * there to tell people to fix their assymetries in
+ * there to tell people to fix their asymmetries in
* their own code, not by abusing the core information
* to avoid it.
*
@@ -642,13 +642,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
} else {
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
}
} else if (frozen_bus) {
- eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
}
/*
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
*/
edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
- if (pe->type & EEH_PE_VF)
+ if (pe->type & EEH_PE_VF) {
eeh_add_virt_device(edev, NULL);
- else
+ } else {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_hp_add_devices(bus);
+ }
} else if (frozen_bus && rmv_data->removed) {
pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
ssleep(5);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 2405631e91a2..9899032230b4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -175,6 +175,12 @@ transfer_to_handler:
addi r12,r12,-1
stw r12,4(r11)
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ CURRENT_THREAD_INFO(r9, r1)
+ tophys(r9, r9)
+ ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
+#endif
+
b 3f
2: /* if from kernel, check interrupted DOZE/NAP mode and
@@ -398,6 +404,13 @@ BEGIN_FTR_SECTION
lwarx r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ andi. r4,r8,MSR_PR
+ beq 3f
+ CURRENT_THREAD_INFO(r4, r1)
+ ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
+3:
+#endif
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
@@ -769,6 +782,10 @@ restore_user:
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ CURRENT_THREAD_INFO(r9, r1)
+ ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
+#endif
b restore
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 73e461a3dfbb..fcb2887f5a33 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -72,7 +72,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
std r0,GPR0(r1)
std r10,GPR1(r1)
beq 2f /* if from kernel mode */
- ACCOUNT_CPU_USER_ENTRY(r10, r11)
+ ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2: std r2,GPR2(r1)
std r3,GPR3(r1)
mfcr r2
@@ -246,7 +246,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
ld r4,_LINK(r1)
beq- 1f
- ACCOUNT_CPU_USER_EXIT(r11, r12)
+ ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
BEGIN_FTR_SECTION
HMT_MEDIUM_LOW
@@ -453,7 +453,7 @@ _GLOBAL(ret_from_kernel_thread)
REST_NVGPRS(r1)
mtlr r14
mr r3,r15
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
mr r12,r14
#endif
blrl
@@ -859,7 +859,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
mtspr SPRN_PPR,r2 /* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ACCOUNT_CPU_USER_EXIT(r2, r4)
+ ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
1:
mtspr SPRN_SRR1,r3
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 488e6314f993..38a1f96430e1 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -386,7 +386,7 @@ exc_##n##_common: \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
beq 2f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \
+ ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \
2: ld r3,excf+EX_R10(r13); /* get back r10 */ \
ld r4,excf+EX_R11(r13); /* get back r11 */ \
mfspr r5,scratch; /* get back r13 */ \
@@ -453,7 +453,7 @@ exc_##n##_bad_stack: \
sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \
b bad_stack_book3e; /* bad stack error */
-/* WARNING: If you change the layout of this stub, make sure you chcek
+/* WARNING: If you change the layout of this stub, make sure you check
* the debug exception handler which handles single stepping
* into exceptions from userspace, and the MM code in
* arch/powerpc/mm/tlb_nohash.c which patches the branch here
@@ -1059,7 +1059,7 @@ fast_exception_return:
andi. r6,r10,MSR_PR
REST_2GPRS(6, r1)
beq 1f
- ACCOUNT_CPU_USER_EXIT(r10, r11)
+ ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
ld r0,GPR13(r1)
1: stdcx. r0,0,r1 /* to clear the reservation */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4c9440629128..6200e4925d26 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -107,25 +107,9 @@ BEGIN_FTR_SECTION
beq 9f
cmpwi cr3,r13,2
-
- /*
- * Check if last bit of HSPGR0 is set. This indicates whether we are
- * waking up from winkle.
- */
GET_PACA(r13)
- clrldi r5,r13,63
- clrrdi r13,r13,1
- cmpwi cr4,r5,1
- mtspr SPRN_HSPRG0,r13
-
- lbz r0,PACA_THREAD_IDLE_STATE(r13)
- cmpwi cr2,r0,PNV_THREAD_NAP
- bgt cr2,8f /* Either sleep or Winkle */
-
- /* Waking up from nap should not cause hypervisor state loss */
- bgt cr3,.
+ bl pnv_restore_hyp_resource
- /* Waking up from nap */
li r0,PNV_THREAD_RUNNING
stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
@@ -143,13 +127,9 @@ BEGIN_FTR_SECTION
/* Return SRR1 from power7_nap() */
mfspr r3,SPRN_SRR1
- beq cr3,2f
- b power7_wakeup_noloss
-2: b power7_wakeup_loss
-
- /* Fast Sleep wakeup on PowerNV */
-8: GET_PACA(r13)
- b power7_wakeup_tb_loss
+ blt cr3,2f
+ b pnv_wakeup_loss
+2: b pnv_wakeup_noloss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
@@ -351,6 +331,12 @@ hv_doorbell_trampoline:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_doorbell_hv
+ . = 0xea0
+hv_virt_irq_trampoline:
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b h_virt_irq_hv
+
/* We need to deal with the Altivec unavailable exception
* here which is at 0xf20, thus in the middle of the
* prolog code of the PerformanceMonitor one. A little
@@ -601,6 +587,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
+ MASKABLE_EXCEPTION_HV_OOL(0xea2, h_virt_irq)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xea2)
+
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
@@ -680,6 +669,8 @@ _GLOBAL(__replay_interrupt)
BEGIN_FTR_SECTION
cmpwi r3,0xe80
beq h_doorbell_common
+ cmpwi r3,0xea0
+ beq h_virt_irq_common
FTR_SECTION_ELSE
cmpwi r3,0xa00
beq doorbell_super_common
@@ -754,6 +745,7 @@ kvmppc_skip_Hinterrupt:
#else
STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
#endif
+ STD_EXCEPTION_COMMON_ASYNC(0xea0, h_virt_irq, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
@@ -762,11 +754,6 @@ kvmppc_skip_Hinterrupt:
#else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
#endif
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
- STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
- STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
-#endif /* CONFIG_CBE_RAS */
/*
* Relocation-on interrupts: A subset of the interrupts can be delivered
@@ -877,6 +864,12 @@ h_doorbell_relon_trampoline:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_doorbell_relon_hv
+ . = 0x4ea0
+h_virt_irq_relon_trampoline:
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b h_virt_irq_relon_hv
+
. = 0x4f00
performance_monitor_relon_pseries_trampoline:
SET_SCRATCH0(r13)
@@ -1131,12 +1124,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
bl vsx_unavailable_exception
b ret_from_except
- STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
- STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
-
/* Equivalents to the above handlers for relocation-on interrupt vectors */
STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
+ MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
@@ -1170,6 +1161,15 @@ fwnmi_data_area:
. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
+ STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
+
+#ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
+ STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
+ STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
+
.globl hmi_exception_early
hmi_exception_early:
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0xe60)
@@ -1289,7 +1289,7 @@ machine_check_handle_early:
GET_PACA(r13)
ld r1,PACAR1(r13)
li r3,PNV_THREAD_NAP
- b power7_enter_nap_mode
+ b pnv_enter_arch207_idle_mode
4:
#endif
/*
@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
-BEGIN_MMU_FTR_SECTION
- b 2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
beq- 2f
+FTR_SECTION_ELSE
+ b 2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
.machine push
.machine "power4"
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 3cb3b02a13dd..b3a663333d36 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1009,8 +1009,7 @@ static int fadump_invalidate_dump(struct fadump_mem_struct *fdm)
} while (wait_time);
if (rc) {
- printk(KERN_ERR "Failed to invalidate firmware-assisted dump "
- "rgistration. unexpected error(%d).\n", rc);
+ pr_err("Failed to invalidate firmware-assisted dump registration. Unexpected error (%d).\n", rc);
return rc;
}
fw_dump.dump_active = 0;
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1123a4d8d8dd..cc52d9795f88 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -144,6 +144,21 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
+#ifdef CC_USING_MPROFILE_KERNEL
+	/* When using -mprofile-kernel there is no load to jump over */
+ pop = PPC_INST_NOP;
+
+ if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
+ pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+ return -EFAULT;
+ }
+
+ /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+ if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
+ pr_err("Unexpected instruction %08x around bl _mcount\n", op);
+ return -EINVAL;
+ }
+#else
/*
* Our original call site looks like:
*
@@ -170,24 +185,10 @@ __ftrace_make_nop(struct module *mod,
}
if (op != PPC_INST_LD_TOC) {
- unsigned int inst;
-
- if (probe_kernel_read(&inst, (void *)(ip - 4), 4)) {
- pr_err("Fetching instruction at %lx failed.\n", ip - 4);
- return -EFAULT;
- }
-
- /* We expect either a mlfr r0, or a std r0, LRSAVE(r1) */
- if (inst != PPC_INST_MFLR && inst != PPC_INST_STD_LR) {
- pr_err("Unexpected instructions around bl _mcount\n"
- "when enabling dynamic ftrace!\t"
- "(%08x,bl,%08x)\n", inst, op);
- return -EINVAL;
- }
-
- /* When using -mkernel_profile there is no load to jump over */
- pop = PPC_INST_NOP;
+ pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
+ return -EINVAL;
}
+#endif /* CC_USING_MPROFILE_KERNEL */
if (patch_instruction((unsigned int *)ip, pop)) {
pr_err("Patching NOP failed.\n");
@@ -608,7 +609,7 @@ unsigned long __init arch_syscall_addr(int nr)
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
-#if defined(CONFIG_PPC64) && (!defined(_CALL_ELF) || _CALL_ELF != 2)
+#ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
if (str[0] == '.' && search[0] != '.')
@@ -616,4 +617,4 @@ char *arch_ftrace_match_adjust(char *str, const char *search)
else
return str;
}
-#endif /* defined(CONFIG_PPC64) && (!defined(_CALL_ELF) || _CALL_ELF != 2) */
+#endif /* PPC64_ELF_ABI_v1 */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2d14774af6b4..f765b0434731 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -401,7 +401,7 @@ generic_secondary_common_init:
ld r12,CPU_SPEC_RESTORE(r23)
cmpdi 0,r12,0
beq 3f
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
ld r12,0(r12)
#endif
mtctr r12
@@ -941,7 +941,7 @@ start_here_multiplatform:
mtspr SPRN_SRR1,r4
RFI
b . /* prevent speculative execution */
-
+
/* This is where all platforms converge execution */
start_here_common:
@@ -951,9 +951,6 @@ start_here_common:
/* Load the TOC (virtual address) */
ld r2,PACATOC(r13)
- /* Do more system initializations in virtual mode */
- bl setup_system
-
/* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug)
*/
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 80c69472314e..43ddaae42baf 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -30,6 +30,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/fixmap.h>
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
@@ -383,28 +384,57 @@ InstructionTLBMiss:
EXCEPTION_EPILOG_0
rfi
+/*
+ * Bottom part of DataStoreTLBMiss handler for IMMR area
+ * not enough space in the DataStoreTLBMiss area
+ */
+DTLBMissIMMR:
+ mtcr r10
+ /* Set 512k byte guarded page and mark it valid */
+ li r10, MD_PS512K | MD_GUARDED | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+ mfspr r10, SPRN_IMMR /* Get current IMMR */
+ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+ _PAGE_PRESENT | _PAGE_NO_CACHE
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
+
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ EXCEPTION_EPILOG_0
+ rfi
+
. = 0x1200
DataStoreTLBMiss:
- mtspr SPRN_SPRG_SCRATCH2, r3
EXCEPTION_PROLOG_0
- mfcr r3
+ mfcr r10
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- mfspr r10, SPRN_MD_EPN
- IS_KERNEL(r11, r10)
+ mfspr r11, SPRN_MD_EPN
+ rlwinm r11, r11, 16, 0xfff8
+#ifndef CONFIG_PIN_TLB_IMMR
+ cmpli cr0, r11, VIRT_IMMR_BASE@h
+#endif
+ cmpli cr7, r11, PAGE_OFFSET@h
+#ifndef CONFIG_PIN_TLB_IMMR
+_ENTRY(DTLBMiss_jmp)
+ beq- DTLBMissIMMR
+#endif
+ bge- cr7, 4f
+
mfspr r11, SPRN_M_TW /* Get level 1 table */
- BRANCH_UNLESS_KERNEL(3f)
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
+ mtcr r10
+#ifdef CONFIG_8xx_CPU6
+ mtspr SPRN_SPRG_SCRATCH2, r3
+#endif
+ mfspr r10, SPRN_MD_EPN
/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
- mtcr r11
- bt- 28,DTLBMiss8M /* bit 28 = Large page (8M) */
- mtcr r3
/* We have a pte table, so load fetch the pte from the table.
*/
@@ -452,29 +482,30 @@ DataStoreTLBMiss:
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
+#ifdef CONFIG_8xx_CPU6
mfspr r3, SPRN_SPRG_SCRATCH2
+#endif
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi
-DTLBMiss8M:
- mtcr r3
- ori r11, r11, MD_SVALID
- MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
-#ifdef CONFIG_PPC_16K_PAGES
- /*
- * In 16k pages mode, each PGD entry defines a 64M block.
- * Here we select the 8M page within the block.
- */
- rlwimi r11, r10, 0, 0x03800000
-#endif
- rlwinm r10, r11, 0, 0xff800000
+4:
+_ENTRY(DTLBMiss_cmp)
+ cmpli cr0, r11, (PAGE_OFFSET + 0x1800000)@h
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
+ bge- 3b
+
+ mtcr r10
+ /* Set 8M byte page and mark it valid */
+ li r10, MD_PS8MEG | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+ mfspr r10, SPRN_MD_EPN
+ rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
_PAGE_PRESENT
- MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
li r11, RPN_PATTERN
- mfspr r3, SPRN_SPRG_SCRATCH2
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi
@@ -553,12 +584,14 @@ FixupDAR:/* Entry point for dcbx workaround. */
IS_KERNEL(r11, r10)
mfspr r11, SPRN_M_TW /* Get level 1 table */
BRANCH_UNLESS_KERNEL(3f)
+ rlwinm r11, r10, 16, 0xfff8
+_ENTRY(FixupDAR_cmp)
+ cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+ blt- cr7, 200f
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
/* Insert level 1 index */
3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
- mtcr r11
- bt 28,200f /* bit 28 = Large page (8M) */
rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
/* Insert level 2 index */
rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
@@ -584,8 +617,8 @@ FixupDAR:/* Entry point for dcbx workaround. */
141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */
- /* concat physical page address(r11) and page offset(r10) */
-200: rlwimi r11, r10, 0, 32 - (PAGE_SHIFT << 1), 31
+ /* create physical page address from effective address */
+200: tophys(r11, r10)
b 201b
144: mfspr r10, SPRN_DSISR
@@ -763,10 +796,18 @@ start_here:
* virtual to physical. Also, set the cache mode since that is defined
* by TLB entries and perform any additional mapping (like of the IMMR).
* If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
- * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by
+ * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
* these mappings is mapped by page tables.
*/
initial_mmu:
+ li r8, 0
+ mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
+ lis r10, MD_RESETVAL@h
+#ifndef CONFIG_8xx_COPYBACK
+ oris r10, r10, MD_WTDEF@h
+#endif
+ mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */
+
tlbia /* Invalidate all TLB entries */
/* Always pin the first 8 MB ITLB to prevent ITLB
misses while mucking around with SRR0/SRR1 in asm
@@ -777,34 +818,20 @@ initial_mmu:
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
#ifdef CONFIG_PIN_TLB
- lis r10, (MD_RSV4I | MD_RESETVAL)@h
- ori r10, r10, 0x1c00
- mr r8, r10
-#else
- lis r10, MD_RESETVAL@h
-#endif
-#ifndef CONFIG_8xx_COPYBACK
- oris r10, r10, MD_WTDEF@h
-#endif
+ oris r10, r10, MD_RSV4I@h
mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
+#endif
- /* Now map the lower 8 Meg into the TLBs. For this quick hack,
- * we can load the instruction and data TLB registers with the
- * same values.
- */
+ /* Now map the lower 8 Meg into the ITLB. */
lis r8, KERNELBASE@h /* Create vaddr for TLB */
ori r8, r8, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r8
- mtspr SPRN_MD_EPN, r8
li r8, MI_PS8MEG | (2 << 5) /* Set 8M byte page, APG 2 */
ori r8, r8, MI_SVALID /* Make it valid */
mtspr SPRN_MI_TWC, r8
- li r8, MI_PS8MEG /* Set 8M byte page, APG 0 */
- ori r8, r8, MI_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */
mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
- mtspr SPRN_MD_RPN, r8
+
lis r8, MI_APG_INIT@h /* Set protection modes */
ori r8, r8, MI_APG_INIT@l
mtspr SPRN_MI_AP, r8
@@ -812,51 +839,25 @@ initial_mmu:
ori r8, r8, MD_APG_INIT@l
mtspr SPRN_MD_AP, r8
- /* Map another 8 MByte at the IMMR to get the processor
+ /* Map a 512k page for the IMMR to get the processor
* internal registers (among other things).
*/
-#ifdef CONFIG_PIN_TLB
- addi r10, r10, 0x0100
+#ifdef CONFIG_PIN_TLB_IMMR
+ ori r10, r10, 0x1c00
mtspr SPRN_MD_CTR, r10
-#endif
+
mfspr r9, 638 /* Get current IMMR */
- andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
+ andis. r9, r9, 0xfff8 /* Get 512 kbytes boundary */
- mr r8, r9 /* Create vaddr for TLB */
+ lis r8, VIRT_IMMR_BASE@h /* Create vaddr for TLB */
ori r8, r8, MD_EVALID /* Mark it valid */
mtspr SPRN_MD_EPN, r8
- li r8, MD_PS8MEG /* Set 8M byte page */
+ li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
ori r8, r8, MD_SVALID /* Make it valid */
mtspr SPRN_MD_TWC, r8
mr r8, r9 /* Create paddr for TLB */
ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
mtspr SPRN_MD_RPN, r8
-
-#ifdef CONFIG_PIN_TLB
- /* Map two more 8M kernel data pages.
- */
- addi r10, r10, 0x0100
- mtspr SPRN_MD_CTR, r10
-
- lis r8, KERNELBASE@h /* Create vaddr for TLB */
- addis r8, r8, 0x0080 /* Add 8M */
- ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr SPRN_MD_EPN, r8
- li r9, MI_PS8MEG /* Set 8M byte page */
- ori r9, r9, MI_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r9
- li r11, MI_BOOTINIT /* Create RPN for address 0 */
- addis r11, r11, 0x0080 /* Add 8M */
- mtspr SPRN_MD_RPN, r11
-
- addi r10, r10, 0x0100
- mtspr SPRN_MD_CTR, r10
-
- addis r8, r8, 0x0080 /* Add 8M */
- mtspr SPRN_MD_EPN, r8
- mtspr SPRN_MD_TWC, r9
- addis r11, r11, 0x0080 /* Add 8M */
- mtspr SPRN_MD_RPN, r11
#endif
/* Since the cache is enabled according to the information we
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_book3s.S
index 470ceebd2d23..335eb6cedae5 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -1,5 +1,6 @@
/*
- * This file contains the power_save function for Power7 CPUs.
+ * This file contains idle entry/exit functions for POWER7,
+ * POWER8 and POWER9 CPUs.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -20,6 +21,7 @@
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
+#include <asm/mmu.h>
#undef DEBUG
@@ -36,6 +38,11 @@
#define _AMOR GPR9
#define _WORT GPR10
#define _WORC GPR11
+#define _PTCR GPR12
+
+#define PSSCR_HV_TEMPLATE PSSCR_ESL | PSSCR_EC | \
+ PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
+ PSSCR_MTL_MASK
/* Idle state entry routines */
@@ -52,6 +59,45 @@
.text
/*
+ * Used by threads before entering deep idle states. Saves SPRs
+ * in interrupt stack frame
+ */
+save_sprs_to_stack:
+ /*
+	 * Note that all registers, i.e. per-core, per-subcore or per-thread, are saved
+ * here since any thread in the core might wake up first
+ */
+BEGIN_FTR_SECTION
+ mfspr r3,SPRN_PTCR
+ std r3,_PTCR(r1)
+ /*
+ * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
+ * SDR1 here
+ */
+FTR_SECTION_ELSE
+ mfspr r3,SPRN_SDR1
+ std r3,_SDR1(r1)
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
+ mfspr r3,SPRN_RPR
+ std r3,_RPR(r1)
+ mfspr r3,SPRN_SPURR
+ std r3,_SPURR(r1)
+ mfspr r3,SPRN_PURR
+ std r3,_PURR(r1)
+ mfspr r3,SPRN_TSCR
+ std r3,_TSCR(r1)
+ mfspr r3,SPRN_DSCR
+ std r3,_DSCR(r1)
+ mfspr r3,SPRN_AMOR
+ std r3,_AMOR(r1)
+ mfspr r3,SPRN_WORT
+ std r3,_WORT(r1)
+ mfspr r3,SPRN_WORC
+ std r3,_WORC(r1)
+
+ blr
+
+/*
* Used by threads when the lock bit of core_idle_state is set.
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
@@ -69,13 +115,16 @@ core_idle_lock_held:
/*
* Pass requested state in r3:
- * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
+ * r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
+ * - Requested STOP state in POWER9
*
* To check IRQ_HAPPENED in r4
* 0 - don't check
* 1 - check
+ *
+ * Address to 'rfid' to, passed in r5
*/
-_GLOBAL(power7_powersave_common)
+_GLOBAL(pnv_powersave_common)
/* Use r3 to pass state nap/sleep/winkle */
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
@@ -126,28 +175,28 @@ _GLOBAL(power7_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
+
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
* because as soon as we do that, another thread can switch
* the MMU context to the guest.
*/
- LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
+ LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
li r6, MSR_RI
andc r6, r9, r6
- LOAD_REG_ADDR(r7, power7_enter_nap_mode)
mtmsrd r6, 1 /* clear RI before setting SRR0/1 */
- mtspr SPRN_SRR0, r7
- mtspr SPRN_SRR1, r5
+ mtspr SPRN_SRR0, r5
+ mtspr SPRN_SRR1, r7
rfid
- .globl power7_enter_nap_mode
-power7_enter_nap_mode:
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're napping */
- li r4,KVM_HWTHREAD_IN_NAP
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
+ .globl pnv_enter_arch207_idle_mode
+pnv_enter_arch207_idle_mode:
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
@@ -196,8 +245,7 @@ fastsleep_workaround_at_entry:
/* Fast sleep workaround */
li r3,1
li r4,1
- li r0,OPAL_CONFIG_CPU_IDLE_STATE
- bl opal_call_realmode
+ bl opal_rm_config_cpu_idle_state
/* Clear Lock bit */
li r0,0
@@ -206,30 +254,45 @@ fastsleep_workaround_at_entry:
b common_enter
enter_winkle:
- /*
- * Note all register i.e per-core, per-subcore or per-thread is saved
- * here since any thread in the core might wake up first
- */
- mfspr r3,SPRN_SDR1
- std r3,_SDR1(r1)
- mfspr r3,SPRN_RPR
- std r3,_RPR(r1)
- mfspr r3,SPRN_SPURR
- std r3,_SPURR(r1)
- mfspr r3,SPRN_PURR
- std r3,_PURR(r1)
- mfspr r3,SPRN_TSCR
- std r3,_TSCR(r1)
- mfspr r3,SPRN_DSCR
- std r3,_DSCR(r1)
- mfspr r3,SPRN_AMOR
- std r3,_AMOR(r1)
- mfspr r3,SPRN_WORT
- std r3,_WORT(r1)
- mfspr r3,SPRN_WORC
- std r3,_WORC(r1)
+ bl save_sprs_to_stack
+
IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
+/*
+ * r3 - requested stop state
+ */
+power_enter_stop:
+/*
+ * Check if the requested state is a deep idle state.
+ */
+ LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+ ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
+ cmpd r3,r4
+ bge 2f
+ IDLE_STATE_ENTER_SEQ(PPC_STOP)
+2:
+/*
+ * Entering deep idle state.
+ * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
+ * stack and enter stop
+ */
+ lbz r7,PACA_THREAD_MASK(r13)
+ ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+
+lwarx_loop_stop:
+ lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bnel core_idle_lock_held
+ andc r15,r15,r7 /* Clear thread bit */
+
+ stwcx. r15,0,r14
+ bne- lwarx_loop_stop
+ isync
+
+ bl save_sprs_to_stack
+
+ IDLE_STATE_ENTER_SEQ(PPC_STOP)
+
_GLOBAL(power7_idle)
/* Now check if user or arch enabled NAP mode */
LOAD_REG_ADDRBASE(r3,powersave_nap)
@@ -242,19 +305,22 @@ _GLOBAL(power7_idle)
_GLOBAL(power7_nap)
mr r4,r3
li r3,PNV_THREAD_NAP
- b power7_powersave_common
+ LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+ b pnv_powersave_common
/* No return */
_GLOBAL(power7_sleep)
li r3,PNV_THREAD_SLEEP
li r4,1
- b power7_powersave_common
+ LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+ b pnv_powersave_common
/* No return */
_GLOBAL(power7_winkle)
- li r3,3
+ li r3,PNV_THREAD_WINKLE
li r4,1
- b power7_powersave_common
+ LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+ b pnv_powersave_common
/* No return */
#define CHECK_HMI_INTERRUPT \
@@ -270,25 +336,104 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
ld r2,PACATOC(r13); \
ld r1,PACAR1(r13); \
std r3,ORIG_GPR3(r1); /* Save original r3 */ \
- li r0,OPAL_HANDLE_HMI; /* Pass opal token argument*/ \
- bl opal_call_realmode; \
+ bl opal_rm_handle_hmi; \
ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \
20: nop;
-_GLOBAL(power7_wakeup_tb_loss)
+/*
+ * r3 - requested stop state
+ */
+_GLOBAL(power9_idle_stop)
+ LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
+ or r4,r4,r3
+ mtspr SPRN_PSSCR, r4
+ li r4, 1
+ LOAD_REG_ADDR(r5,power_enter_stop)
+ b pnv_powersave_common
+ /* No return */
+/*
+ * Called from reset vector. Check whether we have woken up with
+ * hypervisor state loss. If yes, restore hypervisor state and return
+ * back to reset vector.
+ *
+ * r13 - Contents of HSPRG0
+ * cr3 - set to gt if waking up with partial/complete hypervisor state loss
+ */
+_GLOBAL(pnv_restore_hyp_resource)
ld r2,PACATOC(r13);
+BEGIN_FTR_SECTION
+ /*
+ * POWER ISA 3. Use PSSCR to determine if we
+ * are waking up from deep idle state
+ */
+ LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+ ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
+
+ mfspr r5,SPRN_PSSCR
+ /*
+ * 0-3 bits correspond to Power-Saving Level Status
+	 * Bits 0-3 correspond to the Power-Saving Level Status,
+ */
+ rldicl r5,r5,4,60
+ cmpd cr4,r5,r4
+ bge cr4,pnv_wakeup_tb_loss
+ /*
+ * Waking up without hypervisor state loss. Return to
+ * reset vector
+ */
+ blr
+
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+ /*
+ * POWER ISA 2.07 or less.
+ * Check if last bit of HSPGR0 is set. This indicates whether we are
+ * waking up from winkle.
+ */
+ clrldi r5,r13,63
+ clrrdi r13,r13,1
+ cmpwi cr4,r5,1
+ mtspr SPRN_HSPRG0,r13
+
+ lbz r0,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr2,r0,PNV_THREAD_NAP
+ bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */
+
+ /*
+ * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
+	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
+ * indicates we are waking with hypervisor state loss from nap.
+ */
+ bgt cr3,.
+
+ blr /* Return back to System Reset vector from where
+ pnv_restore_hyp_resource was invoked */
+
+/*
+ * Called if waking up from idle state which can cause either partial or
+ * complete hyp state loss.
+ * In POWER8, called if waking up from fastsleep or winkle
+ * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
+ *
+ * r13 - PACA
+ * cr3 - gt if waking up with partial/complete hypervisor state loss
+ * cr4 - eq if waking up from complete hypervisor state loss.
+ */
+_GLOBAL(pnv_wakeup_tb_loss)
ld r1,PACAR1(r13)
/*
* Before entering any idle state, the NVGPRs are saved in the stack
* and they are restored before switching to the process context. Hence
* until they are restored, they are free to be used.
*
- * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
- * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
- * wakeup reason if we branch to kvm_start_guest.
+ * Save SRR1 and LR in NVGPRs as they might be clobbered in
+ * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
+ * to determine the wakeup reason if we branch to kvm_start_guest. LR
+ * is required to return back to reset vector after hypervisor state
+ * restore is complete.
*/
-
+ mflr r17
mfspr r16,SPRN_SRR1
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
@@ -310,35 +455,35 @@ lwarx_loop2:
bnel core_idle_lock_held
cmpwi cr2,r15,0
- lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
- and r4,r4,r15
- cmpwi cr1,r4,0 /* Check if first in subcore */
/*
* At this stage
- * cr1 - 0b0100 if first thread to wakeup in subcore
- * cr2 - 0b0100 if first thread to wakeup in core
- * cr3- 0b0010 if waking up from sleep or winkle
- * cr4 - 0b0100 if waking up from winkle
+ * cr2 - eq if first thread to wakeup in core
+ * cr3 - gt if waking up with partial/complete hypervisor state loss
+ * cr4 - eq if waking up from complete hypervisor state loss.
*/
- or r15,r15,r7 /* Set thread bit */
-
- beq cr1,first_thread_in_subcore
-
- /* Not first thread in subcore to wake up */
- stwcx. r15,0,r14
- bne- lwarx_loop2
- isync
- b common_exit
-
-first_thread_in_subcore:
- /* First thread in subcore to wakeup */
ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
stwcx. r15,0,r14
bne- lwarx_loop2
isync
+BEGIN_FTR_SECTION
+ lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
+ and r4,r4,r15
+ cmpwi r4,0 /* Check if first in subcore */
+
+ or r15,r15,r7 /* Set thread bit */
+ beq first_thread_in_subcore
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
+ or r15,r15,r7 /* Set thread bit */
+ beq cr2,first_thread_in_core
+
+ /* Not first thread in core or subcore to wake up */
+ b clear_lock
+
+first_thread_in_subcore:
/*
* If waking up from sleep, subcore state is not lost. Hence
* skip subcore state restore
@@ -348,6 +493,7 @@ first_thread_in_subcore:
/* Restore per-subcore state */
ld r4,_SDR1(r1)
mtspr SPRN_SDR1,r4
+
ld r4,_RPR(r1)
mtspr SPRN_RPR,r4
ld r4,_AMOR(r1)
@@ -363,32 +509,44 @@ subcore_state_restored:
first_thread_in_core:
/*
- * First thread in the core waking up from fastsleep. It needs to
+ * First thread in the core waking up from any state which can cause
+ * partial or complete hypervisor state loss. It needs to
* call the fastsleep workaround code if the platform requires it.
* Call it unconditionally here. The below branch instruction will
- * be patched out when the idle states are discovered if platform
- * does not require workaround.
+ * be patched out if the platform does not have fastsleep or does not
+ * require the workaround. Patching will be performed during the
+ * discovery of idle-states.
*/
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
b fastsleep_workaround_at_exit
timebase_resync:
- /* Do timebase resync if we are waking up from sleep. Use cr3 value
- * set in exceptions-64s.S */
+ /*
+	 * Use cr3 which indicates that we are waking up with at least partial
+ * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
+ */
ble cr3,clear_lock
/* Time base re-sync */
- li r0,OPAL_RESYNC_TIMEBASE
- bl opal_call_realmode;
- /* TODO: Check r3 for failure */
-
+ bl opal_rm_resync_timebase;
/*
* If waking up from sleep, per core state is not lost, skip to
* clear_lock.
*/
bne cr4,clear_lock
- /* Restore per core state */
+ /*
+ * First thread in the core to wake up and its waking up with
+ * complete hypervisor state loss. Restore per core hypervisor
+ * state.
+ */
+BEGIN_FTR_SECTION
+ ld r4,_PTCR(r1)
+ mtspr SPRN_PTCR,r4
+ ld r4,_RPR(r1)
+ mtspr SPRN_RPR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
ld r4,_TSCR(r1)
mtspr SPRN_TSCR,r4
ld r4,_WORC(r1)
@@ -410,9 +568,9 @@ common_exit:
/* Waking up from winkle */
- /* Restore per thread state */
- bl __restore_cpu_power8
-
+BEGIN_MMU_FTR_SECTION
+ b no_segments
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
/* Restore SLB from PACA */
ld r8,PACA_SLBSHADOWPTR(r13)
@@ -426,6 +584,9 @@ common_exit:
slbmte r6,r5
1: addi r8,r8,16
.endr
+no_segments:
+
+ /* Restore per thread state */
ld r4,_SPURR(r1)
mtspr SPRN_SPURR,r4
@@ -436,48 +597,34 @@ common_exit:
ld r4,_WORT(r1)
mtspr SPRN_WORT,r4
-hypervisor_state_restored:
+ /* Call cur_cpu_spec->cpu_restore() */
+ LOAD_REG_ADDR(r4, cur_cpu_spec)
+ ld r4,0(r4)
+ ld r12,CPU_SPEC_RESTORE(r4)
+#ifdef PPC64_ELF_ABI_v1
+ ld r12,0(r12)
+#endif
+ mtctr r12
+ bctrl
- li r5,PNV_THREAD_RUNNING
- stb r5,PACA_THREAD_IDLE_STATE(r13)
+hypervisor_state_restored:
mtspr SPRN_SRR1,r16
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- li r0,KVM_HWTHREAD_IN_KERNEL
- stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* Order setting hwthread_state vs. testing hwthread_req */
- sync
- lbz r0,HSTATE_HWTHREAD_REQ(r13)
- cmpwi r0,0
- beq 6f
- b kvm_start_guest
-6:
-#endif
-
- REST_NVGPRS(r1)
- REST_GPR(2, r1)
- ld r3,_CCR(r1)
- ld r4,_MSR(r1)
- ld r5,_NIP(r1)
- addi r1,r1,INT_FRAME_SIZE
- mtcr r3
- mfspr r3,SPRN_SRR1 /* Return SRR1 */
- mtspr SPRN_SRR1,r4
- mtspr SPRN_SRR0,r5
- rfid
+ mtlr r17
+ blr /* Return back to System Reset vector from where
+ pnv_restore_hyp_resource was invoked */
fastsleep_workaround_at_exit:
li r3,1
li r4,0
- li r0,OPAL_CONFIG_CPU_IDLE_STATE
- bl opal_call_realmode
+ bl opal_rm_config_cpu_idle_state
b timebase_resync
/*
* R3 here contains the value that will be returned to the caller
* of power7_nap.
*/
-_GLOBAL(power7_wakeup_loss)
+_GLOBAL(pnv_wakeup_loss)
ld r1,PACAR1(r13)
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
@@ -497,10 +644,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
* R3 here contains the value that will be returned to the caller
* of power7_nap.
*/
-_GLOBAL(power7_wakeup_noloss)
+_GLOBAL(pnv_wakeup_noloss)
lbz r0,PACA_NAPSTATELOST(r13)
cmpwi r0,0
- bne power7_wakeup_loss
+ bne pnv_wakeup_loss
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 12e48d56f771..3963f0b68d52 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -38,6 +38,18 @@ EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
+#ifdef __powerpc64__
+u64 ioread64(void __iomem *addr)
+{
+ return readq(addr);
+}
+u64 ioread64be(void __iomem *addr)
+{
+ return readq_be(addr);
+}
+EXPORT_SYMBOL(ioread64);
+EXPORT_SYMBOL(ioread64be);
+#endif /* __powerpc64__ */
void iowrite8(u8 val, void __iomem *addr)
{
@@ -64,6 +76,18 @@ EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
+#ifdef __powerpc64__
+void iowrite64(u64 val, void __iomem *addr)
+{
+ writeq(val, addr);
+}
+void iowrite64be(u64 val, void __iomem *addr)
+{
+ writeq_be(val, addr);
+}
+EXPORT_SYMBOL(iowrite64);
+EXPORT_SYMBOL(iowrite64be);
+#endif /* __powerpc64__ */
/*
* These are the "repeat read/write" functions. Note the
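
The new ioread64/ioread64be and iowrite64/iowrite64be helpers are thin wrappers around readq/readq_be and writeq/writeq_be, only built on __powerpc64__. A minimal usage sketch follows; the driver, the "regs" mapping and the REG_CTRL offset are illustrative assumptions, not part of this patch.

#include <linux/io.h>

/* Hypothetical 64-bit big-endian device register at offset 0x10;
 * "regs" is assumed to come from an earlier ioremap(). */
#define REG_CTRL	0x10

static u64 dev_read_ctrl(void __iomem *regs)
{
	return ioread64be(regs + REG_CTRL);
}

static void dev_write_ctrl(void __iomem *regs, u64 val)
{
	iowrite64be(val, regs + REG_CTRL);
}
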
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 3cb46a3b1de7..ac910d9982df 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -250,7 +250,7 @@ notrace void arch_local_irq_restore(unsigned long en)
if (WARN_ON(mfmsr() & MSR_EE))
__hard_irq_disable();
}
-#endif /* CONFIG_TRACE_IRQFLAG */
+#endif /* CONFIG_TRACE_IRQFLAGS */
set_soft_enabled(0);
@@ -342,6 +342,21 @@ bool prep_irq_for_idle(void)
return true;
}
+/*
+ * Force a replay of the external interrupt handler on this CPU.
+ */
+void force_external_irq_replay(void)
+{
+ /*
+ * This must only be called with interrupts soft-disabled,
+ * the replay will happen when re-enabling.
+ */
+ WARN_ON(!arch_irqs_disabled());
+
+ /* Indicate in the PACA that we have an interrupt to replay */
+ local_paca->irq_happened |= PACA_IRQ_EE;
+}
+
#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
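
force_external_irq_replay() only marks PACA_IRQ_EE pending in the PACA; the external interrupt handler actually runs when interrupts are next soft-enabled. A hedged sketch of a caller: the wrapper function is hypothetical, it assumes the usual lazy soft-disable scheme, and the declaration of force_external_irq_replay() comes from the powerpc interrupt headers.

#include <linux/irqflags.h>

/* Hypothetical helper: ask for a replay of the external interrupt
 * handler on this CPU. */
static void kick_external_irq(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* soft-disable interrupts */
	force_external_irq_replay();	/* set PACA_IRQ_EE in the PACA */
	local_irq_restore(flags);	/* replay fires here, on re-enable */
}
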
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f281406..3ed8ec09b5c9 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -278,12 +278,11 @@ no_kprobe:
* - When the probed function returns, this probe
* causes the handlers to fire
*/
-static void __used kretprobe_trampoline_holder(void)
-{
- asm volatile(".global kretprobe_trampoline\n"
- "kretprobe_trampoline:\n"
- "nop\n");
-}
+asm(".global kretprobe_trampoline\n"
+ ".type kretprobe_trampoline, @function\n"
+ "kretprobe_trampoline:\n"
+ "nop\n"
+ ".size kretprobe_trampoline, .-kretprobe_trampoline\n");
/*
* Called when the probe at kretprobe trampoline is hit
@@ -506,13 +505,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
/* setup return addr to the jprobe handler routine */
regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef CONFIG_PPC64
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
regs->gpr[12] = (unsigned long)jp->entry;
-#else
+#elif defined(PPC64_ELF_ABI_v1)
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif
-#endif
return 1;
}
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index b8c202d63ecb..4c780a342282 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -29,6 +29,7 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
+#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC_BOOK3E
int default_machine_kexec_prepare(struct kimage *image)
@@ -54,7 +55,7 @@ int default_machine_kexec_prepare(struct kimage *image)
const unsigned long *basep;
const unsigned int *sizep;
- if (!ppc_md.hpte_clear_all)
+ if (!mmu_hash_ops.hpte_clear_all)
return -ENOENT;
/*
@@ -379,7 +380,12 @@ void default_machine_kexec(struct kimage *image)
*/
kexec_sequence(&kexec_stack, image->start, image,
page_address(image->control_code_page),
- ppc_md.hpte_clear_all);
+#ifdef CONFIG_PPC_STD_MMU
+ mmu_hash_ops.hpte_clear_all
+#else
+ NULL
+#endif
+ );
/* NOTREACHED */
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 285ca8c6cc2e..d9c912b6e632 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -104,20 +104,6 @@ _GLOBAL(mulhdu)
blr
/*
- * sub_reloc_offset(x) returns x - reloc_offset().
- */
-_GLOBAL(sub_reloc_offset)
- mflr r0
- bl 1f
-1: mflr r5
- lis r4,1b@ha
- addi r4,r4,1b@l
- subf r5,r4,r5
- subf r3,r5,r3
- mtlr r0
- blr
-
-/*
* reloc_got2 runs through the .got2 section adding an offset
* to each entry.
*/
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index f28754c497e5..cb195157b318 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -661,13 +661,13 @@ _GLOBAL(kexec_sequence)
#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
ld r12,0(r27) /* deref function descriptor */
#else
mr r12,r27
#endif
mtctr r12
- bctrl /* ppc_md.hpte_clear_all(void); */
+ bctrl /* mmu_hash_ops.hpte_clear_all(void); */
#endif /* !CONFIG_PPC_BOOK3E */
/*
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 9ce9a25f58b5..183368e008cf 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -41,7 +41,7 @@
this, and makes other things simpler. Anton?
--RR. */
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
/* An address is simply the address of the function. */
typedef unsigned long func_desc_t;
@@ -132,7 +132,7 @@ static u32 ppc64_stub_insns[] = {
/* Save current r2 value in magic place on the stack. */
0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */
0xe98b0020, /* ld r12,32(r11) */
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
/* Set up new r2 from function descriptor */
0xe84b0028, /* ld r2,40(r11) */
#endif
@@ -494,9 +494,10 @@ static bool is_early_mcount_callsite(u32 *instruction)
restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
+ if (is_early_mcount_callsite(instruction - 1))
+ return 1;
+
if (*instruction != PPC_INST_NOP) {
- if (is_early_mcount_callsite(instruction - 1))
- return 1;
pr_err("%s: Expect noop after relocate, got %08x\n",
me->name, *instruction);
return 0;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 856f9a7944cd..64174bf95611 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -444,7 +444,8 @@ static int nvram_pstore_write(enum pstore_type_id type,
*/
static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi)
+ bool *compressed, ssize_t *ecc_notice_size,
+ struct pstore_info *psi)
{
struct oops_log_info *oops_hdr;
unsigned int err_type, id_no, size = 0;
@@ -545,6 +546,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
return -ENOMEM;
kfree(buff);
+ *ecc_notice_size = 0;
if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
*compressed = true;
else
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0f7a60f1e9f6..f93942b4b6a6 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -41,11 +41,18 @@
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
+/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
-/* XXX kill that some day ... */
-static int global_phb_number; /* Global phb counter */
+/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
+#define MAX_PHBS 0x10000
+
+/*
+ * For dynamic PHB numbering: used/free PHBs tracking bitmap.
+ * Accesses to this bitmap should be protected by hose_spinlock.
+ */
+static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
/* ISA Memory physical address */
resource_size_t isa_mem_base;
@@ -64,6 +71,42 @@ struct dma_map_ops *get_pci_dma_ops(void)
}
EXPORT_SYMBOL(get_pci_dma_ops);
+/*
+ * This function should run under locking protection, specifically
+ * hose_spinlock.
+ */
+static int get_phb_number(struct device_node *dn)
+{
+ int ret, phb_id = -1;
+ u64 prop;
+
+ /*
+ * Try fixed PHB numbering first, by checking archs and reading
+ * the respective device-tree properties. Firstly, try powernv by
+ * reading "ibm,opal-phbid", only present in OPAL environment.
+ */
+ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+ if (ret)
+ ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
+
+ if (!ret)
+ phb_id = (int)(prop & (MAX_PHBS - 1));
+
+ /* We need to be sure to not use the same PHB number twice. */
+ if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
+ return phb_id;
+
+ /*
+	 * If neither pseries nor powernv, or if fixed PHB numbering tried to add
+ * the same PHB number twice, then fallback to dynamic PHB numbering.
+ */
+ phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
+ BUG_ON(phb_id >= MAX_PHBS);
+ set_bit(phb_id, phb_bitmap);
+
+ return phb_id;
+}
+
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
struct pci_controller *phb;
@@ -72,7 +115,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
if (phb == NULL)
return NULL;
spin_lock(&hose_spinlock);
- phb->global_number = global_phb_number++;
+ phb->global_number = get_phb_number(dev);
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
phb->dn = dev;
@@ -94,6 +137,11 @@ EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
void pcibios_free_controller(struct pci_controller *phb)
{
spin_lock(&hose_spinlock);
+
+ /* Clear bit of phb_bitmap to allow reuse of this PHB number. */
+ if (phb->global_number < MAX_PHBS)
+ clear_bit(phb->global_number, phb_bitmap);
+
list_del(&phb->list_node);
spin_unlock(&hose_spinlock);
@@ -124,6 +172,14 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
return 1;
}
+void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+
+ if (hose->controller_ops.setup_bridge)
+ hose->controller_ops.setup_bridge(bus, type);
+}
+
void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
@@ -1362,8 +1418,10 @@ void __init pcibios_resource_survey(void)
/* Allocate and assign resources */
list_for_each_entry(b, &pci_root_buses, node)
pcibios_allocate_bus_resources(b);
- pcibios_allocate_resources(0);
- pcibios_allocate_resources(1);
+ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
+ pcibios_allocate_resources(0);
+ pcibios_allocate_resources(1);
+ }
/* Before we start assigning unassigned resource, we try to reserve
* the low IO area and the VGA memory area if they intersect the
@@ -1436,8 +1494,12 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
/* Allocate bus and devices resources */
pcibios_allocate_bus_resources(bus);
pcibios_claim_one_bus(bus);
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_assign_unassigned_bus_resources(bus);
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ if (bus->self)
+ pci_assign_unassigned_bridge_resources(bus->self);
+ else
+ pci_assign_unassigned_bus_resources(bus);
+ }
/* Fixup EEH */
eeh_add_device_tree_late(bus);
@@ -1485,9 +1547,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
res = &hose->io_resource;
if (!res->flags) {
- pr_info("PCI: I/O resource not set for host"
- " bridge %s (domain %d)\n",
- hose->dn->full_name, hose->global_number);
+ pr_debug("PCI: I/O resource not set for host"
+ " bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
} else {
offset = pcibios_io_space_offset(hose);
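
The PHB numbering policy introduced above is: prefer the firmware-provided id (masked to MAX_PHBS - 1) if its bit in phb_bitmap is still free, otherwise fall back to the first free bit. A standalone sketch of the same policy, in plain C outside the kernel, for illustration only:

#include <stdint.h>

#define MAX_PHBS 0x10000
static uint8_t phb_bitmap[MAX_PHBS];	/* 0 = free, 1 = used */

/* fixed_id < 0 means "no firmware-provided id"; returns the PHB number. */
static int pick_phb_number(long fixed_id)
{
	int id;

	if (fixed_id >= 0) {
		id = fixed_id & (MAX_PHBS - 1);
		if (!phb_bitmap[id]) {
			phb_bitmap[id] = 1;
			return id;
		}
	}
	for (id = 0; id < MAX_PHBS; id++) {	/* dynamic fallback */
		if (!phb_bitmap[id]) {
			phb_bitmap[id] = 1;
			return id;
		}
	}
	return -1;			/* all PHB numbers in use */
}
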
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3759df52bd67..ed5e9ff61a68 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -47,7 +47,6 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
- pci_io_base = ISA_IO_BASE;
/* For now, override phys_mem_access_prot. If we need it,g
* later, we may move that initialization to each ppc_md
*/
@@ -82,7 +81,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
/* If this is not a PHB, we only flush the hash table over
* the area mapped by this bridge. We don't play with the PTE
- * mappings since we might have to deal with sub-page alignemnts
+ * mappings since we might have to deal with sub-page alignments
* so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area
* while still allowing other busses overlapping those pages
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index ecdccce78719..592693437070 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -31,6 +31,7 @@
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
+#include <asm/eeh.h>
/*
* The function is used to find the firmware data of one
@@ -181,7 +182,6 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct pci_dn *parent, *pdn;
- struct eeh_dev *edev;
int i;
/* Only support IOV for now */
@@ -199,6 +199,8 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
return NULL;
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ struct eeh_dev *edev __maybe_unused;
+
pdn = add_one_dev_pci_data(parent, NULL, i,
pci_iov_virtfn_bus(pdev, i),
pci_iov_virtfn_devfn(pdev, i));
@@ -208,11 +210,12 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
return NULL;
}
+#ifdef CONFIG_EEH
/* Create the EEH device for the VF */
- eeh_dev_init(pdn, pci_bus_to_host(pdev->bus));
- edev = pdn_to_eeh_dev(pdn);
+ edev = eeh_dev_init(pdn);
BUG_ON(!edev);
edev->physfn = pdev;
+#endif /* CONFIG_EEH */
}
#endif /* CONFIG_PCI_IOV */
@@ -224,7 +227,6 @@ void remove_dev_pci_data(struct pci_dev *pdev)
#ifdef CONFIG_PCI_IOV
struct pci_dn *parent;
struct pci_dn *pdn, *tmp;
- struct eeh_dev *edev;
int i;
/*
@@ -260,18 +262,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
* a batch mode.
*/
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ struct eeh_dev *edev __maybe_unused;
+
list_for_each_entry_safe(pdn, tmp,
&parent->child_list, list) {
if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
continue;
+#ifdef CONFIG_EEH
/* Release EEH device for the VF */
edev = pdn_to_eeh_dev(pdn);
if (edev) {
pdn->edev = NULL;
kfree(edev);
}
+#endif /* CONFIG_EEH */
if (!list_empty(&pdn->list))
list_del(&pdn->list);
@@ -289,8 +295,11 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
const __be32 *regs;
struct device_node *parent;
struct pci_dn *pdn;
+#ifdef CONFIG_EEH
+ struct eeh_dev *edev;
+#endif
- pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
if (pdn == NULL)
return NULL;
dn->data = pdn;
@@ -319,6 +328,15 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
/* Extended config space */
pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
+ /* Create EEH device */
+#ifdef CONFIG_EEH
+ edev = eeh_dev_init(pdn);
+ if (!edev) {
+ kfree(pdn);
+ return NULL;
+ }
+#endif
+
/* Attach to parent node */
INIT_LIST_HEAD(&pdn->child_list);
INIT_LIST_HEAD(&pdn->list);
@@ -504,15 +522,19 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
* pci device found underneath. This routine runs once,
* early in the boot sequence.
*/
-void __init pci_devs_phb_init(void)
+static int __init pci_devs_phb_init(void)
{
struct pci_controller *phb, *tmp;
/* This must be done first so the device nodes have valid pci info! */
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
pci_devs_phb_init_dynamic(phb);
+
+ return 0;
}
+core_initcall(pci_devs_phb_init);
+
static void pci_dev_pdn_setup(struct pci_dev *pdev)
{
struct pci_dn *pdn;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e2f12cbcade9..a8cca88e972f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -139,12 +139,16 @@ EXPORT_SYMBOL(__msr_check_and_clear);
#ifdef CONFIG_PPC_FPU
void __giveup_fpu(struct task_struct *tsk)
{
+ unsigned long msr;
+
save_fpu(tsk);
- tsk->thread.regs->msr &= ~MSR_FP;
+ msr = tsk->thread.regs->msr;
+ msr &= ~MSR_FP;
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
- tsk->thread.regs->msr &= ~MSR_VSX;
+ msr &= ~MSR_VSX;
#endif
+ tsk->thread.regs->msr = msr;
}
void giveup_fpu(struct task_struct *tsk)
@@ -219,12 +223,16 @@ static int restore_fp(struct task_struct *tsk) { return 0; }
static void __giveup_altivec(struct task_struct *tsk)
{
+ unsigned long msr;
+
save_altivec(tsk);
- tsk->thread.regs->msr &= ~MSR_VEC;
+ msr = tsk->thread.regs->msr;
+ msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
- tsk->thread.regs->msr &= ~MSR_VSX;
+ msr &= ~MSR_VSX;
#endif
+ tsk->thread.regs->msr = msr;
}
void giveup_altivec(struct task_struct *tsk)
@@ -794,7 +802,7 @@ static void tm_reclaim_thread(struct thread_struct *thr,
* this state.
* We do this using the current MSR, rather tracking it in
* some specific thread_struct bit, as it has the additional
- * benifit of checking for a potential TM bad thing exception.
+ * benefit of checking for a potential TM bad thing exception.
*/
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
@@ -1009,6 +1017,14 @@ static inline void save_sprs(struct thread_struct *t)
*/
t->tar = mfspr(SPRN_TAR);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally save Load Monitor registers, if enabled */
+ if (t->fscr & FSCR_LM) {
+ t->lmrr = mfspr(SPRN_LMRR);
+ t->lmser = mfspr(SPRN_LMSER);
+ }
+ }
#endif
}
@@ -1023,18 +1039,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
- u64 fscr = old_thread->fscr & ~FSCR_DSCR;
-
- if (new_thread->dscr_inherit) {
+ if (new_thread->dscr_inherit)
dscr = new_thread->dscr;
- fscr |= FSCR_DSCR;
- }
if (old_thread->dscr != dscr)
mtspr(SPRN_DSCR, dscr);
-
- if (old_thread->fscr != fscr)
- mtspr(SPRN_FSCR, fscr);
}
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
@@ -1045,9 +1054,22 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->ebbrr != new_thread->ebbrr)
mtspr(SPRN_EBBRR, new_thread->ebbrr);
+ if (old_thread->fscr != new_thread->fscr)
+ mtspr(SPRN_FSCR, new_thread->fscr);
+
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally restore Load Monitor registers, if enabled */
+ if (new_thread->fscr & FSCR_LM) {
+ if (old_thread->lmrr != new_thread->lmrr)
+ mtspr(SPRN_LMRR, new_thread->lmrr);
+ if (old_thread->lmser != new_thread->lmser)
+ mtspr(SPRN_LMSER, new_thread->lmser);
+ }
+ }
#endif
}
@@ -1505,6 +1527,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.regs = regs - 1;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Clear any transactional state, we're exec()ing. The cause is
+ * not important as there will never be a recheckpoint so it's not
+ * user visible.
+ */
+ if (MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(0);
+#endif
+
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 946e34ffeae9..bae3db791150 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -56,6 +56,8 @@
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/debug.h>
+#include <asm/epapr_hcalls.h>
+#include <asm/firmware.h>
#include <mm/mmu_decl.h>
@@ -645,6 +647,14 @@ static void __init early_reserve_mem(void)
#endif
}
+static bool disable_radix;
+static int __init parse_disable_radix(char *p)
+{
+ disable_radix = true;
+ return 0;
+}
+early_param("disable_radix", parse_disable_radix);
+
void __init early_init_devtree(void *params)
{
phys_addr_t limit;
@@ -734,11 +744,26 @@ void __init early_init_devtree(void *params)
*/
spinning_secondaries = boot_cpu_count - 1;
#endif
+ /*
+ * now fixup radix MMU mode based on kernel command line
+ */
+ if (disable_radix)
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
#ifdef CONFIG_PPC_POWERNV
/* Scan and build the list of machine check recoverable ranges */
of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif
+ epapr_paravirt_early_init();
+
+ /* Now try to figure out if we are running on LPAR and so on */
+ pseries_probe_fw_features();
+
+#ifdef CONFIG_PPC_PS3
+ /* Identify PS3 firmware */
+ if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
+ powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
+#endif
DBG(" <- early_init_devtree()\n");
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index da5192590c44..6ee4b72cda42 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
W(0xffff0000), W(0x004b0000), /* POWER8E */
+ W(0xffff0000), W(0x004c0000), /* POWER8NVL */
W(0xffff0000), W(0x004d0000), /* POWER8 */
W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
@@ -718,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 125
+#define IBM_ARCH_VEC_NRCORES_OFFSET 133
W(NR_CPUS), /* number of cores supported */
0,
0,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 30a03c03fe73..134bee9ac664 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32][0]));
+ offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
@@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32][0]));
+ offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
@@ -1783,12 +1783,12 @@ static int do_seccomp(struct pt_regs *regs)
* have already loaded -ENOSYS into r3, or seccomp has put
* something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
*/
- if (__secure_computing())
+ if (__secure_computing(NULL))
return -1;
/*
* The syscall was allowed by seccomp, restore the register
- * state to what ptrace and audit expect.
+ * state to what audit expects.
* Note that we use orig_gpr3, which means a seccomp tracer can
* modify the first syscall parameter (in orig_gpr3) and also
* allow the syscall to proceed.
@@ -1822,22 +1822,25 @@ static inline int do_seccomp(struct pt_regs *regs) { return 0; }
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
- bool abort = false;
-
user_exit();
+ /*
+ * The tracer may decide to abort the syscall, if so tracehook
+ * will return !0. Note that the tracer may also just change
+ * regs->gpr[0] to an invalid syscall number, that is handled
+ * below on the exit path.
+ */
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ goto skip;
+
+ /* Run seccomp after ptrace; allow it to set gpr[3]. */
if (do_seccomp(regs))
return -1;
- if (test_thread_flag(TIF_SYSCALL_TRACE)) {
- /*
- * The tracer may decide to abort the syscall, if so tracehook
- * will return !0. Note that the tracer may also just change
- * regs->gpr[0] to an invalid syscall number, that is handled
- * below on the exit path.
- */
- abort = tracehook_report_syscall_entry(regs) != 0;
- }
+ /* Avoid trace and audit when syscall is invalid. */
+ if (regs->gpr[0] >= NR_syscalls)
+ goto skip;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
@@ -1854,17 +1857,16 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
- if (abort || regs->gpr[0] >= NR_syscalls) {
- /*
- * If we are aborting explicitly, or if the syscall number is
- * now invalid, set the return value to -ENOSYS.
- */
- regs->gpr[3] = -ENOSYS;
- return -1;
- }
-
/* Return the possibly modified but valid syscall number */
return regs->gpr[0];
+
+skip:
+ /*
+ * If we are aborting explicitly, or if the syscall number is
+ * now invalid, set the return value to -ENOSYS.
+ */
+ regs->gpr[3] = -ENOSYS;
+ return -1;
}
void do_syscall_trace_leave(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index fb2fb3ea85e5..c82eed97bd22 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -698,7 +698,7 @@ static void check_location(struct seq_file *m, const char *c)
/*
* Format:
* ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
- * the '.' may be an abbrevation
+ * the '.' may be an abbreviation
*/
static void check_location_string(struct seq_file *m, const char *c)
{
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 28736ff27fea..6a3e5de544ce 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -685,7 +685,7 @@ int rtas_set_indicator_fast(int indicator, int index, int new_value)
return rc;
}
-void rtas_restart(char *cmd)
+void __noreturn rtas_restart(char *cmd)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_RESTART);
@@ -704,7 +704,7 @@ void rtas_power_off(void)
for (;;);
}
-void rtas_halt(void)
+void __noreturn rtas_halt(void)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_HALT);
@@ -1070,7 +1070,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
nret = be32_to_cpu(args.nret);
token = be32_to_cpu(args.token);
- if (nargs > ARRAY_SIZE(args.args)
+ if (nargs >= ARRAY_SIZE(args.args)
|| nret > ARRAY_SIZE(args.args)
|| nargs + nret > ARRAY_SIZE(args.args))
return -EINVAL;
@@ -1174,7 +1174,7 @@ void __init rtas_initialize(void)
* the stop-self token if any
*/
#ifdef CONFIG_PPC64
- if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
ibm_suspend_me_token = rtas_token("ibm,suspend-me");
}
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index c638e2487a9c..a26a02006576 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -483,7 +483,7 @@ static void rtas_event_scan(struct work_struct *w)
}
#ifdef CONFIG_PPC64
-static void retreive_nvram_error_log(void)
+static void retrieve_nvram_error_log(void)
{
unsigned int err_type ;
int rc ;
@@ -501,7 +501,7 @@ static void retreive_nvram_error_log(void)
}
}
#else /* CONFIG_PPC64 */
-static void retreive_nvram_error_log(void)
+static void retrieve_nvram_error_log(void)
{
}
#endif /* CONFIG_PPC64 */
@@ -513,7 +513,7 @@ static void start_event_scan(void)
(30000 / rtas_event_scan_rate));
/* Retrieve errors from nvram if any */
- retreive_nvram_error_log();
+ retrieve_nvram_error_log();
schedule_delayed_work_on(cpumask_first(cpu_online_mask),
&event_scan_work, event_scan_delay);
@@ -526,10 +526,8 @@ void rtas_cancel_event_scan(void)
}
EXPORT_SYMBOL_GPL(rtas_cancel_event_scan);
-static int __init rtas_init(void)
+static int __init rtas_event_scan_init(void)
{
- struct proc_dir_entry *entry;
-
if (!machine_is(pseries) && !machine_is(chrp))
return 0;
@@ -562,13 +560,27 @@ static int __init rtas_init(void)
return -ENOMEM;
}
+ start_event_scan();
+
+ return 0;
+}
+arch_initcall(rtas_event_scan_init);
+
+static int __init rtas_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ if (!machine_is(pseries) && !machine_is(chrp))
+ return 0;
+
+ if (!rtas_log_buf)
+ return -ENODEV;
+
entry = proc_create("powerpc/rtas/error_log", S_IRUSR, NULL,
&proc_rtas_log_operations);
if (!entry)
printk(KERN_ERR "Failed to create error_log proc entry\n");
- start_event_scan();
-
return 0;
}
__initcall(rtas_init);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 8ca79b7503d8..714b4ba7ab86 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -35,6 +35,7 @@
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
+#include <linux/hugetlb.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
@@ -61,6 +62,12 @@
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
+#include <asm/udbg.h>
+#include <asm/hugetlb.h>
+#include <asm/livepatch.h>
+#include <asm/mmu_context.h>
+
+#include "setup.h"
#ifdef DEBUG
#include <asm/udbg.h>
@@ -494,7 +501,7 @@ void __init smp_setup_cpu_maps(void)
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
- if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
const __be32 *ireg;
@@ -575,6 +582,7 @@ void probe_machine(void)
{
extern struct machdep_calls __machine_desc_start;
extern struct machdep_calls __machine_desc_end;
+ unsigned int i;
/*
* Iterate all ppc_md structures until we find the proper
@@ -582,6 +590,17 @@ void probe_machine(void)
*/
DBG("Probing machine type ...\n");
+ /*
+ * Check that ppc_md is empty; if not we have a bug, i.e. we set up an
+ * entry before probe_machine() which will be overwritten.
+ */
+ for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
+ if (((void **)&ppc_md)[i]) {
+ printk(KERN_ERR "Entry %d in ppc_md non empty before"
+ " machine probe !\n", i);
+ }
+ }
+
for (machine_id = &__machine_desc_start;
machine_id < &__machine_desc_end;
machine_id++) {
@@ -676,6 +695,8 @@ static struct notifier_block ppc_panic_block = {
void __init setup_panic(void)
{
+ if (!ppc_md.panic)
+ return;
atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}
@@ -744,3 +765,169 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
set_dma_ops(&pdev->dev, &dma_direct_ops);
}
+
+static __init void print_system_info(void)
+{
+ pr_info("-----------------------------------------------------\n");
+#ifdef CONFIG_PPC_STD_MMU_64
+ pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+ pr_info("Hash_size = 0x%lx\n", Hash_size);
+#endif
+ pr_info("phys_mem_size = 0x%llx\n",
+ (unsigned long long)memblock_phys_mem_size());
+
+ pr_info("dcache_bsize = 0x%x\n", dcache_bsize);
+ pr_info("icache_bsize = 0x%x\n", icache_bsize);
+ if (ucache_bsize != 0)
+ pr_info("ucache_bsize = 0x%x\n", ucache_bsize);
+
+ pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
+ pr_info(" possible = 0x%016lx\n",
+ (unsigned long)CPU_FTRS_POSSIBLE);
+ pr_info(" always = 0x%016lx\n",
+ (unsigned long)CPU_FTRS_ALWAYS);
+ pr_info("cpu_user_features = 0x%08x 0x%08x\n",
+ cur_cpu_spec->cpu_user_features,
+ cur_cpu_spec->cpu_user_features2);
+ pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
+#ifdef CONFIG_PPC64
+ pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+#endif
+
+#ifdef CONFIG_PPC_STD_MMU_64
+ if (htab_address)
+ pr_info("htab_address = 0x%p\n", htab_address);
+ if (htab_hash_mask)
+ pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+ if (Hash)
+ pr_info("Hash = 0x%p\n", Hash);
+ if (Hash_mask)
+ pr_info("Hash_mask = 0x%lx\n", Hash_mask);
+#endif
+
+ if (PHYSICAL_START > 0)
+ pr_info("physical_start = 0x%llx\n",
+ (unsigned long long)PHYSICAL_START);
+ pr_info("-----------------------------------------------------\n");
+}
+
+/*
+ * Called from start_kernel; this initializes memblock, which is used
+ * to manage page allocation until mem_init is called.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+ *cmdline_p = boot_command_line;
+
+ /* Set a half-reasonable default so udelay does something sensible */
+ loops_per_jiffy = 500000000 / HZ;
+
+ /* Unflatten the device-tree passed by prom_init or kexec */
+ unflatten_device_tree();
+
+ /*
+ * Initialize cache line/block info from device-tree (on ppc64) or
+ * just cputable (on ppc32).
+ */
+ initialize_cache_info();
+
+ /* Initialize RTAS if available. */
+ rtas_initialize();
+
+ /* Check if we have an initrd provided via the device-tree. */
+ check_for_initrd();
+
+ /* Probe the machine type, establish ppc_md. */
+ probe_machine();
+
+ /* Setup panic notifier if requested by the platform. */
+ setup_panic();
+
+ /*
+ * Configure ppc_md.power_save (ppc32 only; 64-bit machines do
+ * it from their respective probe() functions).
+ */
+ setup_power_save();
+
+ /* Discover standard serial ports. */
+ find_legacy_serial_ports();
+
+ /* Register early console with the printk subsystem. */
+ register_early_udbg_console();
+
+ /* Setup the various CPU maps based on the device-tree. */
+ smp_setup_cpu_maps();
+
+ /* Initialize xmon. */
+ xmon_setup();
+
+ /* Check the SMT related command line arguments (ppc64). */
+ check_smt_enabled();
+
+ /* On BookE, setup per-core TLB data structures. */
+ setup_tlb_core_data();
+
+ /*
+ * Release secondary cpus out of their spinloops at 0x60 now that
+ * we can map physical -> logical CPU ids.
+ *
+ * Freescale Book3e parts spin in a loop provided by firmware,
+ * so smp_release_cpus() does nothing for them.
+ */
+#ifdef CONFIG_SMP
+ smp_release_cpus();
+#endif
+
+ /* Print various info about the machine that has been gathered so far. */
+ print_system_info();
+
+ /* Reserve large chunks of memory for use by CMA for KVM. */
+ kvm_cma_reserve();
+
+ /*
+ * Reserve any gigantic pages requested on the command line.
+ * memblock needs to have been initialized by the time this is
+ * called since this will reserve memory.
+ */
+ reserve_hugetlb_gpages();
+
+ klp_init_thread_info(&init_thread_info);
+
+ init_mm.start_code = (unsigned long)_stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = klimit;
+#ifdef CONFIG_PPC_64K_PAGES
+ init_mm.context.pte_frag = NULL;
+#endif
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ mm_iommu_init(&init_mm.context);
+#endif
+ irqstack_early_init();
+ exc_lvl_early_init();
+ emergency_stack_init();
+
+ initmem_init();
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+ if (ppc_md.setup_arch)
+ ppc_md.setup_arch();
+
+ paging_init();
+
+ /* Initialize the MMU context management stuff. */
+ mmu_context_init();
+
+#ifdef CONFIG_PPC64
+ /* Interrupt code needs to be 64K-aligned. */
+ if ((unsigned long)_stext & 0xffff)
+ panic("Kernelbase not 64K-aligned (0x%lx)!\n",
+ (unsigned long)_stext);
+#endif
+}
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
new file mode 100644
index 000000000000..cfba134b3024
--- /dev/null
+++ b/arch/powerpc/kernel/setup.h
@@ -0,0 +1,58 @@
+/*
+ * Prototypes for functions that are shared between setup_(32|64|common).c
+ *
+ * Copyright 2016 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ARCH_POWERPC_KERNEL_SETUP_H
+#define __ARCH_POWERPC_KERNEL_SETUP_H
+
+void initialize_cache_info(void);
+void irqstack_early_init(void);
+
+#ifdef CONFIG_PPC32
+void setup_power_save(void);
+#else
+static inline void setup_power_save(void) { };
+#endif
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+void check_smt_enabled(void);
+#else
+static inline void check_smt_enabled(void) { };
+#endif
+
+#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+void setup_tlb_core_data(void);
+#else
+static inline void setup_tlb_core_data(void) { };
+#endif
+
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+void exc_lvl_early_init(void);
+#else
+static inline void exc_lvl_early_init(void) { };
+#endif
+
+#ifdef CONFIG_PPC64
+void emergency_stack_init(void);
+#else
+static inline void emergency_stack_init(void) { };
+#endif
+
+/*
+ * Having this in kvm_ppc.h makes include dependencies too
+ * tricky to solve for setup-common.c so have it here.
+ */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvm_cma_reserve(void);
+#else
+static inline void kvm_cma_reserve(void) { };
+#endif
+
+#endif /* __ARCH_POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index d544fa311757..00f57754407e 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -36,8 +36,6 @@
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
-#include <asm/mmu_context.h>
-#include <asm/epapr_hcalls.h>
#include <asm/code-patching.h>
#define DBG(fmt...)
@@ -62,9 +60,7 @@ int icache_bsize;
int ucache_bsize;
/*
- * We're called here very early in the boot. We determine the machine
- * type and call the appropriate low-level setup functions.
- * -- Cort <cort@fsmlabs.com>
+ * We're called here very early in the boot.
*
* Note that the kernel may be running at an address which is different
* from the address that it was linked at, so we must use RELOC/PTRRELOC
@@ -73,7 +69,6 @@ int ucache_bsize;
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long offset = reloc_offset();
- struct cpu_spec *spec;
/* First zero the BSS -- use memset_io, some platforms don't have
* caches on yet */
@@ -84,27 +79,19 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
- spec = identify_cpu(offset, mfspr(SPRN_PVR));
+ identify_cpu(offset, mfspr(SPRN_PVR));
- do_feature_fixups(spec->cpu_features,
- PTRRELOC(&__start___ftr_fixup),
- PTRRELOC(&__stop___ftr_fixup));
-
- do_feature_fixups(spec->mmu_features,
- PTRRELOC(&__start___mmu_ftr_fixup),
- PTRRELOC(&__stop___mmu_ftr_fixup));
-
- do_lwsync_fixups(spec->cpu_features,
- PTRRELOC(&__start___lwsync_fixup),
- PTRRELOC(&__stop___lwsync_fixup));
-
- do_final_fixups();
+ apply_feature_fixups();
return KERNELBASE + offset;
}
/*
+ * This is run before start_kernel(). The kernel has been relocated
+ * and we are running with enough of the MMU enabled to have our
+ * proper kernel virtual addresses.
+ *
* Find out what kind of machine we're on and save any data we need
* from the early boot process (devtree is copied on pmac by prom_init()).
* This is called very early on the boot process, after a minimal
@@ -123,27 +110,9 @@ notrace void __init machine_init(u64 dt_ptr)
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
- epapr_paravirt_early_init();
-
early_init_mmu();
- probe_machine();
-
setup_kdump_trampoline();
-
-#ifdef CONFIG_6xx
- if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
- cpu_has_feature(CPU_FTR_CAN_NAP))
- ppc_md.power_save = ppc6xx_idle;
-#endif
-
-#ifdef CONFIG_E500
- if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
- cpu_has_feature(CPU_FTR_CAN_NAP))
- ppc_md.power_save = e500_idle;
-#endif
- if (ppc_md.progress)
- ppc_md.progress("id mach(): done", 0x200);
}
/* Checks "l2cr=xxxx" command-line option */
@@ -221,7 +190,7 @@ int __init ppc_init(void)
arch_initcall(ppc_init);
-static void __init irqstack_early_init(void)
+void __init irqstack_early_init(void)
{
unsigned int i;
@@ -236,7 +205,7 @@ static void __init irqstack_early_init(void)
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-static void __init exc_lvl_early_init(void)
+void __init exc_lvl_early_init(void)
{
unsigned int i, hw_cpu;
@@ -259,33 +228,25 @@ static void __init exc_lvl_early_init(void)
#endif
}
}
-#else
-#define exc_lvl_early_init()
#endif
-/* Warning, IO base is not yet inited */
-void __init setup_arch(char **cmdline_p)
+void __init setup_power_save(void)
{
- *cmdline_p = boot_command_line;
-
- /* so udelay does something sensible, assume <= 1000 bogomips */
- loops_per_jiffy = 500000000 / HZ;
-
- unflatten_device_tree();
- check_for_initrd();
-
- if (ppc_md.init_early)
- ppc_md.init_early();
-
- find_legacy_serial_ports();
-
- smp_setup_cpu_maps();
-
- /* Register early console */
- register_early_udbg_console();
+#ifdef CONFIG_6xx
+ if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
+ cpu_has_feature(CPU_FTR_CAN_NAP))
+ ppc_md.power_save = ppc6xx_idle;
+#endif
- xmon_setup();
+#ifdef CONFIG_E500
+ if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
+ cpu_has_feature(CPU_FTR_CAN_NAP))
+ ppc_md.power_save = e500_idle;
+#endif
+}
+__init void initialize_cache_info(void)
+{
/*
* Set cache line size based on type of cpu as a default.
* Systems with OF can look in the properties on the cpu node(s)
@@ -296,32 +257,4 @@ void __init setup_arch(char **cmdline_p)
ucache_bsize = 0;
if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
ucache_bsize = icache_bsize = dcache_bsize;
-
- if (ppc_md.panic)
- setup_panic();
-
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
-
- exc_lvl_early_init();
-
- irqstack_early_init();
-
- initmem_init();
- if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab);
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
- if (ppc_md.setup_arch)
- ppc_md.setup_arch();
- if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
-
- paging_init();
-
- /* Initialize the MMU context management stuff */
- mmu_context_init();
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 96d4a2b23d0f..d8216aed22b7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -35,7 +35,6 @@
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
-#include <linux/hugetlb.h>
#include <linux/memory.h>
#include <linux/nmi.h>
@@ -64,12 +63,10 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
-#include <asm/mmu_context.h>
#include <asm/code-patching.h>
-#include <asm/kvm_ppc.h>
-#include <asm/hugetlb.h>
-#include <asm/epapr_hcalls.h>
#include <asm/livepatch.h>
+#include <asm/opal.h>
+#include <asm/cputhreads.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -100,7 +97,7 @@ int icache_bsize;
int ucache_bsize;
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
-static void setup_tlb_core_data(void)
+void __init setup_tlb_core_data(void)
{
int cpu;
@@ -133,10 +130,6 @@ static void setup_tlb_core_data(void)
}
}
}
-#else
-static void setup_tlb_core_data(void)
-{
-}
#endif
#ifdef CONFIG_SMP
@@ -144,7 +137,7 @@ static void setup_tlb_core_data(void)
static char *smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option */
-static void check_smt_enabled(void)
+void __init check_smt_enabled(void)
{
struct device_node *dn;
const char *smt_option;
@@ -193,12 +186,10 @@ static int __init early_smt_enabled(char *p)
}
early_param("smt-enabled", early_smt_enabled);
-#else
-#define check_smt_enabled()
#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
-static void fixup_boot_paca(void)
+static void __init fixup_boot_paca(void)
{
/* The boot cpu is started */
get_paca()->cpu_start = 1;
@@ -206,23 +197,50 @@ static void fixup_boot_paca(void)
get_paca()->data_offset = 0;
}
-static void cpu_ready_for_interrupts(void)
+static void __init configure_exceptions(void)
{
- /* Set IR and DR in PACA MSR */
- get_paca()->kernel_msr = MSR_KERNEL;
-
/*
- * Enable AIL if supported, and we are in hypervisor mode. If we are
- * not in hypervisor mode, we enable relocation-on interrupts later
- * in pSeries_setup_arch() using the H_SET_MODE hcall.
+ * Setup the trampolines from the lowmem exception vectors
+ * to the kdump kernel when not using a relocatable kernel.
*/
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- cpu_has_feature(CPU_FTR_ARCH_207S)) {
- unsigned long lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ setup_kdump_trampoline();
+
+ /* Under a PAPR hypervisor, we need hypercalls */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ /* Enable AIL if possible */
+ pseries_enable_reloc_on_exc();
+
+ /*
+ * Tell the hypervisor that we want our exceptions to
+ * be taken in little endian mode.
+ *
+ * We don't call this for big endian as our calling convention
+ * makes us always enter in BE, and the call may fail under
+ * some circumstances with kdump.
+ */
+#ifdef __LITTLE_ENDIAN__
+ pseries_little_endian_exceptions();
+#endif
+ } else {
+ /* Set endian mode using OPAL */
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ opal_configure_cores();
+
+ /* Enable AIL if supported, and we are in hypervisor mode */
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ unsigned long lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ }
}
}
+static void cpu_ready_for_interrupts(void)
+{
+ /* Set IR and DR in PACA MSR */
+ get_paca()->kernel_msr = MSR_KERNEL;
+}
+
/*
* Early initialization entry point. This is called by head.S
* with MMU translation disabled. We rely on the "feature" of
@@ -270,22 +288,22 @@ void __init early_setup(unsigned long dt_ptr)
*/
early_init_devtree(__va(dt_ptr));
- epapr_paravirt_early_init();
-
/* Now we know the logical id of our boot cpu, setup the paca. */
setup_paca(&paca[boot_cpuid]);
fixup_boot_paca();
- /* Probe the machine type */
- probe_machine();
-
- setup_kdump_trampoline();
-
- DBG("Found, Initializing memory management...\n");
+ /*
+ * Configure exception handlers. This includes setting up trampolines
+ * if needed, setting exception endian mode, etc.
+ */
+ configure_exceptions();
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ /* Apply all the dynamic patching */
+ apply_feature_fixups();
+
/*
* At this point, we can let interrupts switch to virtual mode
* (the MMU has been setup), so adjust the MSR in the PACA to
@@ -293,16 +311,6 @@ void __init early_setup(unsigned long dt_ptr)
*/
cpu_ready_for_interrupts();
- /* Reserve large chunks of memory for use by CMA for KVM */
- kvm_cma_reserve();
-
- /*
- * Reserve any gigantic pages requested on the command line.
- * memblock needs to have been initialized by the time this is
- * called since this will reserve memory.
- */
- reserve_hugetlb_gpages();
-
DBG(" <- early_setup()\n");
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
@@ -321,7 +329,7 @@ void __init early_setup(unsigned long dt_ptr)
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
- /* Mark interrupts enabled in PACA */
+ /* Mark interrupts disabled in PACA */
get_paca()->soft_enabled = 0;
/* Initialize the hash table or TLB handling */
@@ -391,7 +399,7 @@ void smp_release_cpus(void)
* cache informations about the CPU that will be used by cache flush
* routines and/or provided to userland
*/
-static void __init initialize_cache_info(void)
+void __init initialize_cache_info(void)
{
struct device_node *np;
unsigned long num_cpus = 0;
@@ -456,127 +464,11 @@ static void __init initialize_cache_info(void)
}
}
- DBG(" <- initialize_cache_info()\n");
-}
-
-
-/*
- * Do some initial setup of the system. The parameters are those which
- * were passed in from the bootloader.
- */
-void __init setup_system(void)
-{
- DBG(" -> setup_system()\n");
-
- /* Apply the CPUs-specific and firmware specific fixups to kernel
- * text (nop out sections not relevant to this CPU or this firmware)
- */
- do_feature_fixups(cur_cpu_spec->cpu_features,
- &__start___ftr_fixup, &__stop___ftr_fixup);
- do_feature_fixups(cur_cpu_spec->mmu_features,
- &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
- do_feature_fixups(powerpc_firmware_features,
- &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
- do_lwsync_fixups(cur_cpu_spec->cpu_features,
- &__start___lwsync_fixup, &__stop___lwsync_fixup);
- do_final_fixups();
-
- /*
- * Unflatten the device-tree passed by prom_init or kexec
- */
- unflatten_device_tree();
-
- /*
- * Fill the ppc64_caches & systemcfg structures with informations
- * retrieved from the device-tree.
- */
- initialize_cache_info();
-
-#ifdef CONFIG_PPC_RTAS
- /*
- * Initialize RTAS if available
- */
- rtas_initialize();
-#endif /* CONFIG_PPC_RTAS */
-
- /*
- * Check if we have an initrd provided via the device-tree
- */
- check_for_initrd();
-
- /*
- * Do some platform specific early initializations, that includes
- * setting up the hash table pointers. It also sets up some interrupt-mapping
- * related options that will be used by finish_device_tree()
- */
- if (ppc_md.init_early)
- ppc_md.init_early();
-
- /*
- * We can discover serial ports now since the above did setup the
- * hash table management for us, thus ioremap works. We do that early
- * so that further code can be debugged
- */
- find_legacy_serial_ports();
-
- /*
- * Register early console
- */
- register_early_udbg_console();
-
- /*
- * Initialize xmon
- */
- xmon_setup();
-
- smp_setup_cpu_maps();
- check_smt_enabled();
- setup_tlb_core_data();
-
- /*
- * Freescale Book3e parts spin in a loop provided by firmware,
- * so smp_release_cpus() does nothing for them
- */
-#if defined(CONFIG_SMP)
- /* Release secondary cpus out of their spinloops at 0x60 now that
- * we can map physical -> logical CPU ids
- */
- smp_release_cpus();
-#endif
-
- pr_info("Starting Linux %s %s\n", init_utsname()->machine,
- init_utsname()->version);
-
- pr_info("-----------------------------------------------------\n");
- pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
- pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
-
- if (ppc64_caches.dline_size != 0x80)
- pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
- if (ppc64_caches.iline_size != 0x80)
- pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
-
- pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
- pr_info(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
- pr_info(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
- pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
- cur_cpu_spec->cpu_user_features2);
- pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
- pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
-
-#ifdef CONFIG_PPC_STD_MMU_64
- if (htab_address)
- pr_info("htab_address = 0x%p\n", htab_address);
-
- pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#endif
-
- if (PHYSICAL_START > 0)
- pr_info("physical_start = 0x%llx\n",
- (unsigned long long)PHYSICAL_START);
- pr_info("-----------------------------------------------------\n");
+ /* For use by binfmt_elf */
+ dcache_bsize = ppc64_caches.dline_size;
+ icache_bsize = ppc64_caches.iline_size;
- DBG(" <- setup_system()\n");
+ DBG(" <- initialize_cache_info()\n");
}
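
The two assignments above feed the values that binfmt_elf publishes to every process through the ELF auxiliary vector (AT_DCACHEBSIZE / AT_ICACHEBSIZE on powerpc). A small userspace sketch of reading them back, assuming a powerpc glibc system where those tags are defined in <elf.h>:

#include <stdio.h>
#include <elf.h>		/* AT_DCACHEBSIZE, AT_ICACHEBSIZE (powerpc) */
#include <sys/auxv.h>		/* getauxval() */

int main(void)
{
	unsigned long dbs = getauxval(AT_DCACHEBSIZE);
	unsigned long ibs = getauxval(AT_ICACHEBSIZE);

	/* 0 means the tag was not present in the aux vector */
	printf("dcache block size: %lu bytes\n", dbs);
	printf("icache block size: %lu bytes\n", ibs);
	return 0;
}
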
/* This returns the limit below which memory accesses to the linear
@@ -584,7 +476,7 @@ void __init setup_system(void)
* used to allocate interrupt or emergency stacks for which our
* exception entry path doesn't deal with being interrupted.
*/
-static u64 safe_stack_limit(void)
+static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
/* Freescale BookE bolts the entire linear mapping */
@@ -600,7 +492,7 @@ static u64 safe_stack_limit(void)
#endif
}
-static void __init irqstack_early_init(void)
+void __init irqstack_early_init(void)
{
u64 limit = safe_stack_limit();
unsigned int i;
@@ -620,7 +512,7 @@ static void __init irqstack_early_init(void)
}
#ifdef CONFIG_PPC_BOOK3E
-static void __init exc_lvl_early_init(void)
+void __init exc_lvl_early_init(void)
{
unsigned int i;
unsigned long sp;
@@ -642,8 +534,6 @@ static void __init exc_lvl_early_init(void)
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
patch_exception(0x040, exc_debug_debug_book3e);
}
-#else
-#define exc_lvl_early_init()
#endif
/*
@@ -651,7 +541,7 @@ static void __init exc_lvl_early_init(void)
* early in SMP boots before relocation is enabled. Exclusive emergency
* stack for machine checks.
*/
-static void __init emergency_stack_init(void)
+void __init emergency_stack_init(void)
{
u64 limit;
unsigned int i;
@@ -682,61 +572,6 @@ static void __init emergency_stack_init(void)
}
}
-/*
- * Called into from start_kernel this initializes memblock, which is used
- * to manage page allocation until mem_init is called.
- */
-void __init setup_arch(char **cmdline_p)
-{
- *cmdline_p = boot_command_line;
-
- /*
- * Set cache line size based on type of cpu as a default.
- * Systems with OF can look in the properties on the cpu node(s)
- * for a possibly more accurate value.
- */
- dcache_bsize = ppc64_caches.dline_size;
- icache_bsize = ppc64_caches.iline_size;
-
- if (ppc_md.panic)
- setup_panic();
-
- klp_init_thread_info(&init_thread_info);
-
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
-#ifdef CONFIG_PPC_64K_PAGES
- init_mm.context.pte_frag = NULL;
-#endif
-#ifdef CONFIG_SPAPR_TCE_IOMMU
- mm_iommu_init(&init_mm.context);
-#endif
- irqstack_early_init();
- exc_lvl_early_init();
- emergency_stack_init();
-
- initmem_init();
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
- if (ppc_md.setup_arch)
- ppc_md.setup_arch();
-
- paging_init();
-
- /* Initialize the MMU context management stuff */
- mmu_context_init();
-
- /* Interrupt code needs to be 64K-aligned */
- if ((unsigned long)_stext & 0xffff)
- panic("Kernelbase not 64K-aligned (0x%lx)!\n",
- (unsigned long)_stext);
-}
-
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE ()
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 25520794aa37..7e49984d4331 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -104,6 +104,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
*/
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
+ unsigned long vrsave;
#endif
unsigned long msr = regs->msr;
long err = 0;
@@ -125,9 +126,13 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec.
*/
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- current->thread.vrsave = mfspr(SPRN_VRSAVE);
- err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+ vrsave = 0;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ vrsave = mfspr(SPRN_VRSAVE);
+ current->thread.vrsave = vrsave;
+ }
+
+ err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 55c924b65f71..5a1f015ea9f3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -31,6 +31,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
+#include <linux/profile.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -53,6 +54,7 @@
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
+#include <asm/asm-prototypes.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -593,6 +595,7 @@ out:
of_node_put(np);
return id;
}
+EXPORT_SYMBOL_GPL(cpu_to_core_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 692873bff334..c4f1d1f7bae0 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -35,7 +35,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PPC64
/* Time in microseconds we delay before sleeping in the idle loop */
-DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
+static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
static ssize_t store_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 3ed9a5a21d77..4e7759c8ca30 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -96,7 +96,8 @@ static struct clocksource clocksource_timebase = {
.read = timebase_read,
};
-#define DECREMENTER_MAX 0x7fffffff
+#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
+u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev);
@@ -166,7 +167,15 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
cputime_t cputime_one_jiffy;
+#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
+#endif
+
+#ifdef CONFIG_PPC64
+#define get_accounting(tsk) (&get_paca()->accounting)
+#else
+#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
+#endif
static void calc_cputime_factors(void)
{
@@ -186,7 +195,7 @@ static void calc_cputime_factors(void)
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
-static u64 read_spurr(u64 tb)
+static unsigned long read_spurr(unsigned long tb)
{
if (cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
@@ -249,8 +258,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
void accumulate_stolen_time(void)
{
u64 sst, ust;
-
u8 save_soft_enabled = local_paca->soft_enabled;
+ struct cpu_accounting_data *acct = &local_paca->accounting;
/* We are called early in the exception entry, before
* soft/hard_enabled are sync'ed to the expected state
@@ -260,10 +269,10 @@ void accumulate_stolen_time(void)
*/
local_paca->soft_enabled = 0;
- sst = scan_dispatch_log(local_paca->starttime_user);
- ust = scan_dispatch_log(local_paca->starttime);
- local_paca->system_time -= sst;
- local_paca->user_time -= ust;
+ sst = scan_dispatch_log(acct->starttime_user);
+ ust = scan_dispatch_log(acct->starttime);
+ acct->system_time -= sst;
+ acct->user_time -= ust;
local_paca->stolen_time += ust + sst;
local_paca->soft_enabled = save_soft_enabled;
@@ -275,7 +284,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
stolen = scan_dispatch_log(stop_tb);
- get_paca()->system_time -= stolen;
+ get_paca()->accounting.system_time -= stolen;
}
stolen += get_paca()->stolen_time;
@@ -295,27 +304,29 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
* Account time for a transition between system, hard irq
* or soft irq state.
*/
-static u64 vtime_delta(struct task_struct *tsk,
- u64 *sys_scaled, u64 *stolen)
+static unsigned long vtime_delta(struct task_struct *tsk,
+ unsigned long *sys_scaled,
+ unsigned long *stolen)
{
- u64 now, nowscaled, deltascaled;
- u64 udelta, delta, user_scaled;
+ unsigned long now, nowscaled, deltascaled;
+ unsigned long udelta, delta, user_scaled;
+ struct cpu_accounting_data *acct = get_accounting(tsk);
WARN_ON_ONCE(!irqs_disabled());
now = mftb();
nowscaled = read_spurr(now);
- get_paca()->system_time += now - get_paca()->starttime;
- get_paca()->starttime = now;
- deltascaled = nowscaled - get_paca()->startspurr;
- get_paca()->startspurr = nowscaled;
+ acct->system_time += now - acct->starttime;
+ acct->starttime = now;
+ deltascaled = nowscaled - acct->startspurr;
+ acct->startspurr = nowscaled;
*stolen = calculate_stolen_time(now);
- delta = get_paca()->system_time;
- get_paca()->system_time = 0;
- udelta = get_paca()->user_time - get_paca()->utime_sspurr;
- get_paca()->utime_sspurr = get_paca()->user_time;
+ delta = acct->system_time;
+ acct->system_time = 0;
+ udelta = acct->user_time - acct->utime_sspurr;
+ acct->utime_sspurr = acct->user_time;
/*
* Because we don't read the SPURR on every kernel entry/exit,
@@ -337,14 +348,14 @@ static u64 vtime_delta(struct task_struct *tsk,
*sys_scaled = deltascaled;
}
}
- get_paca()->user_time_scaled += user_scaled;
+ acct->user_time_scaled += user_scaled;
return delta;
}
void vtime_account_system(struct task_struct *tsk)
{
- u64 delta, sys_scaled, stolen;
+ unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
account_system_time(tsk, 0, delta, sys_scaled);
@@ -355,7 +366,7 @@ EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk)
{
- u64 delta, sys_scaled, stolen;
+ unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
account_idle_time(delta + stolen);
@@ -373,15 +384,32 @@ void vtime_account_idle(struct task_struct *tsk)
void vtime_account_user(struct task_struct *tsk)
{
cputime_t utime, utimescaled;
+ struct cpu_accounting_data *acct = get_accounting(tsk);
- utime = get_paca()->user_time;
- utimescaled = get_paca()->user_time_scaled;
- get_paca()->user_time = 0;
- get_paca()->user_time_scaled = 0;
- get_paca()->utime_sspurr = 0;
+ utime = acct->user_time;
+ utimescaled = acct->user_time_scaled;
+ acct->user_time = 0;
+ acct->user_time_scaled = 0;
+ acct->utime_sspurr = 0;
account_user_time(tsk, utime, utimescaled);
}
+#ifdef CONFIG_PPC32
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+ struct cpu_accounting_data *acct = get_accounting(current);
+
+ acct->starttime = get_accounting(prev)->starttime;
+ acct->system_time = 0;
+ acct->user_time = 0;
+}
+#endif /* CONFIG_PPC32 */
+
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif
@@ -504,8 +532,8 @@ static void __timer_interrupt(void)
__this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
- if (now <= DECREMENTER_MAX)
- set_dec((int)now);
+ if (now <= decrementer_max)
+ set_dec(now);
/* We may have raced with new irq work */
if (test_irq_work_pending())
set_dec(1);
@@ -535,7 +563,7 @@ void timer_interrupt(struct pt_regs * regs)
/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions.
*/
- set_dec(DECREMENTER_MAX);
+ set_dec(decrementer_max);
/* Some implementations of hotplug will get timer interrupts while
* offline, just ignore these and we also need to set
@@ -583,9 +611,9 @@ static void generic_suspend_disable_irqs(void)
* with suspending.
*/
- set_dec(DECREMENTER_MAX);
+ set_dec(decrementer_max);
local_irq_disable();
- set_dec(DECREMENTER_MAX);
+ set_dec(decrementer_max);
}
static void generic_suspend_enable_irqs(void)
@@ -866,7 +894,7 @@ static int decrementer_set_next_event(unsigned long evt,
static int decrementer_shutdown(struct clock_event_device *dev)
{
- decrementer_set_next_event(DECREMENTER_MAX, dev);
+ decrementer_set_next_event(decrementer_max, dev);
return 0;
}
@@ -892,6 +920,49 @@ static void register_decrementer_clockevent(int cpu)
clockevents_register_device(dec);
}
+static void enable_large_decrementer(void)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
+ return;
+
+ /*
+ * If we're running as the hypervisor we need to enable the LD manually;
+ * otherwise firmware should have done it for us.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
+}
+
+static void __init set_decrementer_max(void)
+{
+ struct device_node *cpu;
+ u32 bits = 32;
+
+ /* Prior to ISAv3 the decrementer is always 32 bit */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ cpu = of_find_node_by_type(NULL, "cpu");
+
+ if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
+ if (bits > 64 || bits < 32) {
+ pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
+ bits = 32;
+ }
+
+ /* calculate the signed maximum given this many bits */
+ decrementer_max = (1ul << (bits - 1)) - 1;
+ }
+
+ of_node_put(cpu);
+
+ pr_info("time_init: %u bit decrementer (max: %llx)\n",
+ bits, decrementer_max);
+}
+
static void __init init_decrementer_clockevent(void)
{
int cpu = smp_processor_id();
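
For reference, the arithmetic in set_decrementer_max() above, worked standalone: an n-bit signed decrementer tops out at 2^(n-1) - 1, which for 32 bits reproduces DECREMENTER_DEFAULT_MAX. This sketch uses 1ull so it is safe on 32-bit hosts; the 56-bit row is just an example value a device tree might report.

#include <stdio.h>

int main(void)
{
	unsigned int bits[] = { 32, 56, 64 };

	for (unsigned int i = 0; i < 3; i++) {
		/* same formula as the kernel: (1ul << (bits - 1)) - 1 */
		unsigned long long max = (1ull << (bits[i] - 1)) - 1;
		printf("%2u-bit decrementer: max = 0x%llx\n", bits[i], max);
	}
	return 0;
}
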
@@ -899,7 +970,7 @@ static void __init init_decrementer_clockevent(void)
clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
decrementer_clockevent.max_delta_ns =
- clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
+ clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
decrementer_clockevent.min_delta_ns =
clockevent_delta2ns(2, &decrementer_clockevent);
@@ -908,6 +979,9 @@ static void __init init_decrementer_clockevent(void)
void secondary_cpu_time_init(void)
{
+ /* Enable and test the large decrementer for this cpu */
+ enable_large_decrementer();
+
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
@@ -973,6 +1047,10 @@ void __init time_init(void)
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
+ /* initialise and enable the large decrementer (if we have one) */
+ set_decrementer_max();
+ enable_large_decrementer();
+
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index bf8f34a58670..298afcf3bf2a 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)
- /* We need to setup MSR for VSX register save instructions. Here we
- * also clear the MSR RI since when we do the treclaim, we won't have a
- * valid kernel pointer for a while. We clear RI here as it avoids
- * adding another mtmsr closer to the treclaim. This makes the region
- * maked as non-recoverable wider than it needs to be but it saves on
- * inserting another mtmsrd later.
- */
+ /* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
ori r15, r15, MSR_FP
- li r16, MSR_RI
+ li r16, 0
ori r16, r16, MSR_EE /* IRQs hard off */
andc r15, r15, r16
oris r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
- /* The moment we treclaim, ALL of our GPRs will switch
+ /* Clear MSR RI since we are about to change r1, EE is already off. */
+ li r4, 0
+ mtmsrd r4, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ *
+ * The moment we treclaim, ALL of our GPRs will switch
* to user register state. (FPRs, CCR etc. also!)
* Use an sprg and a tm_scratch in the PACA to shuffle.
*/
@@ -197,6 +201,11 @@ dont_backup_fp:
/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+
+ /* Reset MSR RI so we can take SLB faults again */
+ li r11, MSR_RI
+ mtmsrd r11, 1
+
mfspr r11, SPRN_PPR
HMT_MEDIUM
@@ -329,8 +338,6 @@ _GLOBAL(__tm_recheckpoint)
*/
subi r7, r7, STACK_FRAME_OVERHEAD
- SET_SCRATCH0(r1)
-
mfmsr r6
/* R4 = original MSR to indicate whether thread used FP/Vector etc. */
@@ -397,11 +404,6 @@ restore_gprs:
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)
- /* Clear the MSR RI since we are about to change R1. EE is already off
- */
- li r4, 0
- mtmsrd r4, 1
-
REST_GPR(0, r7) /* GPR0 */
REST_2GPRS(2, r7) /* GPR2-3 */
REST_GPR(4, r7) /* GPR4 */
@@ -439,10 +441,34 @@ restore_gprs:
ld r6, _CCR(r7)
mtcr r6
- REST_GPR(1, r7) /* GPR1 */
- REST_GPR(5, r7) /* GPR5-7 */
REST_GPR(6, r7)
- ld r7, GPR7(r7)
+
+ /*
+ * Store r1 and r5 on the stack so that we can access them
+ * after we clear MSR RI.
+ */
+
+ REST_GPR(5, r7)
+ std r5, -8(r1)
+ ld r5, GPR1(r7)
+ std r5, -16(r1)
+
+ REST_GPR(7, r7)
+
+ /* Clear MSR RI since we are about to change r1. EE is already off */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ */
+
+ SET_SCRATCH0(r1)
+ ld r5, -8(r1)
+ ld r1, -16(r1)
/* Commit register state as checkpointed state: */
TRECHKPT
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9229ba63c370..f7e2f2e318bd 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -60,6 +60,7 @@
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
+#include <asm/asm-prototypes.h>
#include <sysdev/fsl_pci.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -1376,6 +1377,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
+ [FSCR_LM_LG] = "LM",
};
char *facility = "unknown";
u64 value;
@@ -1418,7 +1420,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
- mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ current->thread.fscr |= FSCR_DSCR;
+ mtspr(SPRN_FSCR, current->thread.fscr);
}
/* Read from DSCR (mfspr RT, 0x03) */
@@ -1432,6 +1435,14 @@ void facility_unavailable_exception(struct pt_regs *regs)
emulate_single_step(regs);
}
return;
+ } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * This process has touched LM, so turn it on forever
+ * for this process
+ */
+ current->thread.fscr |= FSCR_LM;
+ mtspr(SPRN_FSCR, current->thread.fscr);
+ return;
}
if ((status < ARRAY_SIZE(facility_strings)) &&
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 1c2e7a343bf5..616a6d854638 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -70,10 +70,11 @@ _GLOBAL(load_up_altivec)
MTMSRD(r5) /* enable use of AltiVec now */
isync
- /* Hack: if we get an altivec unavailable trap with VRSAVE
- * set to all zeros, we assume this is a broken application
- * that fails to set it properly, and thus we switch it to
- * all 1's
+ /*
+ * While userspace in general ignores VRSAVE, glibc uses it as a boolean
+ * to optimise userspace context save/restore. Whenever we take an
+ * altivec unavailable exception we must set VRSAVE to something non
+ * zero. Set it to all 1s. See also the programming note in the ISA.
*/
mfspr r4,SPRN_VRSAVE
cmpwi 0,r4,0
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 2dd91f79de05..b5fba689fca6 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -165,7 +165,7 @@ SECTIONS
. = ALIGN(8);
.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
{
-#ifdef CONFIG_RELOCATABLE_PPC32
+#ifdef CONFIG_PPC32
__dynamic_symtab = .;
#endif
*(.dynsym)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 114edace6cdd..a587e8f4fd26 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -34,9 +34,9 @@
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
- ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
- pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
- false);
+ mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
+ pte->pagesize, pte->pagesize,
+ MMU_SEGSIZE_256M, false);
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
@@ -169,13 +169,13 @@ map_again:
/* In case we tried normal mapping already, let's nuke old entries */
if (attempt > 1)
- if (ppc_md.hpte_remove(hpteg) < 0) {
+ if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
r = -1;
goto out_unlock;
}
- ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
- hpsize, hpsize, MMU_SEGSIZE_256M);
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
+ hpsize, hpsize, MMU_SEGSIZE_256M);
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
@@ -187,8 +187,10 @@ map_again:
trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte);
- /* The ppc_md code may give us a secondary entry even though we
- asked for a primary. Fix up. */
+ /*
+ * The mmu_hash_ops code may give us a secondary entry even
+ * though we asked for a primary. Fix up.
+ */
if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
hash = ~hash;
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 18cf6d1f8174..c379ff5a4438 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -242,7 +242,8 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
struct kvmppc_spapr_tce_table *stt;
long i, ret = H_SUCCESS, idx;
unsigned long entry, ua = 0;
- u64 __user *tces, tce;
+ u64 __user *tces;
+ u64 tce;
stt = kvmppc_find_table(vcpu, liobn);
if (!stt)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e571ad277398..86f0cae37a85 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -392,7 +392,7 @@ kvm_no_guest:
cmpwi r3, 0
bne 54f
/*
- * We jump to power7_wakeup_loss, which will return to the caller
+ * We jump to pnv_wakeup_loss, which will return to the caller
* of power7_nap in the powernv cpu offline loop. The value we
* put in r3 becomes the return value for power7_nap.
*/
@@ -401,7 +401,7 @@ kvm_no_guest:
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
li r3, 0
- b power7_wakeup_loss
+ b pnv_wakeup_loss
53: HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index d044b8b7c69d..901e6fe00c39 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,7 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 8e4f64f0b774..c4f7d6b86b9e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -35,7 +35,7 @@
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
-#include <asm/hvcall.h>
+#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
@@ -1690,7 +1690,7 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm)
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
spin_lock(&kvm_global_user_count_lock);
if (++kvm_global_user_count == 1)
- pSeries_disable_reloc_on_exc();
+ pseries_disable_reloc_on_exc();
spin_unlock(&kvm_global_user_count_lock);
}
return 0;
@@ -1706,7 +1706,7 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
spin_lock(&kvm_global_user_count_lock);
BUG_ON(kvm_global_user_count == 0);
if (--kvm_global_user_count == 0)
- pSeries_enable_reloc_on_exc();
+ pseries_enable_reloc_on_exc();
spin_unlock(&kvm_global_user_count_lock);
}
}
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 16c4d88ba27d..42a4b237df5f 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 8e6e51016cc5..fdec6e613e95 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -74,9 +74,9 @@ _GLOBAL(__csum_partial)
ld r11,24(r3)
/*
- * On POWER6 and POWER7 back to back addes take 2 cycles because of
- * the XER dependency. This means the fastest this loop can go is
- * 16 cycles per iteration. The scheduling of the loop below has
+ * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+ * because of the XER dependency. This means the fastest this loop can
+ * go is 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
@@ -275,9 +275,9 @@ source; ld r10,16(r3)
source; ld r11,24(r3)
/*
- * On POWER6 and POWER7 back to back addes take 2 cycles because of
- * the XER dependency. This means the fastest this loop can go is
- * 16 cycles per iteration. The scheduling of the loop below has
+ * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+ * because of the XER dependency. This means the fastest this loop can
+ * go is 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7ce3870d7ddd..defb2998b818 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -20,7 +20,8 @@
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
-
+#include <asm/setup.h>
+#include <asm/firmware.h>
struct fixup_entry {
unsigned long mask;
@@ -130,7 +131,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
-void do_final_fixups(void)
+static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
int *src, *dest;
@@ -151,6 +152,33 @@ void do_final_fixups(void)
#endif
}
+void apply_feature_fixups(void)
+{
+ struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);
+
+ /*
+ * Apply the CPU-specific and firmware specific fixups to kernel text
+ * (nop out sections not relevant to this CPU or this firmware).
+ */
+ do_feature_fixups(spec->cpu_features,
+ PTRRELOC(&__start___ftr_fixup),
+ PTRRELOC(&__stop___ftr_fixup));
+
+ do_feature_fixups(spec->mmu_features,
+ PTRRELOC(&__start___mmu_ftr_fixup),
+ PTRRELOC(&__stop___mmu_ftr_fixup));
+
+ do_lwsync_fixups(spec->cpu_features,
+ PTRRELOC(&__start___lwsync_fixup),
+ PTRRELOC(&__stop___lwsync_fixup));
+
+#ifdef CONFIG_PPC64
+ do_feature_fixups(powerpc_firmware_features,
+ &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+#endif
+ do_final_fixups();
+}
+
#ifdef CONFIG_FTR_FIXUP_SELFTEST
#define check(x) \
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebdf3365..b7b1237d4aa6 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -68,19 +68,3 @@ void __rw_yield(arch_rwlock_t *rw)
get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif
-
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_mb();
-
- while (lock->slock) {
- HMT_low();
- if (SHARED_PROCESSOR)
- __spin_yield(lock);
- }
- HMT_medium();
-
- smp_mb();
-}
-
-EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index c422812f7405..ae69d846a841 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -9,11 +9,7 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
#ifndef CONFIG_GENERIC_CSUM
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 69abf844c2c3..94058c21a482 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -325,7 +325,7 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
}
EXPORT_SYMBOL_GPL(rh_init);
-/* Attach a free memory region, coalesces regions if adjuscent */
+/* Attach a free memory region, coalesces regions if adjacent */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
rh_block_t *blk;
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index c80fb49ce607..beabc68d9a1e 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -16,15 +16,6 @@
PPC_LONG_ALIGN
.text
-_GLOBAL(strcpy)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r5)
- bne 1b
- blr
-
/* This clears out any unused part of the destination buffer,
just as the libc version does. -- paulus */
_GLOBAL(strncpy)
@@ -33,6 +24,7 @@ _GLOBAL(strncpy)
mtctr r5
addi r6,r3,-1
addi r4,r4,-1
+ .balign 16
1: lbzu r0,1(r4)
cmpwi 0,r0,0
stbu r0,1(r6)
@@ -45,36 +37,13 @@ _GLOBAL(strncpy)
bdnz 2b
blr
-_GLOBAL(strcat)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r0,1(r5)
- cmpwi 0,r0,0
- bne 1b
- addi r5,r5,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r5)
- bne 1b
- blr
-
-_GLOBAL(strcmp)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r3,1(r5)
- cmpwi 1,r3,0
- lbzu r0,1(r4)
- subf. r3,r0,r3
- beqlr 1
- beq 1b
- blr
-
_GLOBAL(strncmp)
PPC_LCMPI 0,r5,0
beq- 2f
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
+ .balign 16
1: lbzu r3,1(r5)
cmpwi 1,r3,0
lbzu r0,1(r4)
@@ -85,14 +54,6 @@ _GLOBAL(strncmp)
2: li r3,0
blr
-_GLOBAL(strlen)
- addi r4,r3,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- bne 1b
- subf r3,r3,r4
- blr
-
#ifdef CONFIG_PPC32
_GLOBAL(memcmp)
PPC_LCMPI 0,r5,0
@@ -114,6 +75,7 @@ _GLOBAL(memchr)
beq- 2f
mtctr r5
addi r3,r3,-1
+ .balign 16
1: lbzu r0,1(r3)
cmpw 0,r0,r4
bdnzf 2,1b
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index b27e030fc9f8..bf925cdcaca9 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
+#include <asm/asm-prototypes.h>
int enter_vmx_usercopy(void)
{
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 949100577db5..6c5025e81236 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -13,62 +13,115 @@
*/
#include <linux/memblock.h>
+#include <asm/fixmap.h>
+#include <asm/code-patching.h>
#include "mmu_decl.h"
+#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
+
extern int __map_without_ltlbs;
+
/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ * Return PA for this VA if it is in IMMR area, or 0
*/
-void __init MMU_init_hw(void)
+phys_addr_t v_block_mapped(unsigned long va)
{
- /* Nothing to do for the time being but keep it similar to other PPC */
+ unsigned long p = PHYS_IMMR_BASE;
+
+ if (__map_without_ltlbs)
+ return 0;
+ if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
+ return p + va - VIRT_IMMR_BASE;
+ return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_block_mapped(phys_addr_t pa)
+{
+ unsigned long p = PHYS_IMMR_BASE;
+
+ if (__map_without_ltlbs)
+ return 0;
+ if (pa >= p && pa < p + IMMR_SIZE)
+ return VIRT_IMMR_BASE + pa - p;
+ return 0;
}
-#define LARGE_PAGE_SIZE_4M (1<<22)
#define LARGE_PAGE_SIZE_8M (1<<23)
-#define LARGE_PAGE_SIZE_64M (1<<26)
-unsigned long __init mmu_mapin_ram(unsigned long top)
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
{
- unsigned long v, s, mapped;
- phys_addr_t p;
+ /* Pin up to the first three 8MB pages after IMMR in the DTLB table */
+#ifdef CONFIG_PIN_TLB
+ unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
+ unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY;
+#ifdef CONFIG_PIN_TLB_IMMR
+ int i = 29;
+#else
+ int i = 28;
+#endif
+ unsigned long addr = 0;
+ unsigned long mem = total_lowmem;
+
+ for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
+ mtspr(SPRN_MD_CTR, ctr | (i << 8));
+ mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
+ mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
+ mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
+ addr += LARGE_PAGE_SIZE_8M;
+ mem -= LARGE_PAGE_SIZE_8M;
+ }
+#endif
+}
- v = KERNELBASE;
- p = 0;
- s = top;
+static void mmu_mapin_immr(void)
+{
+ unsigned long p = PHYS_IMMR_BASE;
+ unsigned long v = VIRT_IMMR_BASE;
+ unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
+ int offset;
- if (__map_without_ltlbs)
- return 0;
+ for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
+ map_page(v + offset, p + offset, f);
+}
-#ifdef CONFIG_PPC_4K_PAGES
- while (s >= LARGE_PAGE_SIZE_8M) {
- pmd_t *pmdp;
- unsigned long val = p | MD_PS8MEG;
+/* Addresses of instructions to patch */
+#ifndef CONFIG_PIN_TLB_IMMR
+extern unsigned int DTLBMiss_jmp;
+#endif
+extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
- *pmdp++ = __pmd(val);
- *pmdp++ = __pmd(val + LARGE_PAGE_SIZE_4M);
+void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+{
+ unsigned int instr = *addr;
- v += LARGE_PAGE_SIZE_8M;
- p += LARGE_PAGE_SIZE_8M;
- s -= LARGE_PAGE_SIZE_8M;
- }
-#else /* CONFIG_PPC_16K_PAGES */
- while (s >= LARGE_PAGE_SIZE_64M) {
- pmd_t *pmdp;
- unsigned long val = p | MD_PS8MEG;
+ instr &= 0xffff0000;
+ instr |= (unsigned long)__va(mapped) >> 16;
+ patch_instruction(addr, instr);
+}
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
- *pmdp++ = __pmd(val);
+unsigned long __init mmu_mapin_ram(unsigned long top)
+{
+ unsigned long mapped;
- v += LARGE_PAGE_SIZE_64M;
- p += LARGE_PAGE_SIZE_64M;
- s -= LARGE_PAGE_SIZE_64M;
- }
+ if (__map_without_ltlbs) {
+ mapped = 0;
+ mmu_mapin_immr();
+#ifndef CONFIG_PIN_TLB_IMMR
+ patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
#endif
+ } else {
+ mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
+ }
- mapped = top - s;
+ mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
+ mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
/* If the size of RAM is not an exact power of two, we may not
* have covered RAM in its entirety with 8 MiB
@@ -77,7 +130,8 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
* coverage with normal-sized pages (or other reasons) do not
* attempt to allocate outside the allowed range.
*/
- memblock_set_current_limit(mapped);
+ if (mapped)
+ memblock_set_current_limit(mapped);
return mapped;
}
@@ -90,13 +144,8 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
*/
BUG_ON(first_memblock_base != 0);
-#ifdef CONFIG_PIN_TLB
/* 8xx can only access 24MB at the moment */
memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
-#else
- /* 8xx can only access 8MB at the moment */
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
-#endif
}
/*
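
A standalone illustration of what mmu_patch_cmp_limit() above does to the compare instructions in the 8xx TLB miss handlers (the values below are invented for the demo; the kernel patches the real DTLBMiss_cmp / FixupDAR_cmp words in place): keep the opcode/register half of the word and splice the upper 16 bits of the new block-mapped limit's kernel virtual address into the immediate field.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t instr = 0x28000000;		/* pretend cmplwi cr0, r0, 0 */
	unsigned long mapped = 0x01800000;	/* e.g. 24MB block-mapped */
	unsigned long va = 0xc0000000ul + mapped; /* assumed 0xc0000000 PAGE_OFFSET */

	instr &= 0xffff0000;			/* drop the old 16-bit immediate */
	instr |= (uint32_t)(va >> 16);		/* upper half of the new limit VA */

	printf("patched instruction word: 0x%08x\n", instr);
	return 0;
}
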
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 6527882ce05e..bb0354222b11 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -75,7 +75,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
}
ret = 0;
- *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+ *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(*flt & VM_FAULT_ERROR)) {
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a67c6d781c52..a4db22f65021 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -429,7 +429,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
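Both hunks above follow the tree-wide change that drops the mm argument from handle_mm_fault(); the mm is now derived from vma->vm_mm internally. A hedged kernel-context sketch of the new call shape (the wrapper name and error handling are illustrative, not the kernel's):

static int fault_one(struct vm_area_struct *vma, unsigned long ea,
                     bool is_write, unsigned int *flt)
{
        /* mm is no longer passed; handle_mm_fault() uses vma->vm_mm */
        *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
        return (*flt & VM_FAULT_ERROR) ? -EFAULT : 0;
}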
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 6333b273d2d5..42c702b3be1f 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -70,8 +70,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
- if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
- MMU_PAGE_4K, ssize, flags) == -1)
+ if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
+ MMU_PAGE_4K, ssize, flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
@@ -84,21 +84,23 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ MMU_PAGE_4K, MMU_PAGE_4K, ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags,
+ HPTE_V_SECONDARY,
+ MMU_PAGE_4K,
+ MMU_PAGE_4K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
*/
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 16644e1f4e6b..3bbbea07378c 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -133,9 +133,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
- MMU_PAGE_4K, MMU_PAGE_4K,
- ssize, flags);
+ ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
+ MMU_PAGE_4K, MMU_PAGE_4K,
+ ssize, flags);
/*
*if we failed because typically the HPTE wasn't really here
* we try an insertion.
@@ -166,21 +166,22 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ MMU_PAGE_4K, MMU_PAGE_4K, ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags, HPTE_V_SECONDARY,
+ MMU_PAGE_4K, MMU_PAGE_4K,
+ ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
*/
@@ -272,8 +273,9 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
- if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
- MMU_PAGE_64K, ssize, flags) == -1)
+ if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
+ MMU_PAGE_64K, ssize,
+ flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
@@ -286,21 +288,24 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ MMU_PAGE_64K, MMU_PAGE_64K,
+ ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags,
+ HPTE_V_SECONDARY,
+ MMU_PAGE_64K,
+ MMU_PAGE_64K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
*/
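The repeat blocks in these hash fault paths all implement the same insertion policy: try the primary PTE group, then the secondary group derived from the inverted hash, and if both are full evict an entry (the kernel picks primary or secondary pseudo-randomly from the timebase) and retry. A condensed, stand-alone sketch of that control flow, with plain function pointers standing in for the mmu_hash_ops callbacks:

#define HPTES_PER_GROUP 8       /* eight HPTEs per group, as in the kernel */

static long insert_with_retry(unsigned long hash, unsigned long mask,
                              long (*insert)(unsigned long grp),
                              long (*remove)(unsigned long grp))
{
        unsigned long grp;
        long slot;

        for (;;) {
                grp  = ((hash & mask) * HPTES_PER_GROUP) & ~0x7UL;
                slot = insert(grp);                     /* primary group   */
                if (slot != -1)
                        return slot;

                grp  = ((~hash & mask) * HPTES_PER_GROUP) & ~0x7UL;
                slot = insert(grp);                     /* secondary group */
                if (slot != -1)
                        return slot;

                /* both full: evict from one group and try again
                 * (the kernel flips a coin via mftb() to pick which) */
                remove(grp);
        }
}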
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d873f6507f72..88ce7d212320 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -55,7 +55,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
* We need 14 to 65 bits of va for a tlibe of 4K page
* With vpn we ignore the lower VPN_SHIFT bits already.
* And top two bits are already ignored because we can
- * only accomadate 76 bits in a 64 bit vpn with a VPN_SHIFT
+ * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
* of 12.
*/
va = vpn << VPN_SHIFT;
@@ -64,7 +64,8 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
* Older versions of the architecture (2.02 and earler) require the
* masking of the top 16 bits.
*/
- va &= ~(0xffffULL << 48);
+ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
+ va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
@@ -113,7 +114,8 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
* Older versions of the architecture (2.02 and earler) require the
* masking of the top 16 bits.
*/
- va &= ~(0xffffULL << 48);
+ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
+ va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
@@ -316,8 +318,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" -> hit\n");
/* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
- ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N |
+ ~(HPTE_R_PPP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PPP | HPTE_R_N |
HPTE_R_C)));
}
native_unlock_hpte(hptep);
@@ -385,8 +387,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
/* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
- ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N)));
+ ~(HPTE_R_PPP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PPP | HPTE_R_N)));
/*
* Ensure it is out of the tlb too. Bolted entries base and
* actual page size will be same.
@@ -550,7 +552,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
}
}
/* This works for all page sizes, and for 256M and 1T segments */
- *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
+ else
+ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+
shift = mmu_psize_defs[size].shift;
avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
@@ -601,7 +607,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
* crashdump and all bets are off anyway.
*
* TODO: add batching support when enabled. remember, no dynamic memory here,
- * athough there is the control page available...
+ * although there is the control page available...
*/
static void native_hpte_clear(void)
{
@@ -719,23 +725,29 @@ static void native_flush_hash_range(unsigned long number, int local)
local_irq_restore(flags);
}
-static int native_update_partition_table(u64 patb1)
+static int native_register_proc_table(unsigned long base, unsigned long page_size,
+ unsigned long table_size)
{
+ unsigned long patb1 = base << 25; /* VSID */
+
+ patb1 |= (page_size << 5); /* sllp */
+ patb1 |= table_size;
+
partition_tb->patb1 = cpu_to_be64(patb1);
return 0;
}
void __init hpte_init_native(void)
{
- ppc_md.hpte_invalidate = native_hpte_invalidate;
- ppc_md.hpte_updatepp = native_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
- ppc_md.hpte_insert = native_hpte_insert;
- ppc_md.hpte_remove = native_hpte_remove;
- ppc_md.hpte_clear_all = native_hpte_clear;
- ppc_md.flush_hash_range = native_flush_hash_range;
- ppc_md.hugepage_invalidate = native_hugepage_invalidate;
+ mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
+ mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
+ mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
+ mmu_hash_ops.hpte_insert = native_hpte_insert;
+ mmu_hash_ops.hpte_remove = native_hpte_remove;
+ mmu_hash_ops.hpte_clear_all = native_hpte_clear;
+ mmu_hash_ops.flush_hash_range = native_flush_hash_range;
+ mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
if (cpu_has_feature(CPU_FTR_ARCH_300))
- ppc_md.update_partition_table = native_update_partition_table;
+ ppc_md.register_process_table = native_register_proc_table;
}
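hpte_init_native() now fills a dedicated mmu_hash_ops structure instead of scattering callbacks across ppc_md. An abbreviated, self-contained sketch of the ops-table pattern (the real structure has more hooks and richer signatures than shown here):

struct hash_ops {
        long (*hpte_insert)(unsigned long group, unsigned long vpn);
        long (*hpte_remove)(unsigned long group);
        void (*hpte_clear_all)(void);
};

static long native_insert(unsigned long group, unsigned long vpn) { return 0; }
static long native_remove(unsigned long group) { return -1; }
static void native_clear_all(void) { }

static struct hash_ops mmu_hash_ops_sketch;

/* each backend (native, pseries, ps3) installs its own implementations */
static void hpte_init_native_sketch(void)
{
        mmu_hash_ops_sketch.hpte_insert    = native_insert;
        mmu_hash_ops_sketch.hpte_remove    = native_remove;
        mmu_hash_ops_sketch.hpte_clear_all = native_clear_all;
}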
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 59268969a0bc..b78b5d211278 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -34,6 +34,7 @@
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
+#include <linux/libfdt.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
@@ -58,6 +59,7 @@
#include <asm/firmware.h>
#include <asm/tm.h>
#include <asm/trace.h>
+#include <asm/ps3.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -87,10 +89,6 @@
*
*/
-#ifdef CONFIG_U3_DART
-extern unsigned long dart_tablebase;
-#endif /* CONFIG_U3_DART */
-
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);
@@ -120,6 +118,8 @@ static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
+struct mmu_hash_ops mmu_hash_ops;
+EXPORT_SYMBOL(mmu_hash_ops);
/* There are definitions of page sizes arrays to be used when none
* is provided by the firmware.
@@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
},
};
+/*
+ * 'R' and 'C' update notes:
+ * - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ * create writeable HPTEs without C set, because the hcall H_PROTECT
+ * that we use in that case will not update C
+ * - The above is however not a problem, because we also don't do that
+ * fancy "no flush" variant of eviction and we use H_REMOVE which will
+ * do the right thing and thus we don't have the race I described earlier
+ *
+ * - Under bare metal, we do have the race, so we need R and C set
+ * - We make sure R is always set and never lost
+ * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
unsigned long rflags = 0;
@@ -186,19 +199,28 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
rflags |= 0x1;
}
/*
- * Always add "C" bit for perf. Memory coherence is always enabled
+ * We can't allow hardware to update hpte bits. Hence always
+ * set 'R' bit and set 'C' if it is a write fault
*/
- rflags |= HPTE_R_C | HPTE_R_M;
+ rflags |= HPTE_R_R;
+
+ if (pteflags & _PAGE_DIRTY)
+ rflags |= HPTE_R_C;
/*
* Add in WIG bits
*/
if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
rflags |= HPTE_R_I;
- if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
rflags |= (HPTE_R_I | HPTE_R_G);
- if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
- rflags |= (HPTE_R_I | HPTE_R_W);
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+ rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+ else
+ /*
+ * Add memory coherence if cache inhibited is not set
+ */
+ rflags |= HPTE_R_M;
return rflags;
}
@@ -256,9 +278,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- BUG_ON(!ppc_md.hpte_insert);
- ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
- HPTE_V_BOLTED, psize, psize, ssize);
+ BUG_ON(!mmu_hash_ops.hpte_insert);
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
+ HPTE_V_BOLTED, psize, psize,
+ ssize);
if (ret < 0)
break;
@@ -283,11 +306,11 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
shift = mmu_psize_defs[psize].shift;
step = 1 << shift;
- if (!ppc_md.hpte_removebolted)
+ if (!mmu_hash_ops.hpte_removebolted)
return -ENODEV;
for (vaddr = vstart; vaddr < vend; vaddr += step) {
- rc = ppc_md.hpte_removebolted(vaddr, psize, ssize);
+ rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
if (rc == -ENOENT) {
ret = -ENOENT;
continue;
@@ -299,6 +322,15 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
return ret;
}
+static bool disable_1tb_segments = false;
+
+static int __init parse_disable_1tb_segments(char *p)
+{
+ disable_1tb_segments = true;
+ return 0;
+}
+early_param("disable_1tb_segments", parse_disable_1tb_segments);
+
static int __init htab_dt_scan_seg_sizes(unsigned long node,
const char *uname, int depth,
void *data)
@@ -317,6 +349,12 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
for (; size >= 4; size -= 4, ++prop) {
if (be32_to_cpu(prop[0]) == 40) {
DBG("1T segment support detected\n");
+
+ if (disable_1tb_segments) {
+ DBG("1T segments disabled by command line\n");
+ break;
+ }
+
cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
return 1;
}
@@ -492,7 +530,8 @@ static bool might_have_hea(void)
* we will never see an HEA ethernet device.
*/
#ifdef CONFIG_IBMEBUS
- return !cpu_has_feature(CPU_FTR_ARCH_207S);
+ return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ !firmware_has_feature(FW_FEATURE_SPLPAR);
#else
return false;
#endif
@@ -558,7 +597,7 @@ found:
* would stop us accessing the HEA ethernet. So if we
* have the chance of ever seeing one, stay at 4k.
*/
- if (!might_have_hea() || !machine_is(pseries))
+ if (!might_have_hea())
mmu_io_psize = MMU_PAGE_64K;
} else
mmu_ci_restrictions = 1;
@@ -677,10 +716,9 @@ int remove_section_mapping(unsigned long start, unsigned long end)
#endif /* CONFIG_MEMORY_HOTPLUG */
static void __init hash_init_partition_table(phys_addr_t hash_table,
- unsigned long pteg_count)
+ unsigned long htab_size)
{
unsigned long ps_field;
- unsigned long htab_size;
unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
/*
@@ -688,7 +726,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
* We can ignore that for lpid 0
*/
ps_field = 0;
- htab_size = __ilog2(pteg_count) - 11;
+ htab_size = __ilog2(htab_size) - 18;
BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
@@ -702,7 +740,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
* For now UPRT is 0 for us.
*/
partition_tb->patb1 = 0;
- DBG("Partition table %p\n", partition_tb);
+ pr_info("Partition table %p\n", partition_tb);
/*
* update partition table control register,
* 64 K size.
@@ -716,7 +754,7 @@ static void __init htab_initialize(void)
unsigned long table;
unsigned long pteg_count;
unsigned long prot;
- unsigned long base = 0, size = 0, limit;
+ unsigned long base = 0, size = 0;
struct memblock_region *reg;
DBG(" -> htab_initialize()\n");
@@ -742,7 +780,8 @@ static void __init htab_initialize(void)
htab_hash_mask = pteg_count - 1;
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR) ||
+ firmware_has_feature(FW_FEATURE_PS3_LV1)) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
@@ -753,20 +792,26 @@ static void __init htab_initialize(void)
* Clear the htab if firmware assisted dump is active so
* that we dont end up using old mappings.
*/
- if (is_fadump_active() && ppc_md.hpte_clear_all)
- ppc_md.hpte_clear_all();
+ if (is_fadump_active() && mmu_hash_ops.hpte_clear_all)
+ mmu_hash_ops.hpte_clear_all();
#endif
} else {
- /* Find storage for the HPT. Must be contiguous in
- * the absolute address space. On cell we want it to be
- * in the first 2 Gig so we can use it for IOMMU hacks.
+ unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
+
+#ifdef CONFIG_PPC_CELL
+ /*
+ * Cell may require the hash table down low when using the
+ * Axon IOMMU in order to fit the dynamic region over it, see
+ * comments in cell/iommu.c
*/
- if (machine_is(cell))
+ if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
limit = 0x80000000;
- else
- limit = MEMBLOCK_ALLOC_ANYWHERE;
+ pr_info("Hash table forced below 2G for Axon IOMMU\n");
+ }
+#endif /* CONFIG_PPC_CELL */
- table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+ table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
+ limit);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
@@ -774,7 +819,7 @@ static void __init htab_initialize(void)
htab_address = __va(table);
/* htab absolute addr + encoded htabsize */
- _SDR1 = table + __ilog2(pteg_count) - 11;
+ _SDR1 = table + __ilog2(htab_size_bytes) - 18;
/* Initialize the HPT with no entries */
memset((void *)table, 0, htab_size_bytes);
@@ -783,7 +828,7 @@ static void __init htab_initialize(void)
/* Set SDR1 */
mtspr(SPRN_SDR1, _SDR1);
else
- hash_init_partition_table(table, pteg_count);
+ hash_init_partition_table(table, htab_size_bytes);
}
prot = pgprot_val(PAGE_KERNEL);
@@ -810,34 +855,6 @@ static void __init htab_initialize(void)
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
base, size, prot);
-#ifdef CONFIG_U3_DART
- /* Do not map the DART space. Fortunately, it will be aligned
- * in such a way that it will not cross two memblock regions and
- * will fit within a single 16Mb page.
- * The DART space is assumed to be a full 16Mb region even if
- * we only use 2Mb of that space. We will use more of it later
- * for AGP GART. We have to use a full 16Mb large page.
- */
- DBG("DART base: %lx\n", dart_tablebase);
-
- if (dart_tablebase != 0 && dart_tablebase >= base
- && dart_tablebase < (base + size)) {
- unsigned long dart_table_end = dart_tablebase + 16 * MB;
- if (base != dart_tablebase)
- BUG_ON(htab_bolt_mapping(base, dart_tablebase,
- __pa(base), prot,
- mmu_linear_psize,
- mmu_kernel_ssize));
- if ((base + size) > dart_table_end)
- BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
- base + size,
- __pa(dart_table_end),
- prot,
- mmu_linear_psize,
- mmu_kernel_ssize));
- continue;
- }
-#endif /* CONFIG_U3_DART */
BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
prot, mmu_linear_psize, mmu_kernel_ssize));
}
@@ -900,12 +917,28 @@ void __init hash__early_init_mmu(void)
vmemmap = (struct page *)H_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
+#ifdef CONFIG_PCI
+ pci_io_base = ISA_IO_BASE;
+#endif
+
+ /* Select appropriate backend */
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1))
+ ps3_early_mm_init();
+ else if (firmware_has_feature(FW_FEATURE_LPAR))
+ hpte_init_pseries();
+ else if (IS_ENABLED(CONFIG_PPC_NATIVE))
+ hpte_init_native();
+
+ if (!mmu_hash_ops.hpte_insert)
+ panic("hash__early_init_mmu: No MMU hash ops defined!\n");
+
/* Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.
*/
htab_initialize();
+ pr_info("Initializing hash mmu with SLB\n");
/* Initialize SLB management */
slb_initialize();
}
@@ -1448,7 +1481,8 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
* We use same base page size and actual psize, because we don't
* use these functions for hugepage
*/
- ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
+ mmu_hash_ops.hpte_invalidate(slot, vpn, psize, psize,
+ ssize, local);
} pte_iterate_hashed_end();
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1489,9 +1523,9 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
if (!hpte_slot_array)
return;
- if (ppc_md.hugepage_invalidate) {
- ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
- psize, ssize, local);
+ if (mmu_hash_ops.hugepage_invalidate) {
+ mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+ psize, ssize, local);
goto tm_abort;
}
/*
@@ -1518,8 +1552,8 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, psize,
- MMU_PAGE_16M, ssize, local);
+ mmu_hash_ops.hpte_invalidate(slot, vpn, psize,
+ MMU_PAGE_16M, ssize, local);
}
tm_abort:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1543,8 +1577,8 @@ tm_abort:
void flush_hash_range(unsigned long number, int local)
{
- if (ppc_md.flush_hash_range)
- ppc_md.flush_hash_range(number, local);
+ if (mmu_hash_ops.flush_hash_range)
+ mmu_hash_ops.flush_hash_range(number, local);
else {
int i;
struct ppc64_tlb_batch *batch =
@@ -1589,22 +1623,22 @@ repeat:
HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
- psize, psize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
+ psize, psize, ssize);
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
- vflags | HPTE_V_SECONDARY,
- psize, psize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
+ vflags | HPTE_V_SECONDARY,
+ psize, psize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP)&~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
}
}
@@ -1654,8 +1688,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
- mmu_kernel_ssize, 0);
+ mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
+ mmu_linear_psize,
+ mmu_kernel_ssize, 0);
}
void __kernel_map_pages(struct page *page, int numpages, int enable)
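The switch from __ilog2(pteg_count) - 11 to __ilog2(htab_size_bytes) - 18 in _SDR1 and hash_init_partition_table() above is a pure re-expression: a PTE group is 128 bytes, so log2(bytes) - 18 and log2(groups) - 11 are the same number. A quick stand-alone check with an assumed 256 MB hash table:

#include <stdio.h>

int main(void)
{
        unsigned long htab_size_bytes = 1UL << 28;       /* assumed 256 MB HPT */
        unsigned long pteg_count = htab_size_bytes >> 7; /* 128 bytes per group */

        /* both encodings yield 10 for this size */
        printf("%d %d\n",
               __builtin_ctzl(htab_size_bytes) - 18,
               __builtin_ctzl(pteg_count) - 11);
        return 0;
}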
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index ba3fc229468a..f20d16f849c5 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -103,8 +103,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
- psize, lpsize, ssize, flags);
+ ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
+ psize, lpsize, ssize, flags);
/*
* We failed to update, try to insert a new entry.
*/
@@ -131,23 +131,24 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- psize, lpsize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ psize, lpsize, ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- psize, lpsize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags,
+ HPTE_V_SECONDARY,
+ psize, lpsize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
}
}
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 3058560b6121..d5026f3800b6 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -79,8 +79,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
- if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
- mmu_psize, ssize, flags) == -1)
+ if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, mmu_psize,
+ mmu_psize, ssize, flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5aac1a3f86cd..7372ee13eb1e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
cachep = PGT_CACHE(pdshift - pshift);
#endif
- new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+ new = kmem_cache_zalloc(cachep, GFP_KERNEL);
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -81,6 +81,13 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (! new)
return -ENOMEM;
+ /*
+ * Make sure other cpus find the hugepd set only after a
+ * properly initialized page table is visible to them.
+ * For more details look for comment in __pte_alloc().
+ */
+ smp_wmb();
+
spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
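The smp_wmb() added in __hugepte_alloc() is the standard publish pattern for new page tables: fully initialise the page, order those stores with a write barrier, then make the pointer visible. A kernel-style sketch of the shape (types simplified, not the actual hugepd code):

static void publish_table(void **slot, void *newtable)
{
        /* ... initialise all entries of newtable first ... */
        smp_wmb();                      /* order the fills before the publish */
        WRITE_ONCE(*slot, newtable);    /* other CPUs may now observe it      */
}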
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index c899fe340bbd..448685fbf27c 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL(kernstart_addr);
-#ifdef CONFIG_RELOCATABLE_PPC32
+#ifdef CONFIG_RELOCATABLE
/* Used in __va()/__pa() */
long long virt_phys_offset;
EXPORT_SYMBOL(virt_phys_offset);
@@ -80,9 +80,6 @@ EXPORT_SYMBOL(agp_special_page);
void MMU_init(void);
-/* XXX should be in current.h -- paulus */
-extern struct task_struct *current_set[NR_CPUS];
-
/*
* this tells the system to map all of ram with the segregs
* (i.e. page tables) instead of the bats.
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2fd57fa48429..5f844337de21 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -116,6 +116,16 @@ int memory_add_physaddr_to_nid(u64 start)
}
#endif
+int __weak create_section_mapping(unsigned long start, unsigned long end)
+{
+ return -ENODEV;
+}
+
+int __weak remove_section_mapping(unsigned long start, unsigned long end)
+{
+ return -ENODEV;
+}
+
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
struct pglist_data *pgdata;
@@ -239,8 +249,14 @@ static int __init mark_nonram_nosave(void)
static bool zone_limits_final;
+/*
+ * The memory zones past TOP_ZONE are managed by generic mm code.
+ * These should be set to zero since that's what every other
+ * architecture does.
+ */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
- [0 ... MAX_NR_ZONES - 1] = ~0UL
+ [0 ... TOP_ZONE ] = ~0UL,
+ [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};
/*
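The max_zone_pfns change relies on GCC's designated range initialisers so that only the zones up to TOP_ZONE keep the ~0UL sentinel. A tiny stand-alone illustration of the syntax (array length and marker index are made up):

#define TOP_IDX 1
#define NR      4

static unsigned long limits[NR] = {
        [0 ... TOP_IDX]          = ~0UL,        /* zones this arch manages */
        [TOP_IDX + 1 ... NR - 1] = 0,           /* left to generic mm code */
};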
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 227b2a6c4544..b114f8b93ec9 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
/*
* set the process table entry,
*/
- rts_field = 3ull << PPC_BITLSHIFT(2);
+ rts_field = radix__get_tree_size();
process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
return 0;
}
@@ -181,7 +181,10 @@ void destroy_context(struct mm_struct *mm)
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
- mtspr(SPRN_PID, next->context.id);
asm volatile("isync": : :"memory");
+ mtspr(SPRN_PID, next->context.id);
+ asm volatile("isync \n"
+ PPC_SLBIA(0x7)
+ : : :"memory");
}
#endif
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 6af65327c993..f988db655e5b 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -154,9 +154,10 @@ struct tlbcam {
};
#endif
-#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
/* 6xx have BATS */
/* FSL_BOOKE have TLBCAM */
+/* 8xx have LTLB */
phys_addr_t v_block_mapped(unsigned long va);
unsigned long p_block_mapped(phys_addr_t pa);
#else
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 669a15e7fa76..75b9cd6150cc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -581,30 +581,22 @@ static void verify_cpu_node_mapping(int cpu, int node)
}
}
-static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+/* Must run before sched domains notifier. */
+static int ppc_numa_cpu_prepare(unsigned int cpu)
{
- unsigned long lcpu = (unsigned long)hcpu;
- int ret = NOTIFY_DONE, nid;
+ int nid;
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- nid = numa_setup_cpu(lcpu);
- verify_cpu_node_mapping((int)lcpu, nid);
- ret = NOTIFY_OK;
- break;
+ nid = numa_setup_cpu(cpu);
+ verify_cpu_node_mapping(cpu, nid);
+ return 0;
+}
+
+static int ppc_numa_cpu_dead(unsigned int cpu)
+{
#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- unmap_cpu_from_node(lcpu);
- ret = NOTIFY_OK;
- break;
+ unmap_cpu_from_node(cpu);
#endif
- }
- return ret;
+ return 0;
}
/*
@@ -913,11 +905,6 @@ static void __init dump_numa_memory_topology(void)
}
}
-static struct notifier_block ppc64_numa_nb = {
- .notifier_call = cpu_numa_callback,
- .priority = 1 /* Must run before sched domains notifier. */
-};
-
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
@@ -985,15 +972,18 @@ void __init initmem_init(void)
setup_node_to_cpumask_map();
reset_numa_cpu_lookup_table();
- register_cpu_notifier(&ppc64_numa_nb);
+
/*
* We need the numa_cpu_lookup_table to be accurate for all CPUs,
* even before we online them, so that we can use cpu_to_{node,mem}
* early in boot, cf. smp_prepare_cpus().
+ * _nocalls() + manual invocation is used because cpuhp is not yet
+ * initialized for the boot CPU.
*/
- for_each_present_cpu(cpu) {
- numa_setup_cpu((unsigned long)cpu);
- }
+ cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
+ ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
+ for_each_present_cpu(cpu)
+ numa_setup_cpu(cpu);
}
static int __init early_numa(char *p)
@@ -1163,18 +1153,34 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
static u64 hot_add_drconf_memory_max(void)
{
- struct device_node *memory = NULL;
- unsigned int drconf_cell_cnt = 0;
- u64 lmb_size = 0;
+ struct device_node *memory = NULL;
+ struct device_node *dn = NULL;
+ unsigned int drconf_cell_cnt = 0;
+ u64 lmb_size = 0;
const __be32 *dm = NULL;
+ const __be64 *lrdr = NULL;
+ struct of_drconf_cell drmem;
+
+ dn = of_find_node_by_path("/rtas");
+ if (dn) {
+ lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
+ of_node_put(dn);
+ if (lrdr)
+ return be64_to_cpup(lrdr);
+ }
- memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (memory) {
- drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
- lmb_size = of_get_lmb_size(memory);
- of_node_put(memory);
- }
- return lmb_size * drconf_cell_cnt;
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
+ lmb_size = of_get_lmb_size(memory);
+
+ /* Advance to the last cell, each cell has 6 32 bit integers */
+ dm += (drconf_cell_cnt - 1) * 6;
+ read_drconf_cell(&drmem, &dm);
+ of_node_put(memory);
+ return drmem.base_addr + lmb_size;
+ }
+ return 0;
}
/*
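The numa.c change replaces the open-coded CPU notifier with the hotplug state machine: one prepare callback and one dead callback registered for a single state. The _nocalls() variant skips invoking the callbacks for CPUs that are already up, which is why the present CPUs are still walked manually right after. A kernel-context sketch with stub callbacks:

#include <linux/cpuhotplug.h>

static int my_numa_cpu_prepare(unsigned int cpu) { return 0; }
static int my_numa_cpu_dead(unsigned int cpu)    { return 0; }

static int __init my_numa_init(void)
{
        /* callbacks run on later CPU_UP_PREPARE / CPU_DEAD transitions */
        return cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE,
                                         "POWER_NUMA_PREPARE",
                                         my_numa_cpu_prepare,
                                         my_numa_cpu_dead);
}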
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index eb4451144746..670318766545 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
changed = !pmd_same(*(pmdp), entry);
if (changed) {
__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
- /*
- * Since we are not supporting SW TLB systems, we don't
- * have any thing similar to flush_tlb_page_nohash()
- */
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 18b2c11604fa..003ff48a11b6 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -21,8 +21,11 @@
#include <trace/events/thp.h>
-static int native_update_partition_table(u64 patb1)
+static int native_register_process_table(unsigned long base, unsigned long pg_sz,
+ unsigned long table_size)
{
+ unsigned long patb1 = base | table_size | PATB_GR;
+
partition_tb->patb1 = cpu_to_be64(patb1);
return 0;
}
@@ -160,32 +163,30 @@ redo:
process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
/*
* Fill in the process table.
- * we support 52 bits, hence 52-28 = 24, 11000
*/
- rts_field = 3ull << PPC_BITLSHIFT(2);
+ rts_field = radix__get_tree_size();
process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
/*
* Fill in the partition table. We are suppose to use effective address
* of process table here. But our linear mapping also enable us to use
* physical address here.
*/
- ppc_md.update_partition_table(__pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR);
+ ppc_md.register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}
static void __init radix_init_partition_table(void)
{
unsigned long rts_field;
- /*
- * we support 52 bits, hence 52-28 = 24, 11000
- */
- rts_field = 3ull << PPC_BITLSHIFT(2);
+
+ rts_field = radix__get_tree_size();
BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
RADIX_PGD_INDEX_SIZE | PATB_HR);
- printk("Partition table %p\n", partition_tb);
+ pr_info("Initializing Radix MMU\n");
+ pr_info("Partition table %p\n", partition_tb);
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
/*
@@ -197,7 +198,7 @@ static void __init radix_init_partition_table(void)
void __init radix_init_native(void)
{
- ppc_md.update_partition_table = native_update_partition_table;
+ ppc_md.register_process_table = native_register_process_table;
}
static int __init get_idx_from_shift(unsigned int shift)
@@ -296,11 +297,6 @@ found:
void __init radix__early_init_mmu(void)
{
unsigned long lpcr;
- /*
- * setup LPCR UPRT based on mmu_features
- */
- lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
#ifdef CONFIG_PPC_64K_PAGES
/* PAGE_SIZE mappings */
@@ -336,6 +332,11 @@ void __init radix__early_init_mmu(void)
__vmalloc_end = RADIX_VMALLOC_END;
vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+ pci_io_base = ISA_IO_BASE;
+#endif
+
/*
* For now radix also use the same frag size
*/
@@ -343,8 +344,12 @@ void __init radix__early_init_mmu(void)
__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
radix_init_page_sizes();
- if (!firmware_has_feature(FW_FEATURE_LPAR))
+ if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+ radix_init_native();
+ lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
+ }
radix_init_pgtable();
}
@@ -353,16 +358,15 @@ void radix__early_init_mmu_secondary(void)
{
unsigned long lpcr;
/*
- * setup LPCR UPRT based on mmu_features
+ * update partition table control register and UPRT
*/
- lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
- /*
- * update partition table control register, 64 K size.
- */
- if (!firmware_has_feature(FW_FEATURE_LPAR))
+ if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+ lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
+
mtspr(SPRN_PTCR,
__pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+ }
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
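In radix_init_pgtable() the backend hook is now given (__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12) and assembles patb1 itself (base | size | PATB_GR on bare metal). The "- 12" follows the usual table-size convention where the field encodes log2(bytes) - 12, i.e. size = 2^(12 + field); a quick stand-alone check with an assumed 64 KB process table:

#include <stdio.h>

int main(void)
{
        unsigned long prtb_size_shift = 16;             /* assumed: 64 KB table */
        unsigned long field = prtb_size_shift - 12;     /* encoded size -> 4    */

        printf("field %lu -> table of %lu bytes\n", field, 1UL << (12 + field));
        return 0;
}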
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bf7bf32b54f8..7f922f557936 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
pte_t *pte;
if (slab_is_available()) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
} else {
pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO;
ptepage = alloc_pages(flags, 0);
if (!ptepage)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e009e0604a8a..f5e8d4edb808 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
void *ret = NULL;
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
if (!page)
return NULL;
if (!kernel && !pgtable_page_ctor(page)) {
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 0fdaf93a3e09..e1f22700fb16 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -12,26 +12,30 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
+#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+ unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */
rb |= set << PPC_BITLSHIFT(51);
rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
}
@@ -39,67 +43,61 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
- __tlbiel_pid(pid, set);
+ __tlbiel_pid(pid, set, ric);
}
return;
}
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap)
+ unsigned long ap, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap)
+ unsigned long ap, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
@@ -117,25 +115,40 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
*/
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm->context.id;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_pid(pid);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+ unsigned long pid;
+ struct mm_struct *mm = tlb->mm;
+
+ preempt_disable();
+
+ pid = mm->context.id;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_va(vmaddr, pid, ap);
+ _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
preempt_enable();
}
@@ -160,7 +173,7 @@ static int mm_is_core_local(struct mm_struct *mm)
void radix__flush_tlb_mm(struct mm_struct *mm)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm->context.id;
@@ -172,20 +185,46 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_pid(pid);
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
} else
- _tlbiel_pid(pid);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+ unsigned long pid;
+ struct mm_struct *mm = tlb->mm;
+
+ preempt_disable();
+
+ pid = mm->context.id;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+
+ if (!mm_is_core_local(mm)) {
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+ raw_spin_lock(&native_tlbie_lock);
+ _tlbie_pid(pid, RIC_FLUSH_PWC);
+ if (lock_tlbie)
+ raw_spin_unlock(&native_tlbie_lock);
+ } else
+ _tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+ preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
@@ -196,11 +235,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_va(vmaddr, pid, ap);
+ _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
} else
- _tlbiel_va(vmaddr, pid, ap);
+ _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
preempt_enable();
}
@@ -224,7 +263,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_pid(0);
+ _tlbie_pid(0, RIC_FLUSH_ALL);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
}
@@ -243,9 +282,61 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
EXPORT_SYMBOL(radix__flush_tlb_range);
+static int radix_get_mmu_psize(int page_size)
+{
+ int psize;
+
+ if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
+ psize = mmu_virtual_psize;
+ else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
+ psize = MMU_PAGE_2M;
+ else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
+ psize = MMU_PAGE_1G;
+ else
+ return -1;
+ return psize;
+}
void radix__tlb_flush(struct mmu_gather *tlb)
{
struct mm_struct *mm = tlb->mm;
radix__flush_tlb_mm(mm);
}
+
+void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
+ unsigned long page_size)
+{
+ unsigned long rb,rs,prs,r;
+ unsigned long ap;
+ unsigned long ric = RIC_FLUSH_TLB;
+
+ ap = mmu_get_ap(radix_get_mmu_psize(page_size));
+ rb = gpa & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = lpid & ((1UL << 32) - 1);
+ prs = 0; /* partition scoped */
+ r = 1; /* radix format */
+
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+}
+EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
+
+void radix__flush_tlb_lpid(unsigned long lpid)
+{
+ unsigned long rb,rs,prs,r;
+ unsigned long ric = RIC_FLUSH_ALL;
+
+ rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
+ rs = lpid & ((1UL << 32) - 1);
+ prs = 0; /* partition scoped */
+ r = 1; /* radix format */
+
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+}
+EXPORT_SYMBOL(radix__flush_tlb_lpid);
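All the tlbie/tlbiel helpers in tlb-radix.c now take the RIC field as a parameter instead of hard-coding it (0 flushes TLB entries only, 1 the page-walk cache, 2 everything). A condensed sketch of how the exported flush entry points choose a RIC value; the real helpers emit the PPC_TLBIE_5/PPC_TLBIEL sequences shown above:

enum ric { RIC_TLB = 0, RIC_PWC = 1, RIC_ALL = 2 };

/* stand-in for _tlbiel_pid()/_tlbie_pid(); the kernel versions issue the
 * actual tlbiel/tlbie instructions with this RIC value */
static void flush_pid(unsigned long pid, enum ric ric) { (void)pid; (void)ric; }

static void flush_mm(unsigned long pid)       { flush_pid(pid, RIC_ALL); } /* radix__flush_tlb_mm  */
static void flush_pwc(unsigned long pid)      { flush_pid(pid, RIC_PWC); } /* radix__flush_tlb_pwc */
static void flush_one_page(unsigned long pid) { flush_pid(pid, RIC_TLB); } /* per-page paths       */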
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index 1306a58ac541..c1ff16a6eb51 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -1,4 +1,8 @@
#
# Arch-specific network modules
#
+ifeq ($(CONFIG_PPC64),y)
+obj-$(CONFIG_BPF_JIT) += bpf_jit_asm64.o bpf_jit_comp64.o
+else
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
+endif
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 889fd199a821..d5301b6f20d0 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -1,6 +1,8 @@
-/* bpf_jit.h: BPF JIT compiler for PPC64
+/*
+ * bpf_jit.h: BPF JIT compiler for PPC
*
* Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ * 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,67 +12,11 @@
#ifndef _BPF_JIT_H
#define _BPF_JIT_H
-#ifdef CONFIG_PPC64
-#define BPF_PPC_STACK_R3_OFF 48
-#define BPF_PPC_STACK_LOCALS 32
-#define BPF_PPC_STACK_BASIC (48+64)
-#define BPF_PPC_STACK_SAVE (18*8)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (48+64)
-#else
-#define BPF_PPC_STACK_R3_OFF 24
-#define BPF_PPC_STACK_LOCALS 16
-#define BPF_PPC_STACK_BASIC (24+32)
-#define BPF_PPC_STACK_SAVE (18*4)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (24+32)
-#endif
-
-#define REG_SZ (BITS_PER_LONG/8)
-
-/*
- * Generated code register usage:
- *
- * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
- *
- * skb r3 (Entry parameter)
- * A register r4
- * X register r5
- * addr param r6
- * r7-r10 scratch
- * skb->data r14
- * skb headlen r15 (skb->len - skb->data_len)
- * m[0] r16
- * m[...] ...
- * m[15] r31
- */
-#define r_skb 3
-#define r_ret 3
-#define r_A 4
-#define r_X 5
-#define r_addr 6
-#define r_scratch1 7
-#define r_scratch2 8
-#define r_D 14
-#define r_HL 15
-#define r_M 16
-
#ifndef __ASSEMBLY__
-/*
- * Assembly helpers from arch/powerpc/net/bpf_jit.S:
- */
-#define DECLARE_LOAD_FUNC(func) \
- extern u8 func[], func##_negative_offset[], func##_positive_offset[]
-
-DECLARE_LOAD_FUNC(sk_load_word);
-DECLARE_LOAD_FUNC(sk_load_half);
-DECLARE_LOAD_FUNC(sk_load_byte);
-DECLARE_LOAD_FUNC(sk_load_byte_msh);
+#include <asm/types.h>
-#ifdef CONFIG_PPC64
+#ifdef PPC64_ELF_ABI_v1
#define FUNCTION_DESCR_SIZE 24
#else
#define FUNCTION_DESCR_SIZE 0
@@ -83,7 +29,7 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
*/
#define IMM_H(i) ((uintptr_t)(i)>>16)
#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
- (((uintptr_t)(i) & 0x8000) >> 15))
+ (((uintptr_t)(i) & 0x8000) >> 15))
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define PLANT_INSTR(d, idx, instr) \
@@ -99,16 +45,20 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_MR(d, a) PPC_OR(d, a, a)
#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
- ___PPC_RS(d) | ___PPC_RA(a) | IMM_L(i))
+ ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
+ ___PPC_RA(base) | IMM_L(i))
#define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
+ ___PPC_RA(base) | IMM_L(i))
+#define PPC_STH(r, base, i) EMIT(PPC_INST_STH | ___PPC_RS(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define PPC_STB(r, base, i) EMIT(PPC_INST_STB | ___PPC_RS(r) | \
+ ___PPC_RA(base) | IMM_L(i))
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
@@ -120,6 +70,19 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RA(base) | IMM_L(i))
#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \
___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_LDBRX(r, base, b) EMIT(PPC_INST_LDBRX | ___PPC_RT(r) | \
+ ___PPC_RA(base) | ___PPC_RB(b))
+
+#define PPC_BPF_LDARX(t, a, b, eh) EMIT(PPC_INST_LDARX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b) | \
+ __PPC_EH(eh))
+#define PPC_BPF_LWARX(t, a, b, eh) EMIT(PPC_INST_LWARX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b) | \
+ __PPC_EH(eh))
+#define PPC_BPF_STWCX(s, a, b) EMIT(PPC_INST_STWCX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
#ifdef CONFIG_PPC64
#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
@@ -131,56 +94,26 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
#endif
-/* Convenience helpers for the above with 'far' offsets: */
-#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LBZ(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LD(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LWZ(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LHZ(r, r, IMM_L(i)); } } while(0)
-
-#ifdef CONFIG_PPC64
-#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
-#else
-#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
- PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
- } while (0)
-#else
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
- PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
- offsetof(struct thread_info, cpu)); \
- } while(0)
-#endif
-#else
-#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
-#endif
-
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
+#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
+ ___PPC_RB(b))
+#define PPC_CMPD(a, b) EMIT(PPC_INST_CMPD | ___PPC_RA(a) | \
+ ___PPC_RB(b))
#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
-#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_CMPLDI(a, i) EMIT(PPC_INST_CMPLDI | ___PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | \
+ ___PPC_RB(b))
+#define PPC_CMPLD(a, b) EMIT(PPC_INST_CMPLD | ___PPC_RA(a) | \
+ ___PPC_RB(b))
#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | ___PPC_RT(d) | \
___PPC_RB(a) | ___PPC_RA(b))
#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \
+#define PPC_MULD(d, a, b) EMIT(PPC_INST_MULLD | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_MULW(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
@@ -188,6 +121,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RA(a) | IMM_L(i))
#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(b))
#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | ___PPC_RA(d) | \
@@ -196,6 +131,7 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RS(a) | ___PPC_RB(b))
#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_MR(d, a) PPC_OR(d, a, a)
#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | ___PPC_RA(d) | \
___PPC_RS(a) | IMM_L(i))
#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | ___PPC_RA(d) | \
@@ -206,22 +142,43 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RS(a) | IMM_L(i))
#define PPC_XORIS(d, a, i) EMIT(PPC_INST_XORIS | ___PPC_RA(d) | \
___PPC_RS(a) | IMM_L(i))
+#define PPC_EXTSW(d, a) EMIT(PPC_INST_EXTSW | ___PPC_RA(d) | \
+ ___PPC_RS(a))
#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SLD(d, a, s) EMIT(PPC_INST_SLD | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRD(d, a, s) EMIT(PPC_INST_SRD | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRAD(d, a, s) EMIT(PPC_INST_SRAD | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRADI(d, a, i) EMIT(PPC_INST_SRADI | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ (((i) & 0x20) >> 4))
+#define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RLDICL(d, a, i, mb) EMIT(PPC_INST_RLDICL | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB64(mb) | (((i) & 0x20) >> 4))
+#define PPC_RLDICR(d, a, i, me) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_ME64(me) | (((i) & 0x20) >> 4))
+
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
-#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(i) | \
- __PPC_MB(0) | __PPC_ME(31-(i)))
+#define PPC_SLWI(d, a, i) PPC_RLWINM(d, a, i, 0, 31-(i))
/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
-#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(32-(i)) | \
- __PPC_MB(i) | __PPC_ME(31))
+#define PPC_SRWI(d, a, i) PPC_RLWINM(d, a, 32-(i), i, 31)
/* sldi = rldicr Rx, Ry, n, 63-n */
-#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(i) | \
- __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
+#define PPC_SLDI(d, a, i) PPC_RLDICR(d, a, i, 63-(i))
+/* srdi = rldicl Rx, Ry, 64-n, n */
+#define PPC_SRDI(d, a, i) PPC_RLDICL(d, a, 64-(i), i)
+
#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a))
/* Long jump; (unconditional 'branch') */
@@ -232,25 +189,37 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
(((cond) & 0x3ff) << 16) | \
(((dest) - (ctx->idx * 4)) & \
0xfffc))
-#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \
- if ((u32)(uintptr_t)(i) >= 32768) { \
- PPC_ADDIS(d, d, IMM_HA(i)); \
+/* Sign-extended 32-bit immediate load */
+#define PPC_LI32(d, i) do { \
+ if ((int)(uintptr_t)(i) >= -32768 && \
+ (int)(uintptr_t)(i) < 32768) \
+ PPC_LI(d, i); \
+ else { \
+ PPC_LIS(d, IMM_H(i)); \
+ if (IMM_L(i)) \
+ PPC_ORI(d, d, IMM_L(i)); \
} } while(0)
+
#define PPC_LI64(d, i) do { \
- if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \
+ if ((long)(i) >= -2147483648 && \
+ (long)(i) < 2147483648) \
PPC_LI32(d, i); \
else { \
- PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
- if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
- PPC_ORI(d, d, \
- ((uintptr_t)(i) >> 32) & 0xffff); \
+ if (!((uintptr_t)(i) & 0xffff800000000000ULL)) \
+ PPC_LI(d, ((uintptr_t)(i) >> 32) & 0xffff); \
+ else { \
+ PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
+ if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
+ PPC_ORI(d, d, \
+ ((uintptr_t)(i) >> 32) & 0xffff); \
+ } \
PPC_SLDI(d, d, 32); \
if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
PPC_ORIS(d, d, \
((uintptr_t)(i) >> 16) & 0xffff); \
if ((uintptr_t)(i) & 0x000000000000ffffULL) \
PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
- } } while (0);
+ } } while (0)
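
The immediate-loading macros above pick the shortest sequence they can: a value
that fits in a signed 16-bit field becomes a single li, a 32-bit value becomes
lis (plus ori if the low halfword is non-zero), and a full 64-bit value is
built in the upper word, shifted left by 32, then patched with oris/ori. A
rough host-side C sketch of that decision tree -- illustrative only, not JIT
code; it simply counts the instructions the macros would emit:

#include <stdint.h>
#include <stdio.h>

static int count_insns_for_imm(int64_t imm)
{
        int n;

        if (imm >= -32768 && imm < 32768)
                return 1;                       /* single li            */
        if (imm >= -2147483648LL && imm < 2147483648LL) {
                n = 1;                          /* lis, high halfword   */
                if (imm & 0xffff)
                        n++;                    /* ori, low halfword    */
                return n;
        }
        /* 64-bit case: build bits 32-63, shift, patch bits 0-31 */
        n = 1;                                  /* li or lis            */
        if (((imm >> 32) & 0xffff) &&
            ((uint64_t)imm & 0xffff800000000000ULL))
                n++;                            /* ori for bits 32-47   */
        n++;                                    /* sldi by 32           */
        if (imm & 0xffff0000)
                n++;                            /* oris for bits 16-31  */
        if (imm & 0xffff)
                n++;                            /* ori for bits 0-15    */
        return n;
}

int main(void)
{
        printf("%d\n", count_insns_for_imm(0x1234));                /* 1 */
        printf("%d\n", count_insns_for_imm(0x12345678));            /* 2 */
        printf("%d\n", count_insns_for_imm(0x1122334455667788LL));  /* 5 */
        return 0;
}
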
#ifdef CONFIG_PPC64
#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
@@ -258,14 +227,6 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif
-#define PPC_LHBRX_OFFS(r, base, i) \
- do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
-#ifdef __LITTLE_ENDIAN__
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
-#else
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
-#endif
-
static inline bool is_nearbranch(int offset)
{
return (offset < 32768) && (offset >= -32768);
@@ -302,18 +263,6 @@ static inline bool is_nearbranch(int offset)
#define COND_NE (CR0_EQ | COND_CMP_FALSE)
#define COND_LT (CR0_LT | COND_CMP_TRUE)
-#define SEEN_DATAREF 0x10000 /* might call external helpers */
-#define SEEN_XREG 0x20000 /* X reg is used */
-#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
- * storage */
-#define SEEN_MEM_MSK 0x0ffff
-
-struct codegen_context {
- unsigned int seen;
- unsigned int idx;
- int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
-};
-
#endif
#endif
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
new file mode 100644
index 000000000000..a8cd7e289ecd
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -0,0 +1,139 @@
+/*
+ * bpf_jit32.h: BPF JIT compiler for PPC
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * Split from bpf_jit.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _BPF_JIT32_H
+#define _BPF_JIT32_H
+
+#include "bpf_jit.h"
+
+#ifdef CONFIG_PPC64
+#define BPF_PPC_STACK_R3_OFF 48
+#define BPF_PPC_STACK_LOCALS 32
+#define BPF_PPC_STACK_BASIC (48+64)
+#define BPF_PPC_STACK_SAVE (18*8)
+#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+ BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (48+64)
+#else
+#define BPF_PPC_STACK_R3_OFF 24
+#define BPF_PPC_STACK_LOCALS 16
+#define BPF_PPC_STACK_BASIC (24+32)
+#define BPF_PPC_STACK_SAVE (18*4)
+#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+ BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (24+32)
+#endif
+
+#define REG_SZ (BITS_PER_LONG/8)
+
+/*
+ * Generated code register usage:
+ *
+ * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
+ *
+ * skb r3 (Entry parameter)
+ * A register r4
+ * X register r5
+ * addr param r6
+ * r7-r10 scratch
+ * skb->data r14
+ * skb headlen r15 (skb->len - skb->data_len)
+ * m[0] r16
+ * m[...] ...
+ * m[15] r31
+ */
+#define r_skb 3
+#define r_ret 3
+#define r_A 4
+#define r_X 5
+#define r_addr 6
+#define r_scratch1 7
+#define r_scratch2 8
+#define r_D 14
+#define r_HL 15
+#define r_M 16
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Assembly helpers from arch/powerpc/net/bpf_jit.S:
+ */
+#define DECLARE_LOAD_FUNC(func) \
+ extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);
+
+#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LBZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LD(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LWZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LHZ(r, r, IMM_L(i)); } } while(0)
+
+#ifdef CONFIG_PPC64
+#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
+#else
+#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
+#endif
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LOAD_CPU(r) \
+ do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
+ PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
+ } while (0)
+#else
+#define PPC_BPF_LOAD_CPU(r) \
+ do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
+ PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
+ offsetof(struct thread_info, cpu)); \
+ } while(0)
+#endif
+#else
+#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
+#endif
+
+#define PPC_LHBRX_OFFS(r, base, i) \
+ do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
+#ifdef __LITTLE_ENDIAN__
+#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
+#else
+#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
+#endif
+
+#define SEEN_DATAREF 0x10000 /* might call external helpers */
+#define SEEN_XREG 0x20000 /* X reg is used */
+#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
+ * storage */
+#define SEEN_MEM_MSK 0x0ffff
+
+struct codegen_context {
+ unsigned int seen;
+ unsigned int idx;
+ int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
+};
+
+#endif
+
+#endif
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
new file mode 100644
index 000000000000..5046d6f65c02
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -0,0 +1,102 @@
+/*
+ * bpf_jit64.h: BPF JIT compiler for PPC64
+ *
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _BPF_JIT64_H
+#define _BPF_JIT64_H
+
+#include "bpf_jit.h"
+
+/*
+ * Stack layout:
+ *
+ * [ prev sp ] <-------------
+ * [ nv gpr save area ] 8*8 |
+ * fp (r31) --> [ ebpf stack space ] 512 |
+ * [ local/tmp var space ] 16 |
+ * [ frame header ] 32/112 |
+ * sp (r1) ---> [ stack pointer ] --------------
+ */
+
+/* for bpf JIT code internal usage */
+#define BPF_PPC_STACK_LOCALS 16
+/* for gpr non-volatile registers BPF_REG_6 to 10, plus skb cache registers */
+#define BPF_PPC_STACK_SAVE (8*8)
+/* Ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS + \
+ MAX_BPF_STACK + BPF_PPC_STACK_SAVE)
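
Plugging in the sizes shown in the layout diagram above (512 bytes of eBPF
stack, 16 bytes of locals, 8*8 bytes of saved GPRs, and a 32- or 112-byte
frame header depending on the ELF ABI -- the header sizes are read off the
diagram, and MAX_BPF_STACK is assumed to be 512), the total frame comes out
quadword aligned in both cases. A quick illustrative check:

#include <stdio.h>

#define LOCALS          16              /* BPF_PPC_STACK_LOCALS         */
#define NVR_SAVE        (8 * 8)         /* BPF_PPC_STACK_SAVE           */
#define EBPF_STACK      512             /* MAX_BPF_STACK (assumed)      */

int main(void)
{
        int hdr[] = { 32, 112 };        /* ELFv2 / ELFv1 frame header   */

        for (int i = 0; i < 2; i++) {
                int frame = hdr[i] + LOCALS + EBPF_STACK + NVR_SAVE;

                printf("header %3d -> frame %d bytes, quadword aligned: %s\n",
                       hdr[i], frame, (frame % 16) ? "no" : "yes");
        }
        return 0;
}
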
+
+#ifndef __ASSEMBLY__
+
+/* BPF register usage */
+#define SKB_HLEN_REG (MAX_BPF_REG + 0)
+#define SKB_DATA_REG (MAX_BPF_REG + 1)
+#define TMP_REG_1 (MAX_BPF_REG + 2)
+#define TMP_REG_2 (MAX_BPF_REG + 3)
+
+/* BPF to ppc register mappings */
+static const int b2p[] = {
+ /* function return value */
+ [BPF_REG_0] = 8,
+ /* function arguments */
+ [BPF_REG_1] = 3,
+ [BPF_REG_2] = 4,
+ [BPF_REG_3] = 5,
+ [BPF_REG_4] = 6,
+ [BPF_REG_5] = 7,
+ /* non volatile registers */
+ [BPF_REG_6] = 27,
+ [BPF_REG_7] = 28,
+ [BPF_REG_8] = 29,
+ [BPF_REG_9] = 30,
+ /* frame pointer aka BPF_REG_10 */
+ [BPF_REG_FP] = 31,
+ /* eBPF jit internal registers */
+ [SKB_HLEN_REG] = 25,
+ [SKB_DATA_REG] = 26,
+ [TMP_REG_1] = 9,
+ [TMP_REG_2] = 10
+};
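
In practice the table above means, for example, that an eBPF "r0 = r1"
register move is emitted as "mr r8, r3". A minimal host-side sketch of the
lookup, using a local copy of the mapping (the enum names and the array name
b2p_sketch are stand-ins for illustration, not the kernel definitions):

#include <stdio.h>

enum { R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R_FP };

static const int b2p_sketch[] = {
        [R0] = 8,  [R1] = 3,  [R2] = 4,  [R3] = 5,  [R4] = 6,  [R5] = 7,
        [R6] = 27, [R7] = 28, [R8] = 29, [R9] = 30, [R_FP] = 31,
};

int main(void)
{
        /* eBPF "r0 = r1" becomes a GPR-to-GPR move on the mapped registers */
        printf("mr r%d, r%d\n", b2p_sketch[R0], b2p_sketch[R1]);  /* mr r8, r3 */
        return 0;
}
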
+
+/* Assembly helpers */
+#define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
+ u64 func##_negative_offset(u64 r3, u64 r4); \
+ u64 func##_positive_offset(u64 r3, u64 r4);
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+
+#define CHOOSE_LOAD_FUNC(imm, func) \
+ (imm < 0 ? \
+ (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
+ func##_positive_offset)
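
Spelled out, the selection above sends a compile-time non-negative offset to
the fast positive-offset entry, an offset at or above SKF_LL_OFF to the
negative-offset helper, and anything lower to the generic entry point, which
then takes the error path at run time (see the "lis r_tmp, -32" check in
bpf_jit_asm64.S later in this patch). A small user-space model, with
SKF_LL_OFF assumed to be -0x200000 as in the UAPI filter header:

#include <stdio.h>

#define SKF_LL_OFF      (-0x200000)     /* assumed UAPI value */

static const char *choose_word_load(int imm)
{
        if (imm >= 0)
                return "sk_load_word_positive_offset";
        if (imm >= SKF_LL_OFF)
                return "sk_load_word_negative_offset";
        return "sk_load_word";  /* generic entry; errors out at run time */
}

int main(void)
{
        printf("%s\n", choose_word_load(14));           /* in-header offset */
        printf("%s\n", choose_word_load(-0x1000));      /* negative offset  */
        printf("%s\n", choose_word_load(-0x300000));    /* below SKF_LL_OFF */
        return 0;
}
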
+
+#define SEEN_FUNC 0x1000 /* might call external helpers */
+#define SEEN_STACK 0x2000 /* uses BPF stack */
+#define SEEN_SKB 0x4000 /* uses sk_buff */
+
+struct codegen_context {
+ /*
+ * This is used to track register usage as well
+ * as calls to external helpers.
+ * - register usage is tracked with corresponding
+ * bits (r3-r10 and r25-r31)
+ * - rest of the bits can be used to track other
+ * things -- for now, we use bits 12 to 14
+ * encoded in SEEN_* macros above
+ */
+ unsigned int seen;
+ unsigned int idx;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S
index 8ff5a3b5d1c3..3dd9c43d40c9 100644
--- a/arch/powerpc/net/bpf_jit_asm.S
+++ b/arch/powerpc/net/bpf_jit_asm.S
@@ -10,7 +10,7 @@
*/
#include <asm/ppc_asm.h>
-#include "bpf_jit.h"
+#include "bpf_jit32.h"
/*
* All of these routines are called directly from generated code,
diff --git a/arch/powerpc/net/bpf_jit_asm64.S b/arch/powerpc/net/bpf_jit_asm64.S
new file mode 100644
index 000000000000..7e4c51430b84
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_asm64.S
@@ -0,0 +1,180 @@
+/*
+ * bpf_jit_asm64.S: Packet/header access helper functions
+ * for PPC64 BPF compiler.
+ *
+ * Copyright 2016, Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
+ *
+ * Based on bpf_jit_asm.S by Matt Evans
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include "bpf_jit64.h"
+
+/*
+ * All of these routines are called directly from generated code,
+ * with the below register usage:
+ * r27 skb pointer (ctx)
+ * r25 skb header length
+ * r26 skb->data pointer
+ * r4 offset
+ *
+ * Result is passed back in:
+ * r8 data read in host endian format (accumulator)
+ *
+ * r9 is used as a temporary register
+ */
+
+#define r_skb r27
+#define r_hlen r25
+#define r_data r26
+#define r_off r4
+#define r_val r8
+#define r_tmp r9
+
+_GLOBAL_TOC(sk_load_word)
+ cmpdi r_off, 0
+ blt bpf_slow_path_word_neg
+ b sk_load_word_positive_offset
+
+_GLOBAL_TOC(sk_load_word_positive_offset)
+ /* Are we accessing past headlen? */
+ subi r_tmp, r_hlen, 4
+ cmpd r_tmp, r_off
+ blt bpf_slow_path_word
+ /* Nope, just hitting the header. cr0 here is eq or gt! */
+ LWZX_BE r_val, r_data, r_off
+ blr /* Return success, cr0 != LT */
+
+_GLOBAL_TOC(sk_load_half)
+ cmpdi r_off, 0
+ blt bpf_slow_path_half_neg
+ b sk_load_half_positive_offset
+
+_GLOBAL_TOC(sk_load_half_positive_offset)
+ subi r_tmp, r_hlen, 2
+ cmpd r_tmp, r_off
+ blt bpf_slow_path_half
+ LHZX_BE r_val, r_data, r_off
+ blr
+
+_GLOBAL_TOC(sk_load_byte)
+ cmpdi r_off, 0
+ blt bpf_slow_path_byte_neg
+ b sk_load_byte_positive_offset
+
+_GLOBAL_TOC(sk_load_byte_positive_offset)
+ cmpd r_hlen, r_off
+ ble bpf_slow_path_byte
+ lbzx r_val, r_data, r_off
+ blr
+
+/*
+ * Call out to skb_copy_bits:
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define bpf_slow_path_common(SIZE) \
+ mflr r0; \
+ std r0, PPC_LR_STKOFF(r1); \
+ stdu r1, -(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS)(r1); \
+ mr r3, r_skb; \
+ /* r4 = r_off as passed */ \
+ addi r5, r1, STACK_FRAME_MIN_SIZE; \
+ li r6, SIZE; \
+ bl skb_copy_bits; \
+ nop; \
+ /* save r5 */ \
+ addi r5, r1, STACK_FRAME_MIN_SIZE; \
+ /* r3 = 0 on success */ \
+ addi r1, r1, STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS; \
+ ld r0, PPC_LR_STKOFF(r1); \
+ mtlr r0; \
+ cmpdi r3, 0; \
+ blt bpf_error; /* cr0 = LT */
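
The fast/slow split above boils down to "does the whole access fit inside the
linear header?"; if not, the real code calls skb_copy_bits() into a small
stack buffer and loads from there. A rough user-space model of the word case
(the slow path itself is only indicated by a comment, and the function and
variable names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int load_word(const uint8_t *lin, uint32_t headlen, uint32_t off,
                     uint32_t *val)
{
        uint8_t buf[4];

        if (off + 4 > headlen)
                return -1;      /* would take the skb_copy_bits() slow path */
        memcpy(buf, lin + off, 4);
        /* LWZX_BE: result is the big-endian interpretation of the bytes */
        *val = (uint32_t)buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
        return 0;
}

int main(void)
{
        uint8_t pkt[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
        uint32_t v;

        if (!load_word(pkt, sizeof(pkt), 1, &v))
                printf("%08x\n", v);    /* 22334455 */
        return 0;
}
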
+
+bpf_slow_path_word:
+ bpf_slow_path_common(4)
+ /* Data value is on stack, and cr0 != LT */
+ LWZX_BE r_val, 0, r5
+ blr
+
+bpf_slow_path_half:
+ bpf_slow_path_common(2)
+ LHZX_BE r_val, 0, r5
+ blr
+
+bpf_slow_path_byte:
+ bpf_slow_path_common(1)
+ lbzx r_val, 0, r5
+ blr
+
+/*
+ * Call out to bpf_internal_load_pointer_neg_helper
+ */
+#define sk_negative_common(SIZE) \
+ mflr r0; \
+ std r0, PPC_LR_STKOFF(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE(r1); \
+ mr r3, r_skb; \
+ /* r4 = r_off, as passed */ \
+ li r5, SIZE; \
+ bl bpf_internal_load_pointer_neg_helper; \
+ nop; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE; \
+ ld r0, PPC_LR_STKOFF(r1); \
+ mtlr r0; \
+ /* R3 != 0 on success */ \
+ cmpldi r3, 0; \
+ beq bpf_error_slow; /* cr0 = EQ */
+
+bpf_slow_path_word_neg:
+ lis r_tmp, -32 /* SKF_LL_OFF */
+ cmpd r_off, r_tmp /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ b sk_load_word_negative_offset
+
+_GLOBAL_TOC(sk_load_word_negative_offset)
+ sk_negative_common(4)
+ LWZX_BE r_val, 0, r3
+ blr
+
+bpf_slow_path_half_neg:
+ lis r_tmp, -32 /* SKF_LL_OFF */
+ cmpd r_off, r_tmp /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ b sk_load_half_negative_offset
+
+_GLOBAL_TOC(sk_load_half_negative_offset)
+ sk_negative_common(2)
+ LHZX_BE r_val, 0, r3
+ blr
+
+bpf_slow_path_byte_neg:
+ lis r_tmp, -32 /* SKF_LL_OFF */
+ cmpd r_off, r_tmp /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ b sk_load_byte_negative_offset
+
+_GLOBAL_TOC(sk_load_byte_negative_offset)
+ sk_negative_common(1)
+ lbzx r_val, 0, r3
+ blr
+
+bpf_error_slow:
+ /* fabricate a cr0 = lt */
+ li r_tmp, -1
+ cmpdi r_tmp, 0
+bpf_error:
+ /*
+ * Entered with cr0 = lt
+ * Generated code will 'blt epilogue', returning 0.
+ */
+ li r_val, 0
+ blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2d66a8446198..7e706f36e364 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -16,7 +16,7 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
-#include "bpf_jit.h"
+#include "bpf_jit32.h"
int bpf_jit_enable __read_mostly;
@@ -161,14 +161,14 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
break;
case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
ctx->seen |= SEEN_XREG;
- PPC_MUL(r_A, r_A, r_X);
+ PPC_MULW(r_A, r_A, r_X);
break;
case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
if (K < 32768)
PPC_MULI(r_A, r_A, K);
else {
PPC_LI32(r_scratch1, K);
- PPC_MUL(r_A, r_A, r_scratch1);
+ PPC_MULW(r_A, r_A, r_scratch1);
}
break;
case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
@@ -184,7 +184,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
}
if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
PPC_DIVWU(r_scratch1, r_A, r_X);
- PPC_MUL(r_scratch1, r_X, r_scratch1);
+ PPC_MULW(r_scratch1, r_X, r_scratch1);
PPC_SUB(r_A, r_A, r_scratch1);
} else {
PPC_DIVWU(r_A, r_A, r_X);
@@ -193,7 +193,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
PPC_LI32(r_scratch2, K);
PPC_DIVWU(r_scratch1, r_A, r_scratch2);
- PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
+ PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
PPC_SUB(r_A, r_A, r_scratch1);
break;
case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
new file mode 100644
index 000000000000..6073b78516f6
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -0,0 +1,954 @@
+/*
+ * bpf_jit_comp64.c: eBPF JIT compiler
+ *
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
+ *
+ * Based on the powerpc classic BPF JIT compiler by Matt Evans
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/moduleloader.h>
+#include <asm/cacheflush.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/if_vlan.h>
+#include <asm/kprobes.h>
+
+#include "bpf_jit64.h"
+
+int bpf_jit_enable __read_mostly;
+
+static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
+{
+ int *p = area;
+
+ /* Fill whole space with trap instructions */
+ while (p < (int *)((char *)area + size))
+ *p++ = BREAKPOINT_INSTRUCTION;
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ smp_wmb();
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
+{
+ return (ctx->seen & (1 << (31 - b2p[i])));
+}
+
+static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
+{
+ ctx->seen |= (1 << (31 - b2p[i]));
+}
+
+static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
+{
+ /*
+ * We only need a stack frame if:
+ * - we call other functions (kernel helpers), or
+ * - the bpf program uses its stack area
+ * The latter condition is deduced from the usage of BPF_REG_FP
+ */
+ return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
+}
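
The register tracking works by giving each mapped GPR n the bit (31 - n) in
ctx->seen, so the non-volatile GPRs 25-31 occupy the low bits and the argument
registers 3-10 the high bits, well clear of the SEEN_FUNC/SEEN_STACK/SEEN_SKB
flags in between. A small illustrative sketch of the bookkeeping (user-space,
function names are mine):

#include <stdio.h>

static unsigned int seen;

static void set_seen_gpr(int gpr)
{
        seen |= 1u << (31 - gpr);
}

static int is_seen_gpr(int gpr)
{
        return !!(seen & (1u << (31 - gpr)));
}

int main(void)
{
        set_seen_gpr(31);       /* BPF_REG_FP -> bit 0  */
        set_seen_gpr(27);       /* BPF_REG_6  -> bit 4  */
        set_seen_gpr(3);        /* BPF_REG_1  -> bit 28 */
        printf("seen = 0x%08x, fp used: %d\n", seen, is_seen_gpr(31));
        return 0;
}
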
+
+static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
+{
+ /*
+ * Load skb->len and skb->data_len
+ * r3 points to skb
+ */
+ PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
+ PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
+ /* header_len = len - data_len */
+ PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);
+
+ /* skb->data pointer */
+ PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
+}
+
+static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+ /* func points to the function descriptor */
+ PPC_LI64(b2p[TMP_REG_2], func);
+ /* Load actual entry point from function descriptor */
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+ /* ... and move it to LR */
+ PPC_MTLR(b2p[TMP_REG_1]);
+ /*
+ * Load TOC from function descriptor at offset 8.
+ * We can clobber r2 since we get called through a
+ * function pointer (so caller will save/restore r2)
+ * and since we don't use a TOC ourself.
+ */
+ PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+ /* We can clobber r12 */
+ PPC_FUNC_ADDR(12, func);
+ PPC_MTLR(12);
+#endif
+ PPC_BLRL();
+}
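
On ELFv1 the "func" value is a function descriptor rather than a code address,
which is why the sequence above loads twice before the blrl. A sketch of what
it dereferences, with the struct name and layout assumed from the 64-bit ELFv1
ABI (entry point at offset 0, TOC pointer at offset 8):

#include <stdint.h>

struct func_desc {
        uint64_t entry;         /* loaded into TMP_REG_1, then mtlr    */
        uint64_t toc;           /* loaded into r2 before the blrl      */
        uint64_t env;           /* unused by the JIT                   */
};

static uint64_t resolve_entry(const struct func_desc *fd, uint64_t *r2)
{
        *r2 = fd->toc;          /* PPC_BPF_LL(2, TMP_REG_2, 8)         */
        return fd->entry;       /* PPC_BPF_LL(TMP_REG_1, TMP_REG_2, 0) */
}

int main(void)
{
        struct func_desc fd = { .entry = 0x1000, .toc = 0x2000, .env = 0 };
        uint64_t r2;

        return !(resolve_entry(&fd, &r2) == 0x1000 && r2 == 0x2000);
}
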
+
+static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+ bool new_stack_frame = bpf_has_stack_frame(ctx);
+
+ if (new_stack_frame) {
+ /*
+ * We need a stack frame, but we don't necessarily need to
+ * save/restore LR unless we call other functions
+ */
+ if (ctx->seen & SEEN_FUNC) {
+ EMIT(PPC_INST_MFLR | __PPC_RT(R0));
+ PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
+ }
+
+ PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
+ }
+
+ /*
+ * Back up non-volatile regs -- BPF registers 6-10
+ * If we haven't created our own stack frame, we save these
+ * in the protected zone below the previous stack frame
+ */
+ for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+ if (bpf_is_seen_register(ctx, i))
+ PPC_BPF_STL(b2p[i], 1,
+ (new_stack_frame ? BPF_PPC_STACKFRAME : 0) -
+ (8 * (32 - b2p[i])));
+
+ /*
+ * Save additional non-volatile regs if we cache skb
+ * Also, setup skb data
+ */
+ if (ctx->seen & SEEN_SKB) {
+ PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_HLEN_REG])));
+ PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_DATA_REG])));
+ bpf_jit_emit_skb_loads(image, ctx);
+ }
+
+ /* Setup frame pointer to point to the bpf stack area */
+ if (bpf_is_seen_register(ctx, BPF_REG_FP))
+ PPC_ADDI(b2p[BPF_REG_FP], 1,
+ BPF_PPC_STACKFRAME - BPF_PPC_STACK_SAVE);
+}
+
+static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+ bool new_stack_frame = bpf_has_stack_frame(ctx);
+
+ /* Move result to r3 */
+ PPC_MR(3, b2p[BPF_REG_0]);
+
+ /* Restore NVRs */
+ for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+ if (bpf_is_seen_register(ctx, i))
+ PPC_BPF_LL(b2p[i], 1,
+ (new_stack_frame ? BPF_PPC_STACKFRAME : 0) -
+ (8 * (32 - b2p[i])));
+
+ /* Restore non-volatile registers used for skb cache */
+ if (ctx->seen & SEEN_SKB) {
+ PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_HLEN_REG])));
+ PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_DATA_REG])));
+ }
+
+ /* Tear down our stack frame */
+ if (new_stack_frame) {
+ PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
+ if (ctx->seen & SEEN_FUNC) {
+ PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
+ PPC_MTLR(0);
+ }
+ }
+
+ PPC_BLR();
+}
+
+/* Assemble the body code between the prologue & epilogue */
+static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
+ struct codegen_context *ctx,
+ u32 *addrs)
+{
+ const struct bpf_insn *insn = fp->insnsi;
+ int flen = fp->len;
+ int i;
+
+ /* Start of epilogue code - will only be valid 2nd pass onwards */
+ u32 exit_addr = addrs[flen];
+
+ for (i = 0; i < flen; i++) {
+ u32 code = insn[i].code;
+ u32 dst_reg = b2p[insn[i].dst_reg];
+ u32 src_reg = b2p[insn[i].src_reg];
+ s16 off = insn[i].off;
+ s32 imm = insn[i].imm;
+ u64 imm64;
+ u8 *func;
+ u32 true_cond;
+ int stack_local_off;
+
+ /*
+ * addrs[] maps a BPF bytecode address into a real offset from
+ * the start of the body code.
+ */
+ addrs[i] = ctx->idx * 4;
+
+ /*
+ * As an optimization, we note down which non-volatile registers
+ * are used so that we can only save/restore those in our
+ * prologue and epilogue. We do this here regardless of whether
+ * the actual BPF instruction uses src/dst registers or not
+ * (for instance, BPF_CALL does not use them). The expectation
+ * is that those instructions will have src_reg/dst_reg set to
+ * 0. Even otherwise, we just lose some prologue/epilogue
+ * optimization but everything else should work without
+ * any issues.
+ */
+ if (dst_reg >= 24 && dst_reg <= 31)
+ bpf_set_seen_register(ctx, insn[i].dst_reg);
+ if (src_reg >= 24 && src_reg <= 31)
+ bpf_set_seen_register(ctx, insn[i].src_reg);
+
+ switch (code) {
+ /*
+ * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
+ */
+ case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
+ PPC_ADD(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
+ PPC_SUB(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
+ if (BPF_OP(code) == BPF_SUB)
+ imm = -imm;
+ if (imm) {
+ if (imm >= -32768 && imm < 32768)
+ PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ }
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
+ if (BPF_CLASS(code) == BPF_ALU)
+ PPC_MULW(dst_reg, dst_reg, src_reg);
+ else
+ PPC_MULD(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
+ if (imm >= -32768 && imm < 32768)
+ PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ if (BPF_CLASS(code) == BPF_ALU)
+ PPC_MULW(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ else
+ PPC_MULD(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
+ case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
+ PPC_CMPWI(src_reg, 0);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
+ PPC_MULW(b2p[TMP_REG_1], src_reg,
+ b2p[TMP_REG_1]);
+ PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else
+ PPC_DIVWU(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
+ PPC_CMPDI(src_reg, 0);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
+ PPC_MULD(b2p[TMP_REG_1], src_reg,
+ b2p[TMP_REG_1]);
+ PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else
+ PPC_DIVD(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
+ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
+ if (imm == 0)
+ return -EINVAL;
+ else if (imm == 1)
+ goto bpf_alu32_trunc;
+
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ switch (BPF_CLASS(code)) {
+ case BPF_ALU:
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
+ b2p[TMP_REG_1]);
+ PPC_MULW(b2p[TMP_REG_1],
+ b2p[TMP_REG_1],
+ b2p[TMP_REG_2]);
+ PPC_SUB(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ } else
+ PPC_DIVWU(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ break;
+ case BPF_ALU64:
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVD(b2p[TMP_REG_2], dst_reg,
+ b2p[TMP_REG_1]);
+ PPC_MULD(b2p[TMP_REG_1],
+ b2p[TMP_REG_1],
+ b2p[TMP_REG_2]);
+ PPC_SUB(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ } else
+ PPC_DIVD(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ break;
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
+ case BPF_ALU64 | BPF_NEG: /* dst = -dst */
+ PPC_NEG(dst_reg, dst_reg);
+ goto bpf_alu32_trunc;
+
+ /*
+ * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
+ */
+ case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
+ PPC_AND(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
+ case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
+ if (!IMM_H(imm))
+ PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
+ else {
+ /* Sign-extended */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
+ case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
+ PPC_OR(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
+ case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
+ if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
+ /* Sign-extended */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else {
+ if (IMM_L(imm))
+ PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
+ if (IMM_H(imm))
+ PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
+ PPC_XOR(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
+ if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
+ /* Sign-extended */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else {
+ if (IMM_L(imm))
+ PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
+ if (IMM_H(imm))
+ PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
+ /* slw clears top 32 bits */
+ PPC_SLW(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
+ PPC_SLD(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
+ /* with imm 0, we still need to clear top 32 bits */
+ PPC_SLWI(dst_reg, dst_reg, imm);
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
+ if (imm != 0)
+ PPC_SLDI(dst_reg, dst_reg, imm);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
+ PPC_SRW(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
+ PPC_SRD(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
+ PPC_SRWI(dst_reg, dst_reg, imm);
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
+ if (imm != 0)
+ PPC_SRDI(dst_reg, dst_reg, imm);
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
+ PPC_SRAD(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
+ if (imm != 0)
+ PPC_SRADI(dst_reg, dst_reg, imm);
+ break;
+
+ /*
+ * MOV
+ */
+ case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+ PPC_MR(dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
+ PPC_LI32(dst_reg, imm);
+ if (imm < 0)
+ goto bpf_alu32_trunc;
+ break;
+
+bpf_alu32_trunc:
+ /* Truncate to 32-bits */
+ if (BPF_CLASS(code) == BPF_ALU)
+ PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
+ break;
+
+ /*
+ * BPF_FROM_BE/LE
+ */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+#ifdef __BIG_ENDIAN__
+ if (BPF_SRC(code) == BPF_FROM_BE)
+ goto emit_clear;
+#else /* !__BIG_ENDIAN__ */
+ if (BPF_SRC(code) == BPF_FROM_LE)
+ goto emit_clear;
+#endif
+ switch (imm) {
+ case 16:
+ /* Rotate 8 bits left & mask with 0x0000ff00 */
+ PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
+ /* Rotate 8 bits right & insert LSB to reg */
+ PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
+ /* Move result back to dst_reg */
+ PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ break;
+ case 32:
+ /*
+ * Rotate word left by 8 bits:
+ * 2 bytes are already in their final position
+ * -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
+ */
+ PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
+ /* Rotate 24 bits and insert byte 1 */
+ PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
+ /* Rotate 24 bits and insert byte 3 */
+ PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
+ PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ break;
+ case 64:
+ /*
+ * Way easier and faster(?) to store the value
+ * into stack and then use ldbrx
+ *
+ * First, determine where in stack we can store
+ * this:
+ * - if we have allotted a stack frame, then we
+ * will utilize the area set aside by
+ * BPF_PPC_STACK_LOCALS
+ * - else, we use the area beneath the NV GPR
+ * save area
+ *
+ * ctx->seen will be reliable in pass2, but
+ * the instructions generated will remain the
+ * same across all passes
+ */
+ if (bpf_has_stack_frame(ctx))
+ stack_local_off = STACK_FRAME_MIN_SIZE;
+ else
+ stack_local_off = -(BPF_PPC_STACK_SAVE + 8);
+
+ PPC_STD(dst_reg, 1, stack_local_off);
+ PPC_ADDI(b2p[TMP_REG_1], 1, stack_local_off);
+ PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
+ break;
+ }
+ break;
+
+emit_clear:
+ switch (imm) {
+ case 16:
+ /* zero-extend 16 bits into 64 bits */
+ PPC_RLDICL(dst_reg, dst_reg, 0, 48);
+ break;
+ case 32:
+ /* zero-extend 32 bits into 64 bits */
+ PPC_RLDICL(dst_reg, dst_reg, 0, 32);
+ break;
+ case 64:
+ /* nop */
+ break;
+ }
+ break;
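
The three-instruction 32-bit swap above is easier to follow as plain C, with
the IBM mb/me bit ranges of rlwinm/rlwimi translated into masks. This is only
a host-side model of the emitted sequence (function names are mine); the
16-bit case works the same way with narrower masks:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t v, int n)
{
        return (v << n) | (v >> (32 - n));
}

/* rlwinm tmp,v,8,0,31 ; rlwimi tmp,v,24,0,7 ; rlwimi tmp,v,24,16,23 */
static uint32_t jit_style_bswap32(uint32_t v)
{
        uint32_t tmp = rotl32(v, 8);                               /* bytes 2,4 placed */
        tmp = (tmp & 0x00ffffffu) | (rotl32(v, 24) & 0xff000000u); /* insert byte 1    */
        tmp = (tmp & 0xffff00ffu) | (rotl32(v, 24) & 0x0000ff00u); /* insert byte 3    */
        return tmp;
}

int main(void)
{
        printf("%08x\n", jit_style_bswap32(0x11223344));        /* 44332211 */
        return 0;
}
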
+
+ /*
+ * BPF_ST(X)
+ */
+ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STB(src_reg, dst_reg, off);
+ break;
+ case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STH(src_reg, dst_reg, off);
+ break;
+ case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STW(src_reg, dst_reg, off);
+ break;
+ case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STD(src_reg, dst_reg, off);
+ break;
+
+ /*
+ * BPF_STX XADD (atomic_add)
+ */
+ /* *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* Get EA into TMP_REG_1 */
+ PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+ /* error if EA is not word-aligned */
+ PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ /* load value from memory into TMP_REG_2 */
+ PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ /* add value from src_reg into this */
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ /* store result back */
+ PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ /* we're done if this succeeded */
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+ /* otherwise, let's try once more */
+ PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ /* exit if the store was not successful */
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_BCC(COND_NE, exit_addr);
+ break;
+ /* *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+ /* error if EA is not doubleword-aligned */
+ PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+ PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_BCC(COND_NE, exit_addr);
+ break;
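
Semantically, the two XADD cases above behave like an aligned atomic
fetch-and-add, except that the generated code exits the program (returning 0)
on a misaligned address or when the single stwcx. retry also fails. A rough
user-space equivalent using the GCC/Clang atomic builtin instead of
lwarx/stwcx. (the function name is mine, the builtin is the compiler's):

#include <stdint.h>
#include <stdio.h>

static int xadd32(uint32_t *addr, uint32_t val)
{
        if ((uintptr_t)addr & 0x3)
                return -1;      /* JIT branches to the epilogue with r3 = 0 */
        /* the JIT retries the lwarx/stwcx. pair once; the builtin loops */
        __atomic_fetch_add(addr, val, __ATOMIC_RELAXED);
        return 0;
}

int main(void)
{
        uint32_t counter = 40;

        xadd32(&counter, 2);
        printf("%u\n", counter);        /* 42 */
        return 0;
}
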
+
+ /*
+ * BPF_LDX
+ */
+ /* dst = *(u8 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+ PPC_LBZ(dst_reg, src_reg, off);
+ break;
+ /* dst = *(u16 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_H:
+ PPC_LHZ(dst_reg, src_reg, off);
+ break;
+ /* dst = *(u32 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_W:
+ PPC_LWZ(dst_reg, src_reg, off);
+ break;
+ /* dst = *(u64 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ PPC_LD(dst_reg, src_reg, off);
+ break;
+
+ /*
+ * Doubleword load
+ * 16 byte instruction that uses two 'struct bpf_insn'
+ */
+ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
+ imm64 = ((u64)(u32) insn[i].imm) |
+ (((u64)(u32) insn[i+1].imm) << 32);
+ /* Adjust for two bpf instructions */
+ addrs[++i] = ctx->idx * 4;
+ PPC_LI64(dst_reg, imm64);
+ break;
+
+ /*
+ * Return/Exit
+ */
+ case BPF_JMP | BPF_EXIT:
+ /*
+ * If this isn't the very last instruction, branch to
+ * the epilogue. If we _are_ the last instruction,
+ * we'll just fall through to the epilogue.
+ */
+ if (i != flen - 1)
+ PPC_JMP(exit_addr);
+ /* else fall through to the epilogue */
+ break;
+
+ /*
+ * Call kernel helper
+ */
+ case BPF_JMP | BPF_CALL:
+ ctx->seen |= SEEN_FUNC;
+ func = (u8 *) __bpf_call_base + imm;
+
+ /* Save skb pointer if we need to re-cache skb data */
+ if (bpf_helper_changes_skb_data(func))
+ PPC_BPF_STL(3, 1, STACK_FRAME_MIN_SIZE);
+
+ bpf_jit_emit_func_call(image, ctx, (u64)func);
+
+ /* move return value from r3 to BPF_REG_0 */
+ PPC_MR(b2p[BPF_REG_0], 3);
+
+ /* refresh skb cache */
+ if (bpf_helper_changes_skb_data(func)) {
+ /* reload skb pointer to r3 */
+ PPC_BPF_LL(3, 1, STACK_FRAME_MIN_SIZE);
+ bpf_jit_emit_skb_loads(image, ctx);
+ }
+ break;
+
+ /*
+ * Jumps and branches
+ */
+ case BPF_JMP | BPF_JA:
+ PPC_JMP(addrs[i + 1 + off]);
+ break;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ true_cond = COND_GT;
+ goto cond_branch;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ true_cond = COND_GE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ true_cond = COND_EQ;
+ goto cond_branch;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ true_cond = COND_NE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ true_cond = COND_NE;
+ /* Fall through */
+
+cond_branch:
+ switch (code) {
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ /* unsigned comparison */
+ PPC_CMPLD(dst_reg, src_reg);
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ /* signed comparison */
+ PPC_CMPD(dst_reg, src_reg);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
+ break;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ /*
+ * Need sign-extended load, so only positive
+ * values can be used as imm in cmpldi
+ */
+ if (imm >= 0 && imm < 32768)
+ PPC_CMPLDI(dst_reg, imm);
+ else {
+ /* sign-extending load */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ /* ... but unsigned comparison */
+ PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ /*
+ * signed comparison, so any 16-bit value
+ * can be used in cmpdi
+ */
+ if (imm >= -32768 && imm < 32768)
+ PPC_CMPDI(dst_reg, imm);
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
+ }
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ /* andi does not sign-extend the immediate */
+ if (imm >= 0 && imm < 32768)
+ /* PPC_ANDI is _only/always_ dot-form */
+ PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
+ b2p[TMP_REG_1]);
+ }
+ break;
+ }
+ PPC_BCC(true_cond, addrs[i + 1 + off]);
+ break;
+
+ /*
+ * Loads from packet header/data
+ * Assume 32-bit input value in imm and X (src_reg)
+ */
+
+ /* Absolute loads */
+ case BPF_LD | BPF_W | BPF_ABS:
+ func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
+ goto common_load_abs;
+ case BPF_LD | BPF_H | BPF_ABS:
+ func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
+ goto common_load_abs;
+ case BPF_LD | BPF_B | BPF_ABS:
+ func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
+common_load_abs:
+ /*
+ * Load from [imm]
+ * Load into r4, which can just be passed onto
+ * skb load helpers as the second parameter
+ */
+ PPC_LI32(4, imm);
+ goto common_load;
+
+ /* Indirect loads */
+ case BPF_LD | BPF_W | BPF_IND:
+ func = (u8 *)sk_load_word;
+ goto common_load_ind;
+ case BPF_LD | BPF_H | BPF_IND:
+ func = (u8 *)sk_load_half;
+ goto common_load_ind;
+ case BPF_LD | BPF_B | BPF_IND:
+ func = (u8 *)sk_load_byte;
+common_load_ind:
+ /*
+ * Load from [src_reg + imm]
+ * Treat src_reg as a 32-bit value
+ */
+ PPC_EXTSW(4, src_reg);
+ if (imm) {
+ if (imm >= -32768 && imm < 32768)
+ PPC_ADDI(4, 4, IMM_L(imm));
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_ADD(4, 4, b2p[TMP_REG_1]);
+ }
+ }
+
+common_load:
+ ctx->seen |= SEEN_SKB;
+ ctx->seen |= SEEN_FUNC;
+ bpf_jit_emit_func_call(image, ctx, (u64)func);
+
+ /*
+ * Helper returns 'lt' condition on error, and an
+ * appropriate return value in BPF_REG_0
+ */
+ PPC_BCC(COND_LT, exit_addr);
+ break;
+
+ /*
+ * TODO: Tail call
+ */
+ case BPF_JMP | BPF_CALL | BPF_X:
+
+ default:
+ /*
+ * The filter contains something cruel & unusual.
+ * We don't handle it, but also there shouldn't be
+ * anything missing from our list.
+ */
+ pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
+ code, i);
+ return -ENOTSUPP;
+ }
+ }
+
+ /* Set end-of-body-code address for exit. */
+ addrs[i] = ctx->idx * 4;
+
+ return 0;
+}
+
+void bpf_jit_compile(struct bpf_prog *fp) { }
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+{
+ u32 proglen;
+ u32 alloclen;
+ u8 *image = NULL;
+ u32 *code_base;
+ u32 *addrs;
+ struct codegen_context cgctx;
+ int pass;
+ int flen;
+ struct bpf_binary_header *bpf_hdr;
+
+ if (!bpf_jit_enable)
+ return fp;
+
+ flen = fp->len;
+ addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
+ if (addrs == NULL)
+ return fp;
+
+ cgctx.idx = 0;
+ cgctx.seen = 0;
+ /* Scouting faux-generate pass 0 */
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
+ /* We hit something illegal or unsupported. */
+ goto out;
+
+ /*
+ * Pretend to build prologue, given the features we've seen. This will
+ * update cgctx.idx as it pretends to output instructions, then we can
+ * calculate total size from idx.
+ */
+ bpf_jit_build_prologue(0, &cgctx);
+ bpf_jit_build_epilogue(0, &cgctx);
+
+ proglen = cgctx.idx * 4;
+ alloclen = proglen + FUNCTION_DESCR_SIZE;
+
+ bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
+ bpf_jit_fill_ill_insns);
+ if (!bpf_hdr)
+ goto out;
+
+ code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+
+ /* Code generation passes 1-2 */
+ for (pass = 1; pass < 3; pass++) {
+ /* Now build the prologue, body code & epilogue for real. */
+ cgctx.idx = 0;
+ bpf_jit_build_prologue(code_base, &cgctx);
+ bpf_jit_build_body(fp, code_base, &cgctx, addrs);
+ bpf_jit_build_epilogue(code_base, &cgctx);
+
+ if (bpf_jit_enable > 1)
+ pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
+ proglen - (cgctx.idx * 4), cgctx.seen);
+ }
+
+ if (bpf_jit_enable > 1)
+ /*
+ * Note that we output the base address of the code_base
+ * rather than image, since opcodes are in code_base.
+ */
+ bpf_jit_dump(flen, proglen, pass, code_base);
+
+ if (image) {
+ bpf_flush_icache(bpf_hdr, image + alloclen);
+#ifdef PPC64_ELF_ABI_v1
+ /* Function descriptor nastiness: Address + TOC */
+ ((u64 *)image)[0] = (u64)code_base;
+ ((u64 *)image)[1] = local_paca->kernel_toc;
+#endif
+ fp->bpf_func = (void *)image;
+ fp->jited = 1;
+ }
+
+out:
+ kfree(addrs);
+ return fp;
+}
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+ struct bpf_binary_header *bpf_hdr = (void *)addr;
+
+ if (fp->jited)
+ bpf_jit_binary_free(bpf_hdr);
+
+ bpf_prog_unlock_free(fp);
+}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index ed7b0977072a..ef2142ff7dbd 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -51,7 +51,7 @@ static void spu_buff_add(unsigned long int value, int spu)
* That way we can tell the difference between the
* buffer being full versus empty.
*
- * ASSUPTION: the buffer_lock is held when this function
+ * ASSUMPTION: the buffer_lock is held when this function
* is called to lock the buffer, head and tail.
*/
int full = 1;
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 77b6394a7c50..f102d5370101 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o \
- power8-pmu.o
+ isa207-common.o power8-pmu.o power9-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 97a1d40d8696..4ed377f0f7b2 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -992,7 +992,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
* than the previous value it will cause the delta and the counter to
 * have bogus values unless we rolled a counter over. If a counter is
* rolled back, it will be smaller, but within 256, which is the maximum
- * number of events to rollback at once. If we dectect a rollback
+ * number of events to rollback at once. If we detect a rollback
* return 0. This can lead to a small lack of precision in the
* counters.
*/
@@ -2158,31 +2158,15 @@ static void perf_event_interrupt(struct pt_regs *regs)
irq_exit();
}
-static void power_pmu_setup(int cpu)
+int power_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
- if (!ppmu)
- return;
- memset(cpuhw, 0, sizeof(*cpuhw));
- cpuhw->mmcr[0] = MMCR0_FC;
-}
-
-static int
-power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- power_pmu_setup(cpu);
- break;
-
- default:
- break;
+ if (ppmu) {
+ memset(cpuhw, 0, sizeof(*cpuhw));
+ cpuhw->mmcr[0] = MMCR0_FC;
}
-
- return NOTIFY_OK;
+ return 0;
}
int register_power_pmu(struct power_pmu *pmu)
@@ -2205,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
#endif /* CONFIG_PPC64 */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
- perf_cpu_notifier(power_pmu_notifier);
-
+ cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
+ power_pmu_prepare_cpu, NULL);
return 0;
}
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 2da41b78cb6d..7b2ca16b1eb4 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1298,7 +1298,7 @@ static void h_24x7_event_read(struct perf_event *event)
__this_cpu_write(hv_24x7_txn_err, ret);
} else {
/*
- * Assoicate the event with the HCALL request index,
+ * Associate the event with the HCALL request index,
* so ->commit_txn() can quickly find/update count.
*/
i = request_buffer->num_requests - 1;
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 791455e7f5cf..634ef4082cdc 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -66,7 +66,7 @@ struct hv_24x7_result_element {
/* -1 if @performance_domain does not refer to a virtual processor */
__be32 lpar_cfg_instance_id;
- /* size = @result_element_data_size of cointaining result. */
+ /* size = @result_element_data_size of containing result. */
__u64 element_data[1];
} __packed;
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
new file mode 100644
index 000000000000..6143c99f3ec5
--- /dev/null
+++ b/arch/powerpc/perf/isa207-common.c
@@ -0,0 +1,263 @@
+/*
+ * Common Performance counter support functions for PowerISA v2.07 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "isa207-common.h"
+
+static inline bool event_is_fab_match(u64 event)
+{
+ /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
+ event &= 0xff0fe;
+
+ /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
+ return (event == 0x30056 || event == 0x4f052);
+}
+
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+{
+ unsigned int unit, pmc, cache, ebb;
+ unsigned long mask, value;
+
+ mask = value = 0;
+
+ if (event & ~EVENT_VALID_MASK)
+ return -1;
+
+ pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
+ ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
+
+ if (pmc) {
+ u64 base_event;
+
+ if (pmc > 6)
+ return -1;
+
+ /* Ignore Linux defined bits when checking event below */
+ base_event = event & ~EVENT_LINUX_MASK;
+
+ if (pmc >= 5 && base_event != 0x500fa &&
+ base_event != 0x600f4)
+ return -1;
+
+ mask |= CNST_PMC_MASK(pmc);
+ value |= CNST_PMC_VAL(pmc);
+ }
+
+ if (pmc <= 4) {
+ /*
+ * Add to number of counters in use. Note this includes events with
+ * a PMC of 0 - they still need a PMC, it's just assigned later.
+ * Don't count events on PMC 5 & 6, there is only one valid event
+ * on each of those counters, and they are handled above.
+ */
+ mask |= CNST_NC_MASK;
+ value |= CNST_NC_VAL;
+ }
+
+ if (unit >= 6 && unit <= 9) {
+ /*
+ * L2/L3 events contain a cache selector field, which is
+ * supposed to be programmed into MMCRC. However MMCRC is only
+ * HV writable, and there is no API for guest kernels to modify
+ * it. The solution is for the hypervisor to initialise the
+ * field to zeroes, and for us to only ever allow events that
+ * have a cache selector of zero. The bank selector (bit 3) is
+ * irrelevant, as long as the rest of the value is 0.
+ */
+ if (cache & 0x7)
+ return -1;
+
+ } else if (event & EVENT_IS_L1) {
+ mask |= CNST_L1_QUAL_MASK;
+ value |= CNST_L1_QUAL_VAL(cache);
+ }
+
+ if (event & EVENT_IS_MARKED) {
+ mask |= CNST_SAMPLE_MASK;
+ value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
+ }
+
+ /*
+ * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold control bits are used for the match value.
+ */
+ if (event_is_fab_match(event)) {
+ mask |= CNST_FAB_MATCH_MASK;
+ value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
+ } else {
+ /*
+ * Check the mantissa upper two bits are not zero, unless the
+ * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
+ */
+ unsigned int cmp, exp;
+
+ cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ exp = cmp >> 7;
+
+ if (exp && (cmp & 0x60) == 0)
+ return -1;
+
+ mask |= CNST_THRESH_MASK;
+ value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
+ }
+
+ if (!pmc && ebb)
+ /* EBB events must specify the PMC */
+ return -1;
+
+ if (event & EVENT_WANTS_BHRB) {
+ if (!ebb)
+ /* Only EBB events can request BHRB */
+ return -1;
+
+ mask |= CNST_IFM_MASK;
+ value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
+ }
+
+ /*
+ * All events must agree on EBB, either all request it or none.
+ * EBB events are pinned & exclusive, so this should never actually
+ * hit, but we leave it as a fallback in case.
+ */
+ mask |= CNST_EBB_VAL(ebb);
+ value |= CNST_EBB_MASK;
+
+ *maskp = mask;
+ *valp = value;
+
+ return 0;
+}
+
+int isa207_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[],
+ struct perf_event *pevents[])
+{
+ unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
+ unsigned int pmc, pmc_inuse;
+ int i;
+
+ pmc_inuse = 0;
+
+ /* First pass to count resource use */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ if (pmc)
+ pmc_inuse |= 1 << pmc;
+ }
+
+ /* In continuous sampling mode, update SDAR on TLB miss */
+ mmcra = MMCRA_SDAR_MODE_TLB;
+ mmcr1 = mmcr2 = 0;
+
+ /* Second pass: assign PMCs, set all MMCR1 fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
+ psel = event[i] & EVENT_PSEL_MASK;
+
+ if (!pmc) {
+ for (pmc = 1; pmc <= 4; ++pmc) {
+ if (!(pmc_inuse & (1 << pmc)))
+ break;
+ }
+
+ pmc_inuse |= 1 << pmc;
+ }
+
+ if (pmc <= 4) {
+ mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
+ mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
+ mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
+ }
+
+ if (event[i] & EVENT_IS_L1) {
+ cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
+ mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
+ cache >>= 1;
+ mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
+ }
+
+ if (event[i] & EVENT_IS_MARKED) {
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+
+ val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val) {
+ mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
+ mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
+ }
+ }
+
+ /*
+ * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold bits are used for the match value.
+ */
+ if (event_is_fab_match(event[i])) {
+ mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+ EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
+ } else {
+ val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+ mmcra |= val << MMCRA_THR_CTL_SHIFT;
+ val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
+ mmcra |= val << MMCRA_THR_SEL_SHIFT;
+ val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ mmcra |= val << MMCRA_THR_CMP_SHIFT;
+ }
+
+ if (event[i] & EVENT_WANTS_BHRB) {
+ val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
+ mmcra |= val << MMCRA_IFM_SHIFT;
+ }
+
+ if (pevents[i]->attr.exclude_user)
+ mmcr2 |= MMCR2_FCP(pmc);
+
+ if (pevents[i]->attr.exclude_hv)
+ mmcr2 |= MMCR2_FCH(pmc);
+
+ if (pevents[i]->attr.exclude_kernel) {
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ mmcr2 |= MMCR2_FCH(pmc);
+ else
+ mmcr2 |= MMCR2_FCS(pmc);
+ }
+
+ hwc[i] = pmc - 1;
+ }
+
+ /* Return MMCRx values */
+ mmcr[0] = 0;
+
+ /* pmc_inuse is 1-based */
+ if (pmc_inuse & 2)
+ mmcr[0] = MMCR0_PMC1CE;
+
+ if (pmc_inuse & 0x7c)
+ mmcr[0] |= MMCR0_PMCjCE;
+
+ /* If we're not using PMC 5 or 6, freeze them */
+ if (!(pmc_inuse & 0x60))
+ mmcr[0] |= MMCR0_FC56;
+
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ mmcr[3] = mmcr2;
+
+ return 0;
+}
+
+void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
+ if (pmc <= 3)
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
+}
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
new file mode 100644
index 000000000000..4d0a4e5017c2
--- /dev/null
+++ b/arch/powerpc/perf/isa207-common.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or any later version.
+ */
+
+#ifndef _LINUX_POWERPC_PERF_ISA207_COMMON_H_
+#define _LINUX_POWERPC_PERF_ISA207_COMMON_H_
+
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <asm/firmware.h>
+#include <asm/cputable.h>
+
+/*
+ * Raw event encoding for PowerISA v2.07:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * | | [ ] [ thresh_cmp ] [ thresh_ctl ]
+ * | | | |
+ * | | *- IFM (Linux) thresh start/stop OR FAB match -*
+ * | *- BHRB (Linux)
+ * *- EBB (Linux)
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
+ * | | | | |
+ * | | | | *- mark
+ * | | *- L1/L2/L3 cache_sel |
+ * | | |
+ * | *- sampling mode for marked events *- combine
+ * |
+ * *- thresh_sel
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit (PMCxUNIT)
+ * MMCR1[x] = combine (PMCxCOMB)
+ *
+ * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
+ * # PM_MRK_FAB_RSP_MATCH
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
+ * # PM_MRK_FAB_RSP_MATCH_CYC
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else
+ * MMCRA[48:55] = thresh_ctl (THRESH START/END)
+ *
+ * if thresh_sel:
+ * MMCRA[45:47] = thresh_sel
+ *
+ * if thresh_cmp:
+ * MMCRA[22:24] = thresh_cmp[0:2]
+ * MMCRA[25:31] = thresh_cmp[3:9]
+ *
+ * if unit == 6 or unit == 7
+ * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
+ * else if unit == 8 or unit == 9:
+ * if cache_sel[0] == 0: # L3 bank
+ * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
+ * else if cache_sel[0] == 1:
+ * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
+ * else if cache_sel[1]: # L1 event
+ * MMCR1[16] = cache_sel[2]
+ * MMCR1[17] = cache_sel[3]
+ *
+ * if mark:
+ * MMCRA[63] = 1 (SAMPLE_ENABLE)
+ * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
+ * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
+ *
+ * if EBB and BHRB:
+ * MMCRA[32:33] = IFM
+ *
+ */
+
+#define EVENT_EBB_MASK 1ull
+#define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT
+#define EVENT_BHRB_MASK 1ull
+#define EVENT_BHRB_SHIFT 62
+#define EVENT_WANTS_BHRB (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
+#define EVENT_IFM_MASK 3ull
+#define EVENT_IFM_SHIFT 60
+#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
+#define EVENT_THR_CMP_MASK 0x3ff
+#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
+#define EVENT_THR_CTL_MASK 0xffull
+#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
+#define EVENT_THR_SEL_MASK 0x7
+#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
+#define EVENT_THRESH_MASK 0x1fffffull
+#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
+#define EVENT_SAMPLE_MASK 0x1f
+#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
+#define EVENT_CACHE_SEL_MASK 0xf
+#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
+#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
+#define EVENT_PMC_MASK 0xf
+#define EVENT_UNIT_SHIFT 12 /* Unit */
+#define EVENT_UNIT_MASK 0xf
+#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
+#define EVENT_COMBINE_MASK 0x1
+#define EVENT_MARKED_SHIFT 8 /* Marked bit */
+#define EVENT_MARKED_MASK 0x1
+#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
+#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
+
+/* Bits defined by Linux */
+#define EVENT_LINUX_MASK \
+ ((EVENT_EBB_MASK << EVENT_EBB_SHIFT) | \
+ (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | \
+ (EVENT_IFM_MASK << EVENT_IFM_SHIFT))
+
+#define EVENT_VALID_MASK \
+ ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
+ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
+ (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
+ (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
+ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
+ (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
+ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
+ EVENT_LINUX_MASK | \
+ EVENT_PSEL_MASK)
+
+#define ONLY_PLM \
+ (PERF_SAMPLE_BRANCH_USER |\
+ PERF_SAMPLE_BRANCH_KERNEL |\
+ PERF_SAMPLE_BRANCH_HV)
+
+/*
+ * Layout of constraint bits:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
+ * |
+ * thresh_sel -*
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
+ * | | | |
+ * BHRB IFM -* | | | Count of events for each PMC.
+ * EBB -* | | p1, p2, p3, p4, p5, p6.
+ * L1 I/D qualifier -* |
+ * nc - number of counters -*
+ *
+ * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
+ * we want the low bit of each field to be added to any existing value.
+ *
+ * Everything else is a value field.
+ */
+
+#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
+#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
+
+/* We just throw all the threshold bits into the constraint */
+#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
+#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
+
+#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24)
+#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK)
+
+#define CNST_IFM_VAL(v) (((v) & EVENT_IFM_MASK) << 25)
+#define CNST_IFM_MASK CNST_IFM_VAL(EVENT_IFM_MASK)
+
+#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
+#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
+
+#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
+#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
+
+/*
+ * For NC we are counting up to 4 events. This requires three bits, and we need
+ * the fifth event to overflow and set the 4th bit. To achieve that we bias the
+ * fields by 3 in test_adder.
+ */
+#define CNST_NC_SHIFT 12
+#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
+#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
+#define ISA207_TEST_ADDER (3 << CNST_NC_SHIFT)
+
+/*
+ * For the per-PMC fields we have two bits. The low bit is added, so if two
+ * events ask for the same PMC the sum will overflow, setting the high bit,
+ * indicating an error. So our mask sets the high bit.
+ */
+#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
+#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
+#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
+
+/* Our add_fields is defined as: */
+#define ISA207_ADD_FIELDS \
+ CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
+ CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
+
+
+/* Bits in MMCR1 for PowerISA v2.07 */
+#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
+#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
+#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT 36
+#define MMCR1_DC_QUAL_SHIFT 47
+#define MMCR1_IC_QUAL_SHIFT 46
+
+/* Bits in MMCRA for PowerISA v2.07 */
+#define MMCRA_SAMP_MODE_SHIFT 1
+#define MMCRA_SAMP_ELIG_SHIFT 4
+#define MMCRA_THR_CTL_SHIFT 8
+#define MMCRA_THR_SEL_SHIFT 16
+#define MMCRA_THR_CMP_SHIFT 32
+#define MMCRA_SDAR_MODE_TLB (1ull << 42)
+#define MMCRA_IFM_SHIFT 30
+
+/* Bits in MMCR2 for PowerISA v2.07 */
+#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
+#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
+#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
+
+#define MAX_ALT 2
+#define MAX_PMU_COUNTERS 6
+
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp);
+int isa207_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[],
+ struct perf_event *pevents[]);
+void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
+
+#endif
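
A self-contained sketch of the adder-field constraint scheme the header above describes, under the simplifying assumption of a single accumulated sum: per-PMC fields are two bits wide (value 1, mask 2), the NC field is biased by 3 via test_adder so the fifth generic event overflows into its mask bit. The kernel's real accumulation in the core perf code is more involved; only the overflow-detection idea is shown here.

/* Illustrative only: adder-field overflow detection, not kernel code. */
#include <stdio.h>

#define PMC_SHIFT(pmc)	(((pmc) - 1) * 2)
#define PMC_VAL(pmc)	(1UL << PMC_SHIFT(pmc))
#define PMC_MASK(pmc)	(2UL << PMC_SHIFT(pmc))
#define NC_SHIFT	12
#define NC_VAL		(1UL << NC_SHIFT)
#define NC_MASK		(8UL << NC_SHIFT)
#define TEST_ADDER	(3UL << NC_SHIFT)	/* bias NC by 3 */

/* Returns 0 if the accumulated constraints are still consistent, -1 on conflict. */
static int add_constraint(unsigned long *sum, unsigned long val, unsigned long mask)
{
	unsigned long nsum = *sum + val;	/* adder fields accumulate */

	if ((nsum + TEST_ADDER) & mask)		/* carry into a mask bit => conflict */
		return -1;
	*sum = nsum;
	return 0;
}

int main(void)
{
	unsigned long sum = 0;
	int i;

	/* Two events both asking for PMC1: the second one must fail. */
	printf("%d\n", add_constraint(&sum, PMC_VAL(1), PMC_MASK(1)));	/* prints  0 */
	printf("%d\n", add_constraint(&sum, PMC_VAL(1), PMC_MASK(1)));	/* prints -1 */

	/* NC: four generic events fit, the fifth overflows thanks to the bias. */
	sum = 0;
	for (i = 0; i < 5; i++)
		printf("event %d: %d\n", i + 1, add_constraint(&sum, NC_VAL, NC_MASK));
	return 0;
}
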
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 7cf3b4378192..5fde2b192fec 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -12,10 +12,7 @@
#define pr_fmt(fmt) "power8-pmu: " fmt
-#include <linux/kernel.h>
-#include <linux/perf_event.h>
-#include <asm/firmware.h>
-#include <asm/cputable.h>
+#include "isa207-common.h"
/*
* Some power8 event codes.
@@ -28,465 +25,11 @@ enum {
#undef EVENT
-/*
- * Raw event encoding for POWER8:
- *
- * 60 56 52 48 44 40 36 32
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * | | [ ] [ thresh_cmp ] [ thresh_ctl ]
- * | | | |
- * | | *- IFM (Linux) thresh start/stop OR FAB match -*
- * | *- BHRB (Linux)
- * *- EBB (Linux)
- *
- * 28 24 20 16 12 8 4 0
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
- * | | | | |
- * | | | | *- mark
- * | | *- L1/L2/L3 cache_sel |
- * | | |
- * | *- sampling mode for marked events *- combine
- * |
- * *- thresh_sel
- *
- * Below uses IBM bit numbering.
- *
- * MMCR1[x:y] = unit (PMCxUNIT)
- * MMCR1[x] = combine (PMCxCOMB)
- *
- * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
- * # PM_MRK_FAB_RSP_MATCH
- * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
- * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
- * # PM_MRK_FAB_RSP_MATCH_CYC
- * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
- * else
- * MMCRA[48:55] = thresh_ctl (THRESH START/END)
- *
- * if thresh_sel:
- * MMCRA[45:47] = thresh_sel
- *
- * if thresh_cmp:
- * MMCRA[22:24] = thresh_cmp[0:2]
- * MMCRA[25:31] = thresh_cmp[3:9]
- *
- * if unit == 6 or unit == 7
- * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
- * else if unit == 8 or unit == 9:
- * if cache_sel[0] == 0: # L3 bank
- * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
- * else if cache_sel[0] == 1:
- * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
- * else if cache_sel[1]: # L1 event
- * MMCR1[16] = cache_sel[2]
- * MMCR1[17] = cache_sel[3]
- *
- * if mark:
- * MMCRA[63] = 1 (SAMPLE_ENABLE)
- * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
- * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
- *
- * if EBB and BHRB:
- * MMCRA[32:33] = IFM
- *
- */
-
-#define EVENT_EBB_MASK 1ull
-#define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT
-#define EVENT_BHRB_MASK 1ull
-#define EVENT_BHRB_SHIFT 62
-#define EVENT_WANTS_BHRB (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
-#define EVENT_IFM_MASK 3ull
-#define EVENT_IFM_SHIFT 60
-#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
-#define EVENT_THR_CMP_MASK 0x3ff
-#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
-#define EVENT_THR_CTL_MASK 0xffull
-#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
-#define EVENT_THR_SEL_MASK 0x7
-#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
-#define EVENT_THRESH_MASK 0x1fffffull
-#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
-#define EVENT_SAMPLE_MASK 0x1f
-#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
-#define EVENT_CACHE_SEL_MASK 0xf
-#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
-#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
-#define EVENT_PMC_MASK 0xf
-#define EVENT_UNIT_SHIFT 12 /* Unit */
-#define EVENT_UNIT_MASK 0xf
-#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
-#define EVENT_COMBINE_MASK 0x1
-#define EVENT_MARKED_SHIFT 8 /* Marked bit */
-#define EVENT_MARKED_MASK 0x1
-#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
-#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
-
-/* Bits defined by Linux */
-#define EVENT_LINUX_MASK \
- ((EVENT_EBB_MASK << EVENT_EBB_SHIFT) | \
- (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | \
- (EVENT_IFM_MASK << EVENT_IFM_SHIFT))
-
-#define EVENT_VALID_MASK \
- ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
- (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
- (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
- (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
- (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
- (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
- (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
- EVENT_LINUX_MASK | \
- EVENT_PSEL_MASK)
-
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
-#define ONLY_PLM \
- (PERF_SAMPLE_BRANCH_USER |\
- PERF_SAMPLE_BRANCH_KERNEL |\
- PERF_SAMPLE_BRANCH_HV)
-
-/*
- * Layout of constraint bits:
- *
- * 60 56 52 48 44 40 36 32
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
- * |
- * thresh_sel -*
- *
- * 28 24 20 16 12 8 4 0
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
- * | | | |
- * BHRB IFM -* | | | Count of events for each PMC.
- * EBB -* | | p1, p2, p3, p4, p5, p6.
- * L1 I/D qualifier -* |
- * nc - number of counters -*
- *
- * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
- * we want the low bit of each field to be added to any existing value.
- *
- * Everything else is a value field.
- */
-
-#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
-#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
-
-/* We just throw all the threshold bits into the constraint */
-#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
-#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
-
-#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24)
-#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK)
-
-#define CNST_IFM_VAL(v) (((v) & EVENT_IFM_MASK) << 25)
-#define CNST_IFM_MASK CNST_IFM_VAL(EVENT_IFM_MASK)
-
-#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
-#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
-
-#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
-#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
-
-/*
- * For NC we are counting up to 4 events. This requires three bits, and we need
- * the fifth event to overflow and set the 4th bit. To achieve that we bias the
- * fields by 3 in test_adder.
- */
-#define CNST_NC_SHIFT 12
-#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
-#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
-#define POWER8_TEST_ADDER (3 << CNST_NC_SHIFT)
-
-/*
- * For the per-PMC fields we have two bits. The low bit is added, so if two
- * events ask for the same PMC the sum will overflow, setting the high bit,
- * indicating an error. So our mask sets the high bit.
- */
-#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
-#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
-#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
-
-/* Our add_fields is defined as: */
-#define POWER8_ADD_FIELDS \
- CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
- CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
-
-
-/* Bits in MMCR1 for POWER8 */
-#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
-#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
-#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
-#define MMCR1_FAB_SHIFT 36
-#define MMCR1_DC_QUAL_SHIFT 47
-#define MMCR1_IC_QUAL_SHIFT 46
-
-/* Bits in MMCRA for POWER8 */
-#define MMCRA_SAMP_MODE_SHIFT 1
-#define MMCRA_SAMP_ELIG_SHIFT 4
-#define MMCRA_THR_CTL_SHIFT 8
-#define MMCRA_THR_SEL_SHIFT 16
-#define MMCRA_THR_CMP_SHIFT 32
-#define MMCRA_SDAR_MODE_TLB (1ull << 42)
-#define MMCRA_IFM_SHIFT 30
-
-/* Bits in MMCR2 for POWER8 */
-#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
-#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
-#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
-
-
-static inline bool event_is_fab_match(u64 event)
-{
- /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
- event &= 0xff0fe;
-
- /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
- return (event == 0x30056 || event == 0x4f052);
-}
-
-static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
-{
- unsigned int unit, pmc, cache, ebb;
- unsigned long mask, value;
-
- mask = value = 0;
-
- if (event & ~EVENT_VALID_MASK)
- return -1;
-
- pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
- ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
-
- if (pmc) {
- u64 base_event;
-
- if (pmc > 6)
- return -1;
-
- /* Ignore Linux defined bits when checking event below */
- base_event = event & ~EVENT_LINUX_MASK;
-
- if (pmc >= 5 && base_event != PM_RUN_INST_CMPL &&
- base_event != PM_RUN_CYC)
- return -1;
-
- mask |= CNST_PMC_MASK(pmc);
- value |= CNST_PMC_VAL(pmc);
- }
-
- if (pmc <= 4) {
- /*
- * Add to number of counters in use. Note this includes events with
- * a PMC of 0 - they still need a PMC, it's just assigned later.
- * Don't count events on PMC 5 & 6, there is only one valid event
- * on each of those counters, and they are handled above.
- */
- mask |= CNST_NC_MASK;
- value |= CNST_NC_VAL;
- }
-
- if (unit >= 6 && unit <= 9) {
- /*
- * L2/L3 events contain a cache selector field, which is
- * supposed to be programmed into MMCRC. However MMCRC is only
- * HV writable, and there is no API for guest kernels to modify
- * it. The solution is for the hypervisor to initialise the
- * field to zeroes, and for us to only ever allow events that
- * have a cache selector of zero. The bank selector (bit 3) is
- * irrelevant, as long as the rest of the value is 0.
- */
- if (cache & 0x7)
- return -1;
-
- } else if (event & EVENT_IS_L1) {
- mask |= CNST_L1_QUAL_MASK;
- value |= CNST_L1_QUAL_VAL(cache);
- }
-
- if (event & EVENT_IS_MARKED) {
- mask |= CNST_SAMPLE_MASK;
- value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
- }
-
- /*
- * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
- * the threshold control bits are used for the match value.
- */
- if (event_is_fab_match(event)) {
- mask |= CNST_FAB_MATCH_MASK;
- value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
- } else {
- /*
- * Check the mantissa upper two bits are not zero, unless the
- * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
- */
- unsigned int cmp, exp;
-
- cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- exp = cmp >> 7;
-
- if (exp && (cmp & 0x60) == 0)
- return -1;
-
- mask |= CNST_THRESH_MASK;
- value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
- }
-
- if (!pmc && ebb)
- /* EBB events must specify the PMC */
- return -1;
-
- if (event & EVENT_WANTS_BHRB) {
- if (!ebb)
- /* Only EBB events can request BHRB */
- return -1;
-
- mask |= CNST_IFM_MASK;
- value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
- }
-
- /*
- * All events must agree on EBB, either all request it or none.
- * EBB events are pinned & exclusive, so this should never actually
- * hit, but we leave it as a fallback in case.
- */
- mask |= CNST_EBB_VAL(ebb);
- value |= CNST_EBB_MASK;
-
- *maskp = mask;
- *valp = value;
-
- return 0;
-}
-
-static int power8_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[],
- struct perf_event *pevents[])
-{
- unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
- unsigned int pmc, pmc_inuse;
- int i;
-
- pmc_inuse = 0;
-
- /* First pass to count resource use */
- for (i = 0; i < n_ev; ++i) {
- pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- if (pmc)
- pmc_inuse |= 1 << pmc;
- }
-
- /* In continuous sampling mode, update SDAR on TLB miss */
- mmcra = MMCRA_SDAR_MODE_TLB;
- mmcr1 = mmcr2 = 0;
-
- /* Second pass: assign PMCs, set all MMCR1 fields */
- for (i = 0; i < n_ev; ++i) {
- pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
- psel = event[i] & EVENT_PSEL_MASK;
-
- if (!pmc) {
- for (pmc = 1; pmc <= 4; ++pmc) {
- if (!(pmc_inuse & (1 << pmc)))
- break;
- }
-
- pmc_inuse |= 1 << pmc;
- }
-
- if (pmc <= 4) {
- mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
- mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
- mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
- }
-
- if (event[i] & EVENT_IS_L1) {
- cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
- mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
- cache >>= 1;
- mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
- }
-
- if (event[i] & EVENT_IS_MARKED) {
- mmcra |= MMCRA_SAMPLE_ENABLE;
-
- val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
- if (val) {
- mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
- mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
- }
- }
-
- /*
- * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
- * the threshold bits are used for the match value.
- */
- if (event_is_fab_match(event[i])) {
- mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
- EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
- } else {
- val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
- mmcra |= val << MMCRA_THR_CTL_SHIFT;
- val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
- mmcra |= val << MMCRA_THR_SEL_SHIFT;
- val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- mmcra |= val << MMCRA_THR_CMP_SHIFT;
- }
-
- if (event[i] & EVENT_WANTS_BHRB) {
- val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
- mmcra |= val << MMCRA_IFM_SHIFT;
- }
-
- if (pevents[i]->attr.exclude_user)
- mmcr2 |= MMCR2_FCP(pmc);
-
- if (pevents[i]->attr.exclude_hv)
- mmcr2 |= MMCR2_FCH(pmc);
-
- if (pevents[i]->attr.exclude_kernel) {
- if (cpu_has_feature(CPU_FTR_HVMODE))
- mmcr2 |= MMCR2_FCH(pmc);
- else
- mmcr2 |= MMCR2_FCS(pmc);
- }
-
- hwc[i] = pmc - 1;
- }
-
- /* Return MMCRx values */
- mmcr[0] = 0;
-
- /* pmc_inuse is 1-based */
- if (pmc_inuse & 2)
- mmcr[0] = MMCR0_PMC1CE;
-
- if (pmc_inuse & 0x7c)
- mmcr[0] |= MMCR0_PMCjCE;
-
- /* If we're not using PMC 5 or 6, freeze them */
- if (!(pmc_inuse & 0x60))
- mmcr[0] |= MMCR0_FC56;
-
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
- mmcr[3] = mmcr2;
-
- return 0;
-}
-
-#define MAX_ALT 2
-
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
{ PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT },
@@ -567,12 +110,6 @@ static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return num_alt;
}
-static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
-{
- if (pmc <= 3)
- mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
-}
-
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
@@ -841,16 +378,16 @@ static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
static struct power_pmu power8_pmu = {
.name = "POWER8",
- .n_counter = 6,
+ .n_counter = MAX_PMU_COUNTERS,
.max_alternatives = MAX_ALT + 1,
- .add_fields = POWER8_ADD_FIELDS,
- .test_adder = POWER8_TEST_ADDER,
- .compute_mmcr = power8_compute_mmcr,
+ .add_fields = ISA207_ADD_FIELDS,
+ .test_adder = ISA207_TEST_ADDER,
+ .compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power8_config_bhrb,
.bhrb_filter_map = power8_bhrb_filter_map,
- .get_constraint = power8_get_constraint,
+ .get_constraint = isa207_get_constraint,
.get_alternatives = power8_get_alternatives,
- .disable_pmc = power8_disable_pmc,
+ .disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
new file mode 100644
index 000000000000..cda6fcb809ca
--- /dev/null
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -0,0 +1,55 @@
+/*
+ * Performance counter support for POWER9 processors.
+ *
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Power9 event codes.
+ */
+EVENT(PM_CYC, 0x0001e)
+EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
+EVENT(PM_CMPLU_STALL, 0x1e054)
+EVENT(PM_INST_CMPL, 0x00002)
+EVENT(PM_BRU_CMPL, 0x40060)
+EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+
+/* All L1 D cache load references counted at finish, gated by reject */
+EVENT(PM_LD_REF_L1, 0x100fc)
+/* Load Missed L1 */
+EVENT(PM_LD_MISS_L1_FIN, 0x2c04e)
+/* Store Missed L1 */
+EVENT(PM_ST_MISS_L1, 0x300f0)
+/* L1 cache data prefetches */
+EVENT(PM_L1_PREF, 0x20054)
+/* Instruction fetches from L1 */
+EVENT(PM_INST_FROM_L1, 0x04080)
+/* Demand iCache Miss */
+EVENT(PM_L1_ICACHE_MISS, 0x200fd)
+/* Instruction Demand sectors written into IL1 */
+EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
+/* Instruction prefetch written into IL1 */
+EVENT(PM_IC_PREF_WRITE, 0x0408e)
+/* The data cache was reloaded from local core's L3 due to a demand load */
+EVENT(PM_DATA_FROM_L3, 0x4c042)
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
+/* All successful D-side store dispatches for this thread */
+EVENT(PM_L2_ST, 0x16081)
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+EVENT(PM_L2_ST_MISS, 0x26081)
+/* Total HW L3 prefetches (Load+store) */
+EVENT(PM_L3_PREF_ALL, 0x4e052)
+/* Data PTEG reload */
+EVENT(PM_DTLB_MISS, 0x300fc)
+/* ITLB Reloaded */
+EVENT(PM_ITLB_MISS, 0x400fc)
+/* Run_Instructions */
+EVENT(PM_RUN_INST_CMPL, 0x500fa)
+/* Run_cycles */
+EVENT(PM_RUN_CYC, 0x600f4)
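
power9-events-list.h deliberately carries no include guard: each consumer defines EVENT() before including it, as power9-pmu.c below does to build an enum of event codes. A hedged, self-contained sketch of the same x-macro idea, with the event list folded into a local macro so the example compiles on its own (the kernel variant keeps the list in a separate header instead):

#include <stdio.h>

/* Stand-in for power9-events-list.h: one reusable list of (name, code) pairs. */
#define EVENT_LIST(EVENT)			\
	EVENT(PM_CYC,       0x0001e)		\
	EVENT(PM_INST_CMPL, 0x00002)

/* Expansion 1: an enum of event codes (what power9-pmu.c does). */
#define AS_ENUM(name, code) name = code,
enum { EVENT_LIST(AS_ENUM) };
#undef AS_ENUM

/* Expansion 2: a name/code table built from the very same list. */
#define AS_ROW(name, code) { #name, code },
static const struct { const char *name; unsigned long code; } rows[] = {
	EVENT_LIST(AS_ROW)
};
#undef AS_ROW

int main(void)
{
	printf("%s = 0x%lx\n", rows[0].name, rows[0].code);
	printf("PM_CYC enum value = 0x%x\n", PM_CYC);
	return 0;
}

Keeping the list in one place means the enum, sysfs event attributes, and any future tables cannot drift apart.
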
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
new file mode 100644
index 000000000000..788346303852
--- /dev/null
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -0,0 +1,330 @@
+/*
+ * Performance counter support for POWER9 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or later version.
+ */
+
+#define pr_fmt(fmt) "power9-pmu: " fmt
+
+#include "isa207-common.h"
+
+/*
+ * Some power9 event codes.
+ */
+#define EVENT(_name, _code) _name = _code,
+
+enum {
+#include "power9-events-list.h"
+};
+
+#undef EVENT
+
+/* MMCRA IFM bits - POWER9 */
+#define POWER9_MMCRA_IFM1 0x0000000040000000UL
+#define POWER9_MMCRA_IFM2 0x0000000080000000UL
+#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
+
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_CMPL);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
+CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
+CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
+CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
+CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
+CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
+CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(branch-loads, PM_BRU_CMPL);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
+
+static struct attribute *power9_events_attr[] = {
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(PM_CMPLU_STALL),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_BRU_CMPL),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_REF_L1),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
+ CACHE_EVENT_PTR(PM_LD_REF_L1),
+ CACHE_EVENT_PTR(PM_L1_PREF),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_INST_FROM_L1),
+ CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3),
+ CACHE_EVENT_PTR(PM_L3_PREF_ALL),
+ CACHE_EVENT_PTR(PM_L2_ST_MISS),
+ CACHE_EVENT_PTR(PM_L2_ST),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_BRU_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
+ NULL
+};
+
+static struct attribute_group power9_pmu_events_group = {
+ .name = "events",
+ .attrs = power9_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-49");
+PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
+PMU_FORMAT_ATTR(mark, "config:8");
+PMU_FORMAT_ATTR(combine, "config:11");
+PMU_FORMAT_ATTR(unit, "config:12-15");
+PMU_FORMAT_ATTR(pmc, "config:16-19");
+PMU_FORMAT_ATTR(cache_sel, "config:20-23");
+PMU_FORMAT_ATTR(sample_mode, "config:24-28");
+PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
+PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
+PMU_FORMAT_ATTR(thresh_start, "config:36-39");
+PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
+
+static struct attribute *power9_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_pmcxsel.attr,
+ &format_attr_mark.attr,
+ &format_attr_combine.attr,
+ &format_attr_unit.attr,
+ &format_attr_pmc.attr,
+ &format_attr_cache_sel.attr,
+ &format_attr_sample_mode.attr,
+ &format_attr_thresh_sel.attr,
+ &format_attr_thresh_stop.attr,
+ &format_attr_thresh_start.attr,
+ &format_attr_thresh_cmp.attr,
+ NULL,
+};
+
+struct attribute_group power9_pmu_format_group = {
+ .name = "format",
+ .attrs = power9_pmu_format_attr,
+};
+
+static const struct attribute_group *power9_pmu_attr_groups[] = {
+ &power9_pmu_format_group,
+ &power9_pmu_events_group,
+ NULL,
+};
+
+static int power9_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_CMPL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
+};
+
+static u64 power9_bhrb_filter_map(u64 branch_sample_type)
+{
+ u64 pmu_bhrb_filter = 0;
+
+ /* BHRB and regular PMU events share the same privilege state
+ * filter configuration. BHRB is always recorded along with a
+ * regular PMU event. As the privilege state filter is handled
+ * in the basic PMC configuration of the accompanying regular
+ * PMU event, we ignore any separate BHRB specific request.
+ */
+
+ /* No branch filter requested */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
+ return pmu_bhrb_filter;
+
+ /* Invalid branch filter options - HW does not support */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
+ pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
+ return pmu_bhrb_filter;
+ }
+
+ /* Everything else is unsupported */
+ return -1;
+}
+
+static void power9_config_bhrb(u64 pmu_bhrb_filter)
+{
+ /* Enable BHRB filter in PMU */
+ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
+}
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1_FIN,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_PREF,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L2_ST,
+ [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_BRU_CMPL,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+#undef C
+
+static struct power_pmu power9_pmu = {
+ .name = "POWER9",
+ .n_counter = MAX_PMU_COUNTERS,
+ .add_fields = ISA207_ADD_FIELDS,
+ .test_adder = ISA207_TEST_ADDER,
+ .compute_mmcr = isa207_compute_mmcr,
+ .config_bhrb = power9_config_bhrb,
+ .bhrb_filter_map = power9_bhrb_filter_map,
+ .get_constraint = isa207_get_constraint,
+ .disable_pmc = isa207_disable_pmc,
+ .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
+ .n_generic = ARRAY_SIZE(power9_generic_events),
+ .generic_events = power9_generic_events,
+ .cache_events = &power9_cache_events,
+ .attr_groups = power9_pmu_attr_groups,
+ .bhrb_nr = 32,
+};
+
+static int __init init_power9_pmu(void)
+{
+ int rc;
+
+ /* Comes from cpu_specs[] */
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
+ return -ENODEV;
+
+ rc = register_power_pmu(&power9_pmu);
+ if (rc)
+ return rc;
+
+ /* Tell userspace that EBB is supported */
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
+
+ return 0;
+}
+early_initcall(init_power9_pmu);
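
A hedged userspace sketch of how the filter map above is exercised: a perf_event_open() caller asking for PERF_SAMPLE_BRANCH_ANY_CALL ends up with POWER9_MMCRA_IFM1 programmed into MMCRA, while ANY_RETURN, IND_CALL and CALL requests are rejected with -1 by power9_bhrb_filter_map(). The attribute setup below is illustrative, not a complete perf tool.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	/* The only filter, besides "any branch", that the POWER9 map accepts. */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY_CALL;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}
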
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 6e287f1294fa..e3257f24a8a1 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -137,7 +137,7 @@ config STB03xxx
config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 40x
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
Enable gpiolib support for ppc40x based boards
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
index ddc12a1926ef..1c8aec6e9bb7 100644
--- a/arch/powerpc/platforms/40x/ep405.c
+++ b/arch/powerpc/platforms/40x/ep405.c
@@ -105,9 +105,7 @@ static void __init ep405_setup_arch(void)
static int __init ep405_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ep405"))
+ if (!of_machine_is_compatible("ep405"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index b0c46375dd95..2a050007bbae 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -63,7 +63,7 @@ static const char * const board[] __initconst = {
static int __init ppc40x_probe(void)
{
- if (of_flat_dt_match(of_get_flat_dt_root(), board)) {
+ if (of_device_compatible_match(of_root, board)) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
index 9aa7ae2f4164..91a08ea758a8 100644
--- a/arch/powerpc/platforms/40x/virtex.c
+++ b/arch/powerpc/platforms/40x/virtex.c
@@ -37,9 +37,7 @@ machine_device_initcall(virtex, virtex_device_probe);
static int __init virtex_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "xlnx,virtex"))
+ if (!of_machine_is_compatible("xlnx,virtex"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
index f7ac2d0fcb44..e5797815e2f1 100644
--- a/arch/powerpc/platforms/40x/walnut.c
+++ b/arch/powerpc/platforms/40x/walnut.c
@@ -46,9 +46,7 @@ machine_device_initcall(walnut, walnut_device_probe);
static int __init walnut_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,walnut"))
+ if (!of_machine_is_compatible("ibm,walnut"))
return 0;
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 5538e57c36c1..48fc18041ff6 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -273,7 +273,7 @@ config PPC44x_SIMPLE
config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 44x
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
Enable gpiolib support for ppc440 based boards
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
index 22ca5430c9cb..157f4ce46386 100644
--- a/arch/powerpc/platforms/44x/canyonlands.c
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -53,11 +53,10 @@ machine_device_initcall(canyonlands, ppc460ex_device_probe);
static int __init ppc460ex_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) {
+ if (of_machine_is_compatible("amcc,canyonlands")) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
- }
+ }
return 0;
}
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
index ae893226392d..1070225f5f9b 100644
--- a/arch/powerpc/platforms/44x/ebony.c
+++ b/arch/powerpc/platforms/44x/ebony.c
@@ -49,9 +49,7 @@ machine_device_initcall(ebony, ebony_device_probe);
*/
static int __init ebony_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,ebony"))
+ if (!of_machine_is_compatible("ibm,ebony"))
return 0;
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index c7c6758b3cfe..5f296dd6b1c0 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -149,9 +149,7 @@ static void __init iss4xx_setup_arch(void)
*/
static int __init iss4xx_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
+ if (!of_machine_is_compatible("ibm,iss-4xx"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 573c3d2689c6..8d6e4da9dfbe 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -67,11 +67,10 @@ static char *board[] __initdata = {
static int __init ppc44x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
int i = 0;
for (i = 0; i < ARRAY_SIZE(board); i++) {
- if (of_flat_dt_is_compatible(root, board[i])) {
+ if (of_machine_is_compatible(board[i])) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index c11ce6516c8f..e55933f9cd55 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -68,7 +68,7 @@ DECLARE_PCI_FIXUP_HEADER(0x1033, 0x0035, quirk_ppc_currituck_usb_fixup);
#define AVR_PWRCTL_RESET (0x02)
static struct i2c_client *avr_i2c_client;
-static void avr_halt_system(int pwrctl_flags)
+static void __noreturn avr_halt_system(int pwrctl_flags)
{
/* Request the AVR to reset the system */
i2c_smbus_write_byte_data(avr_i2c_client,
@@ -84,7 +84,7 @@ static void avr_power_off_system(void)
avr_halt_system(AVR_PWRCTL_PWROFF);
}
-static void avr_reset_system(char *cmd)
+static void __noreturn avr_reset_system(char *cmd)
{
avr_halt_system(AVR_PWRCTL_RESET);
}
@@ -275,12 +275,10 @@ static void ppc47x_pci_irq_fixup(struct pci_dev *dev)
*/
static int __init ppc47x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "ibm,akebono"))
+ if (of_machine_is_compatible("ibm,akebono"))
return 1;
- if (of_flat_dt_is_compatible(root, "ibm,currituck")) {
+ if (of_machine_is_compatible("ibm,currituck")) {
ppc_md.pci_irq_fixup = ppc47x_pci_irq_fixup;
return 1;
}
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 3ee4a03c1496..688ffeab0699 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -46,9 +46,7 @@ machine_device_initcall(sam440ep, sam440ep_device_probe);
static int __init sam440ep_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "acube,sam440ep"))
+ if (!of_machine_is_compatible("acube,sam440ep"))
return 0;
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
index ad272c17c640..a7e08026097a 100644
--- a/arch/powerpc/platforms/44x/virtex.c
+++ b/arch/powerpc/platforms/44x/virtex.c
@@ -43,9 +43,7 @@ machine_device_initcall(virtex, virtex_device_probe);
static int __init virtex_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "xlnx,virtex440"))
+ if (!of_machine_is_compatible("xlnx,virtex440"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 501333cf42cf..5ecce543103e 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -44,9 +44,7 @@ machine_device_initcall(warp, warp_device_probe);
static int __init warp_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "pika,warp"))
+ if (!of_machine_is_compatible("pika,warp"))
return 0;
/* For __dma_alloc_coherent */
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index f09016f6b3a6..bf7ae5cbd07a 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -6,7 +6,6 @@ config PPC_MPC512x
select IPIC
select PPC_PCI_CHOICE
select FSL_PCI if PCI
- select ARCH_WANT_OPTIONAL_GPIOLIB
select USB_EHCI_BIG_ENDIAN_MMIO if USB_EHCI_HCD
select USB_EHCI_BIG_ENDIAN_DESC if USB_EHCI_HCD
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index c50ea76ba66c..add5a5374fa0 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
/* convenience wrappers around the common clk API */
static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
{
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *mpc512x_clk_factor(
@@ -719,7 +719,7 @@ static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
* most one of a mux, div, and gate each into one 'struct clk'
* item
* - PSC/MSCAN/SPDIF clock generation OTOH already is very
- * specific and cannot get mapped to componsites (at least not
+ * specific and cannot get mapped to composites (at least not
* a single one, maybe two of them, but then some of these
* intermediate clock signals get referenced elsewhere (e.g.
* in the clock frequency measurement, CFM) and thus need
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c
index 3e90ece10ae9..f65d5033cdb0 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads.c
@@ -57,9 +57,12 @@ static void __init mpc5121_ads_init_IRQ(void)
*/
static int __init mpc5121_ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
+ if (!of_machine_is_compatible("fsl,mpc5121ads"))
+ return 0;
- return of_flat_dt_is_compatible(root, "fsl,mpc5121ads");
+ mpc512x_init_early();
+
+ return 1;
}
define_machine(mpc5121_ads) {
@@ -67,7 +70,6 @@ define_machine(mpc5121_ads) {
.probe = mpc5121_ads_probe,
.setup_arch = mpc5121_ads_setup_arch,
.init = mpc512x_init,
- .init_early = mpc512x_init_early,
.init_IRQ = mpc5121_ads_init_IRQ,
.get_irq = ipic_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h
index cc97f022d028..14ba49fd7938 100644
--- a/arch/powerpc/platforms/512x/mpc512x.h
+++ b/arch/powerpc/platforms/512x/mpc512x.h
@@ -18,6 +18,6 @@ extern void __init mpc512x_setup_arch(void);
extern int __init mpc5121_clk_init(void);
extern const char *mpc512x_select_psc_compat(void);
extern const char *mpc512x_select_reset_compat(void);
-extern void mpc512x_restart(char *cmd);
+extern void __noreturn mpc512x_restart(char *cmd);
#endif /* __MPC512X_H__ */
diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c
index ce71408781a0..bf884d3075e4 100644
--- a/arch/powerpc/platforms/512x/mpc512x_generic.c
+++ b/arch/powerpc/platforms/512x/mpc512x_generic.c
@@ -38,14 +38,18 @@ static const char * const board[] __initconst = {
*/
static int __init mpc512x_generic_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ if (!of_device_compatible_match(of_root, board))
+ return 0;
+
+ mpc512x_init_early();
+
+ return 1;
}
define_machine(mpc512x_generic) {
.name = "MPC512x generic",
.probe = mpc512x_generic_probe,
.init = mpc512x_init,
- .init_early = mpc512x_init_early,
.setup_arch = mpc512x_setup_arch,
.init_IRQ = mpc512x_init_IRQ,
.get_irq = ipic_get_irq,
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 452da2391153..6b4f4cb7009a 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -47,7 +47,7 @@ static void __init mpc512x_restart_init(void)
of_node_put(np);
}
-void mpc512x_restart(char *cmd)
+void __noreturn mpc512x_restart(char *cmd)
{
if (reset_module_base) {
/* Enable software reset "RSTE" */
diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c
index 116f2325b20b..dc81f05e0bce 100644
--- a/arch/powerpc/platforms/512x/pdm360ng.c
+++ b/arch/powerpc/platforms/512x/pdm360ng.c
@@ -113,9 +113,12 @@ void __init pdm360ng_init(void)
static int __init pdm360ng_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
+ if (!of_machine_is_compatible("ifm,pdm360ng"))
+ return 0;
- return of_flat_dt_is_compatible(root, "ifm,pdm360ng");
+ mpc512x_init_early();
+
+ return 1;
}
define_machine(pdm360ng) {
@@ -123,7 +126,6 @@ define_machine(pdm360ng) {
.probe = pdm360ng_probe,
.setup_arch = mpc512x_setup_arch,
.init = pdm360ng_init,
- .init_early = mpc512x_init_early,
.init_IRQ = mpc512x_init_IRQ,
.get_irq = ipic_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 6af651e69129..39b49822ace1 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -200,8 +200,7 @@ static void __init efika_setup_arch(void)
static int __init efika_probe(void)
{
- const char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
- "model", NULL);
+ const char *model = of_get_property(of_root, "model", NULL);
if (model == NULL)
return 0;
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 7492de3cf6d0..c94c385cc919 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -183,7 +183,7 @@ static const char * const board[] __initconst = {
*/
static int __init lite5200_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(lite5200) {
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 8fb95480fd73..a3227040cc86 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -242,7 +242,7 @@ static const char * const board[] __initconst = {
*/
static int __init media5200_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(media5200_platform) {
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index 792a301a0bf0..a80c6278d515 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -70,7 +70,7 @@ static const char *board[] __initdata = {
*/
static int __init mpc5200_simple_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(mpc5200_simple_platform) {
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 26993826a797..565e3a83dc9e 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -243,8 +243,7 @@ EXPORT_SYMBOL(mpc52xx_get_xtal_freq);
/**
* mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
*/
-void
-mpc52xx_restart(char *cmd)
+void __noreturn mpc52xx_restart(char *cmd)
{
local_irq_disable();
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 6781bda117be..cdab847749e6 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -309,8 +309,7 @@ machine_device_initcall(ep8248e, declare_of_platform_devices);
*/
static int __init ep8248e_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,ep8248e");
+ return of_machine_is_compatible("fsl,ep8248e");
}
define_machine(ep8248e)
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 387b446f4161..28860e40b5db 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -198,8 +198,7 @@ machine_device_initcall(km82xx, declare_of_platform_devices);
*/
static int __init km82xx_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "keymile,km82xx");
+ return of_machine_is_compatible("keymile,km82xx");
}
define_machine(km82xx)
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index d24deacf07d0..d23c10a96bde 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -201,8 +201,7 @@ machine_device_initcall(mpc8272_ads, declare_of_platform_devices);
*/
static int __init mpc8272_ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,mpc8272ads");
+ return of_machine_is_compatible("fsl,mpc8272ads");
}
define_machine(mpc8272_ads)
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index fc8b2d6a7d8d..c4f7029fc9ae 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -22,7 +22,7 @@
#define RMR_CSRE 0x00000001
-void pq2_restart(char *cmd)
+void __noreturn pq2_restart(char *cmd)
{
local_irq_disable();
setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);
diff --git a/arch/powerpc/platforms/82xx/pq2.h b/arch/powerpc/platforms/82xx/pq2.h
index a41f84ae2325..3080ce3441c1 100644
--- a/arch/powerpc/platforms/82xx/pq2.h
+++ b/arch/powerpc/platforms/82xx/pq2.h
@@ -1,7 +1,7 @@
#ifndef _PQ2_H
#define _PQ2_H
-void pq2_restart(char *cmd);
+void __noreturn pq2_restart(char *cmd);
#ifdef CONFIG_PCI
int pq2ads_pci_init_irq(void);
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index 3a5164ad10ad..6c654dc74a4b 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -164,8 +164,7 @@ static void __init pq2fads_setup_arch(void)
*/
static int __init pq2fads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,pq2fads");
+ return of_machine_is_compatible("fsl,pq2fads");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 2bdc8c862c46..4ef7f1cd05b7 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -116,7 +116,6 @@ endif
# used for usb & gpio
config PPC_MPC831x
bool
- select ARCH_WANT_OPTIONAL_GPIOLIB
# used for math-emu
config PPC_MPC832x
@@ -125,9 +124,7 @@ config PPC_MPC832x
# used for usb & gpio
config PPC_MPC834x
bool
- select ARCH_WANT_OPTIONAL_GPIOLIB
# used for usb & gpio
config PPC_MPC837x
bool
- select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index 464ea8e0292d..17e54339f8d9 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -43,8 +43,7 @@ machine_device_initcall(asp834x, mpc83xx_declare_of_platform_devices);
*/
static int __init asp834x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "analogue-and-micro,asp8347e");
+ return of_machine_is_compatible("analogue-and-micro,asp8347e");
}
define_machine(asp834x) {
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index 4bc6bbbe9ada..e7fbd6366abb 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -171,11 +171,10 @@ static char *board[] __initdata = {
*/
static int __init mpc83xx_km_probe(void)
{
- unsigned long node = of_get_flat_dt_root();
int i = 0;
while (board[i]) {
- if (of_flat_dt_is_compatible(node, board[i]))
+ if (of_machine_is_compatible(board[i]))
break;
i++;
}
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 7e923cad56cf..8899aa9d11f5 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -35,7 +35,7 @@ static int __init mpc83xx_restart_init(void)
arch_initcall(mpc83xx_restart_init);
-void mpc83xx_restart(char *cmd)
+void __noreturn mpc83xx_restart(char *cmd)
{
#define RST_OFFSET 0x00000900
#define RST_PROT_REG 0x00000018
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 4f2d9fea77b7..040d5d085467 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -46,7 +46,7 @@ static const char *board[] __initdata = {
*/
static int __init mpc830x_rdb_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index fa25977c52de..40e0d8307b59 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -46,7 +46,7 @@ static const char *board[] __initdata = {
*/
static int __init mpc831x_rdb_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index a973b2ae5df6..cdfa47c4d394 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -102,9 +102,7 @@ machine_device_initcall(mpc832x_mds, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc832x_sys_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC832xMDS");
+ return of_machine_is_compatible("MPC832xMDS");
}
define_machine(mpc832x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index ea2b87d202ca..2ef03e7d248c 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -220,9 +220,7 @@ machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc832x_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC832xRDB");
+ return of_machine_is_compatible("MPC832xRDB");
}
define_machine(mpc832x_rdb) {
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 80aea8c4b5a3..8fd0c1e8b182 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -70,9 +70,7 @@ static void __init mpc834x_itx_setup_arch(void)
*/
static int __init mpc834x_itx_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC834xMITX");
+ return of_machine_is_compatible("MPC834xMITX");
}
define_machine(mpc834x_itx) {
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 553e793a4a93..eeaee6123bb3 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -91,9 +91,7 @@ machine_device_initcall(mpc834x_mds, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc834x_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC834xMDS");
+ return of_machine_is_compatible("MPC834xMDS");
}
define_machine(mpc834x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index dd70b85f56d4..dacf4c2df069 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -209,9 +209,7 @@ machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
*/
static int __init mpc836x_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC836xMDS");
+ return of_machine_is_compatible("MPC836xMDS");
}
define_machine(mpc836x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index 4cd7153a6c88..cf67ac93ddcb 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -42,9 +42,7 @@ static void __init mpc836x_rdk_setup_arch(void)
*/
static int __init mpc836x_rdk_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,mpc8360rdk");
+ return of_machine_is_compatible("fsl,mpc8360rdk");
}
define_machine(mpc836x_rdk) {
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index e53a60b6c863..652b97d699c9 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -93,9 +93,7 @@ machine_device_initcall(mpc837x_mds, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc837x_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,mpc837xmds");
+ return of_machine_is_compatible("fsl,mpc837xmds");
}
define_machine(mpc837x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 9813c81e8e5b..667731d81676 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -73,7 +73,7 @@ static const char * const board[] __initconst = {
*/
static int __init mpc837x_rdb_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(mpc837x_rdb) {
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 0cf74d7ea1c5..ad484199eff7 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -65,7 +65,7 @@
* mpc83xx_* files. Mostly for use by mpc83xx_setup
*/
-extern void mpc83xx_restart(char *cmd);
+extern void __noreturn mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);
extern int mpc837x_usb_cfg(void);
extern int mpc834x_usb_cfg(void);
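
The __noreturn annotation added to mpc83xx_restart (and to the other restart/halt/power-off hooks later in this series) tells the compiler that the function never returns to its caller, so any failure path that cannot actually reset the board has to end in an explicit infinite loop, as the maple restart/power-off fail paths further down now do. A minimal sketch of the resulting shape, with a made-up reset register:

    /* sketch only: reset_reg and the value written to it are placeholders */
    static void __iomem *reset_reg;

    static void __noreturn myboard_restart(char *cmd)
    {
            local_irq_disable();
            if (reset_reg)
                    out_be32(reset_reg, 1);   /* hypothetical board reset */

            /* not reached if the write resets the board; spin otherwise */
            for (;;)
                    cpu_relax();
    }
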
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index 26cb3e934722..b867e88dfb0d 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -60,9 +60,7 @@ machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
*/
static int __init sbc834x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "SBC834xE");
+ return of_machine_is_compatible("SBC834xE");
}
define_machine(sbc834x) {
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index e626461a63bd..df25a3ed489d 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -225,7 +225,7 @@ config GE_IMP3A
select DEFAULT_UIMAGE
select SWIOTLB
select MMIO_NVRAM
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select GE_FPGA
help
This option enables support for the GE Intelligent Platforms IMP3A
@@ -272,7 +272,7 @@ config CORENET_GENERIC
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
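
ARCH_REQUIRE_GPIOLIB is being retired as a Kconfig symbol, so platforms that need the GPIO subsystem now select GPIOLIB directly; nothing changes on the driver side, which keeps using the ordinary gpiolib consumer API. A small consumer sketch, with placeholder names and not taken from this patch:

    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    /* sketch: fetch a "reset" GPIO described in the device tree and pulse it */
    static int myboard_reset_pulse(struct device *dev)
    {
            struct gpio_desc *reset;

            reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            gpiod_set_value(reset, 1);
            udelay(10);
            gpiod_set_value(reset, 0);
            return 0;
    }
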
diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c
index dcfafd6b91ee..07dd6ae3ec52 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_qds.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c
@@ -60,9 +60,7 @@ machine_arch_initcall(bsc9132_qds, mpc85xx_common_publish_devices);
static int __init bsc9132_qds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,bsc9132qds");
+ return of_machine_is_compatible("fsl,bsc9132qds");
}
define_machine(bsc9132_qds) {
diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
index 9d57bedb940c..e48f6710e6d5 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_rdb.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
@@ -50,9 +50,7 @@ machine_device_initcall(bsc9131_rdb, mpc85xx_common_publish_devices);
static int __init bsc9131_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,bsc9131rdb");
+ return of_machine_is_compatible("fsl,bsc9131rdb");
}
define_machine(bsc9131_rdb) {
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 61bc851e9a8e..3b9e3f0f9aec 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -54,9 +54,7 @@ machine_arch_initcall(c293_pcie, mpc85xx_common_publish_devices);
*/
static int __init c293_pcie_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,C293PCIE"))
+ if (of_machine_is_compatible("fsl,C293PCIE"))
return 1;
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index a2b0bc859de0..3a6a84f07f43 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -170,20 +170,19 @@ static const char * const boards[] __initconst = {
*/
static int __init corenet_generic_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
char hv_compat[24];
int i;
#ifdef CONFIG_SMP
extern struct smp_ops_t smp_85xx_ops;
#endif
- if (of_flat_dt_match(root, boards))
+ if (of_device_compatible_match(of_root, boards))
return 1;
/* Check if we're running under the Freescale hypervisor */
for (i = 0; boards[i]; i++) {
snprintf(hv_compat, sizeof(hv_compat), "%s-hv", boards[i]);
- if (of_flat_dt_is_compatible(root, hv_compat)) {
+ if (of_machine_is_compatible(hv_compat)) {
ppc_md.init_IRQ = ehv_pic_init;
ppc_md.get_irq = ehv_pic_get_irq;
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 11790e074c8a..14af36a7fa9c 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -47,9 +47,8 @@ void __init ge_imp3a_pic_init(void)
struct mpic *mpic;
struct device_node *np;
struct device_node *cascade_node = NULL;
- unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
+ if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) {
mpic = mpic_alloc(NULL, 0,
MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
@@ -198,9 +197,7 @@ static void ge_imp3a_show_cpuinfo(struct seq_file *m)
*/
static int __init ge_imp3a_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "ge,IMP3A");
+ return of_machine_is_compatible("ge,IMP3A");
}
machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 3dc1bda3ddc3..6ef8580fdc0e 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -44,7 +44,7 @@
static void __iomem *cpld_base = NULL;
-static void machine_restart(char *cmd)
+static void __noreturn machine_restart(char *cmd)
{
if (cpld_base)
out_8(cpld_base + KSI8560_CPLD_RCR1, KSI8560_CPLD_RCR1_CPUHR);
@@ -176,9 +176,7 @@ machine_device_initcall(ksi8560, mpc85xx_common_publish_devices);
*/
static int __init ksi8560_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "emerson,KSI8560");
+ return of_machine_is_compatible("emerson,KSI8560");
}
define_machine(ksi8560) {
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index a378ba3519e9..6ba687f19e45 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -64,9 +64,7 @@ machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
*/
static int __init mpc8536_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,mpc8536ds");
+ return of_machine_is_compatible("fsl,mpc8536ds");
}
define_machine(mpc8536_ds) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index de72a5f464b1..8756715c7a47 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -160,9 +160,7 @@ machine_arch_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
*/
static int __init mpc85xx_ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC85xxADS");
+ return of_machine_is_compatible("MPC85xxADS");
}
define_machine(mpc85xx_ads) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index d7e87ff912d7..62f171c71c4c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -83,7 +83,7 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
return PCIBIOS_SUCCESSFUL;
}
-static void mpc85xx_cds_restart(char *cmd)
+static void __noreturn mpc85xx_cds_restart(char *cmd)
{
struct pci_dev *dev;
u_char tmp;
@@ -367,9 +367,7 @@ static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
*/
static int __init mpc85xx_cds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC85xxCDS");
+ return of_machine_is_compatible("MPC85xxCDS");
}
machine_arch_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index f858306dba6a..6bc07d837b1c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -66,9 +66,7 @@ void __init mpc85xx_ds_pic_init(void)
struct device_node *cascade_node = NULL;
int cascade_irq;
#endif
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
+ if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) {
mpic = mpic_alloc(NULL, 0,
MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
@@ -169,9 +167,7 @@ static void __init mpc85xx_ds_setup_arch(void)
*/
static int __init mpc8544_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "MPC8544DS");
+ return !!of_machine_is_compatible("MPC8544DS");
}
machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
@@ -187,9 +183,7 @@ machine_arch_initcall(p2020_ds, swiotlb_setup_bus_notifier);
*/
static int __init mpc8572_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "fsl,MPC8572DS");
+ return !!of_machine_is_compatible("fsl,MPC8572DS");
}
/*
@@ -197,9 +191,7 @@ static int __init mpc8572_ds_probe(void)
*/
static int __init p2020_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "fsl,P2020DS");
+ return !!of_machine_is_compatible("fsl,P2020DS");
}
define_machine(mpc8544_ds) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index f61cbe235581..fa9cd710d2ae 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -376,9 +376,7 @@ static void __init mpc85xx_mds_pic_init(void)
static int __init mpc85xx_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC85xxMDS");
+ return of_machine_is_compatible("MPC85xxMDS");
}
define_machine(mpc8568_mds) {
@@ -398,9 +396,7 @@ define_machine(mpc8568_mds) {
static int __init mpc8569_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,MPC8569EMDS");
+ return of_machine_is_compatible("fsl,MPC8569EMDS");
}
define_machine(mpc8569_mds) {
@@ -420,9 +416,7 @@ define_machine(mpc8569_mds) {
static int __init p1021_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1021MDS");
+ return of_machine_is_compatible("fsl,P1021MDS");
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 3f4dad133338..c1499cbf3786 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -47,13 +47,12 @@
void __init mpc85xx_rdb_pic_init(void)
{
struct mpic *mpic;
- unsigned long root = of_get_flat_dt_root();
#ifdef CONFIG_QUICC_ENGINE
struct device_node *np;
#endif
- if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
+ if (of_machine_is_compatible("fsl,MPC85XXRDB-CAMP")) {
mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
@@ -148,80 +147,60 @@ machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices);
*/
static int __init p2020_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P2020RDB"))
+ if (of_machine_is_compatible("fsl,P2020RDB"))
return 1;
return 0;
}
static int __init p1020_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P1020RDB"))
+ if (of_machine_is_compatible("fsl,P1020RDB"))
return 1;
return 0;
}
static int __init p1020_rdb_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PC");
+ return of_machine_is_compatible("fsl,P1020RDB-PC");
}
static int __init p1020_rdb_pd_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PD");
+ return of_machine_is_compatible("fsl,P1020RDB-PD");
}
static int __init p1021_rdb_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P1021RDB-PC"))
+ if (of_machine_is_compatible("fsl,P1021RDB-PC"))
return 1;
return 0;
}
static int __init p2020_rdb_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P2020RDB-PC"))
+ if (of_machine_is_compatible("fsl,P2020RDB-PC"))
return 1;
return 0;
}
static int __init p1025_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1025RDB");
+ return of_machine_is_compatible("fsl,P1025RDB");
}
static int __init p1020_mbg_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020MBG-PC");
+ return of_machine_is_compatible("fsl,P1020MBG-PC");
}
static int __init p1020_utm_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020UTM-PC");
+ return of_machine_is_compatible("fsl,P1020UTM-PC");
}
static int __init p1024_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1024RDB");
+ return of_machine_is_compatible("fsl,P1024RDB");
}
define_machine(p2020_rdb) {
diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c
index 1233050560ae..acc3d0d6049d 100644
--- a/arch/powerpc/platforms/85xx/mvme2500.c
+++ b/arch/powerpc/platforms/85xx/mvme2500.c
@@ -53,9 +53,7 @@ machine_arch_initcall(mvme2500, mpc85xx_common_publish_devices);
*/
static int __init mvme2500_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "artesyn,MVME2500");
+ return of_machine_is_compatible("artesyn,MVME2500");
}
define_machine(mvme2500) {
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index ad1a3d438a9e..661d7b59e413 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -62,11 +62,9 @@ machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
*/
static int __init p1010_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P1010RDB"))
+ if (of_machine_is_compatible("fsl,P1010RDB"))
return 1;
- if (of_flat_dt_is_compatible(root, "fsl,P1010RDB-PB"))
+ if (of_machine_is_compatible("fsl,P1010RDB-PB"))
return 1;
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 371df822e88e..63568d68c76f 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -555,9 +555,7 @@ machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
*/
static int __init p1022_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,p1022ds");
+ return of_machine_is_compatible("fsl,p1022ds");
}
define_machine(p1022_ds) {
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 5087becaa8bc..2f2943600301 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -135,9 +135,7 @@ machine_arch_initcall(p1022_rdk, swiotlb_setup_bus_notifier);
*/
static int __init p1022_rdk_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,p1022rdk");
+ return of_machine_is_compatible("fsl,p1022rdk");
}
define_machine(p1022_rdk) {
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index d5b7509825de..40d8de57c341 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -100,9 +100,7 @@ static void __init mpc85xx_rdb_pic_init(void)
static int __init p1023_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1023RDB");
+ return of_machine_is_compatible("fsl,P1023RDB");
}
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
index 12019f17f297..2410167b290a 100644
--- a/arch/powerpc/platforms/85xx/ppa8548.c
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -81,9 +81,7 @@ machine_device_initcall(ppa8548, declare_of_platform_devices);
*/
static int __init ppa8548_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "ppa8548");
+ return of_machine_is_compatible("ppa8548");
}
define_machine(ppa8548) {
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 8ad2fe6f200a..50d745809809 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -62,9 +62,7 @@ static void __init qemu_e500_setup_arch(void)
*/
static int __init qemu_e500_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "fsl,qemu-e500");
+ return !!of_machine_is_compatible("fsl,qemu-e500");
}
machine_arch_initcall(qemu_e500, mpc85xx_common_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index b07214666d65..62b6c45a5a9b 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -120,9 +120,7 @@ machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices);
*/
static int __init sbc8548_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "SBC8548");
+ return of_machine_is_compatible("SBC8548");
}
define_machine(sbc8548) {
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index ae368e0e1076..cd255acde2e2 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -79,9 +79,7 @@ machine_arch_initcall(socrates, mpc85xx_common_publish_devices);
*/
static int __init socrates_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "abb,socrates"))
+ if (of_machine_is_compatible("abb,socrates"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 6f4939b6309e..91b824c4dc08 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -93,9 +93,7 @@ machine_arch_initcall(stx_gp3, mpc85xx_common_publish_devices);
*/
static int __init stx_gp3_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "stx,gp3-8560");
+ return of_machine_is_compatible("stx,gp3-8560");
}
define_machine(stx_gp3) {
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index ec0b7272fae2..b7c54454d611 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -122,7 +122,7 @@ static const char * const board[] __initconst = {
*/
static int __init tqm85xx_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(tqm85xx) {
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 71bc255b4324..1bc02a87f597 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -128,9 +128,7 @@ machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices);
static int __init twr_p1025_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,TWR-P1025");
+ return of_machine_is_compatible("fsl,TWR-P1025");
}
define_machine(twr_p1025) {
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 1a9c1085855f..cf0c70ff026e 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -144,23 +144,17 @@ machine_arch_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
*/
static int __init xes_mpc8572_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "xes,MPC8572");
+ return of_machine_is_compatible("xes,MPC8572");
}
static int __init xes_mpc8548_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "xes,MPC8548");
+ return of_machine_is_compatible("xes,MPC8548");
}
static int __init xes_mpc8540_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "xes,MPC8540");
+ return of_machine_is_compatible("xes,MPC8540");
}
define_machine(xes_mpc8572) {
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 1afd1e4a2dd2..ce619bd1f82d 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -4,7 +4,6 @@ menuconfig PPC_86xx
depends on 6xx
select FSL_SOC
select ALTIVEC
- select ARCH_WANT_OPTIONAL_GPIOLIB
help
The Freescale E600 SoCs have 74xx cores.
@@ -37,7 +36,7 @@ config GEF_PPC9A
bool "GE PPC9A"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select GE_FPGA
help
This option enables support for the GE PPC9A.
@@ -46,7 +45,7 @@ config GEF_SBC310
bool "GE SBC310"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select GE_FPGA
help
This option enables support for the GE SBC310.
@@ -55,12 +54,17 @@ config GEF_SBC610
bool "GE SBC610"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select GE_FPGA
select HAS_RAPIDIO
help
This option enables support for the GE SBC610.
+config MVME7100
+ bool "Artesyn MVME7100"
+ help
+ This option enables support for the Emerson/Artesyn MVME7100 board.
+
endif
config MPC8641
@@ -69,7 +73,8 @@ config MPC8641
select FSL_PCI if PCI
select PPC_UDBG_16550
select MPIC
- default y if MPC8641_HPCN || SBC8641D || GEF_SBC610 || GEF_SBC310 || GEF_PPC9A
+ default y if MPC8641_HPCN || SBC8641D || GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \
+ || MVME7100
config MPC8610
bool
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 2d889ad7dc89..01958fedc3f2 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o
obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o
obj-$(CONFIG_GEF_PPC9A) += gef_ppc9a.o
+obj-$(CONFIG_MVME7100) += mvme7100.o
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 8e63b752712c..ef684afb63c6 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -189,9 +189,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*/
static int __init gef_ppc9a_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "gef,ppc9a"))
+ if (of_machine_is_compatible("gef,ppc9a"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 0e0be94f551f..67dd0c231646 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -176,9 +176,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*/
static int __init gef_sbc310_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "gef,sbc310"))
+ if (of_machine_is_compatible("gef,sbc310"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index e8292b492d7e..805026976cac 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -166,9 +166,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*/
static int __init gef_sbc610_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "gef,sbc610"))
+ if (of_machine_is_compatible("gef,sbc610"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 957473e5c8e5..fef0582eddf1 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -319,9 +319,7 @@ static void __init mpc86xx_hpcd_setup_arch(void)
*/
static int __init mpc86xx_hpcd_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,MPC8610HPCD"))
+ if (of_machine_is_compatible("fsl,MPC8610HPCD"))
return 1; /* Looks good */
return 0;
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index e5084811b9c6..5ae42a037065 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -96,13 +96,11 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
*/
static int __init mpc86xx_hpcn_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,mpc8641hpcn"))
+ if (of_machine_is_compatible("fsl,mpc8641hpcn"))
return 1; /* Looks good */
/* Be nice and don't give silent boot death. Delete this in 2.6.27 */
- if (of_flat_dt_is_compatible(root, "mpc86xx")) {
+ if (of_machine_is_compatible("mpc86xx")) {
pr_warning("WARNING: your dts/dtb is old. You must update before the next kernel release\n");
return 1;
}
diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c
new file mode 100644
index 000000000000..addb41e7cd14
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/mvme7100.c
@@ -0,0 +1,121 @@
+/*
+ * Board setup routines for the Emerson/Artesyn MVME7100
+ *
+ * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * Author: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ *
+ * Based on earlier code by:
+ *
+ * Ajit Prem <ajit.prem@emerson.com>
+ * Copyright 2008 Emerson
+ *
+ * USB host fixup is borrowed by:
+ *
+ * Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+#include "mpc86xx.h"
+
+#define MVME7100_INTERRUPT_REG_2_OFFSET 0x05
+#define MVME7100_DS1375_MASK 0x40
+#define MVME7100_MAX6649_MASK 0x20
+#define MVME7100_ABORT_MASK 0x10
+
+/*
+ * Setup the architecture
+ */
+static void __init mvme7100_setup_arch(void)
+{
+ struct device_node *bcsr_node;
+ void __iomem *mvme7100_regs = NULL;
+ u8 reg;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mvme7100_setup_arch()", 0);
+
+#ifdef CONFIG_SMP
+ mpc86xx_smp_init();
+#endif
+
+ fsl_pci_assign_primary();
+
+ /* Remap BCSR registers */
+ bcsr_node = of_find_compatible_node(NULL, NULL,
+ "artesyn,mvme7100-bcsr");
+ if (bcsr_node) {
+ mvme7100_regs = of_iomap(bcsr_node, 0);
+ of_node_put(bcsr_node);
+ }
+
+ if (mvme7100_regs) {
+ /* Disable ds1375, max6649, and abort interrupts */
+ reg = readb(mvme7100_regs + MVME7100_INTERRUPT_REG_2_OFFSET);
+ reg |= MVME7100_DS1375_MASK | MVME7100_MAX6649_MASK
+ | MVME7100_ABORT_MASK;
+ writeb(reg, mvme7100_regs + MVME7100_INTERRUPT_REG_2_OFFSET);
+ } else
+ pr_warn("Unable to map board registers\n");
+
+ pr_info("MVME7100 board from Artesyn\n");
+}
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mvme7100_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "artesyn,MVME7100");
+}
+
+static void mvme7100_usb_host_fixup(struct pci_dev *pdev)
+{
+ unsigned int val;
+
+ if (!machine_is(mvme7100))
+ return;
+
+ /* Ensure only ports 1 & 2 are enabled */
+ pci_read_config_dword(pdev, 0xe0, &val);
+ pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x2);
+
+ /* System clock is 48-MHz Oscillator and EHCI Enabled. */
+ pci_write_config_dword(pdev, 0xe4, 1 << 5);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
+ mvme7100_usb_host_fixup);
+
+machine_arch_initcall(mvme7100, mpc86xx_common_publish_devices);
+
+define_machine(mvme7100) {
+ .name = "MVME7100",
+ .probe = mvme7100_probe,
+ .setup_arch = mvme7100_setup_arch,
+ .init_IRQ = mpc86xx_init_irq,
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .time_init = mpc86xx_time_init,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+};
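
Unlike the probes converted elsewhere in this series, the new mvme7100_probe above still goes through the flat-device-tree helpers. On a kernel where the probe hook runs after unflattening, the equivalent check is simply (sketch, not part of the patch):

    static int __init mvme7100_probe(void)
    {
            return of_machine_is_compatible("artesyn,MVME7100");
    }
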
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index 2a9cf278c12a..52af5735742e 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -67,9 +67,7 @@ sbc8641_show_cpuinfo(struct seq_file *m)
*/
static int __init sbc8641_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "wind,sbc8641"))
+ if (of_machine_is_compatible("wind,sbc8641"))
return 1; /* Looks good */
return 0;
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 157250426b56..564d99bb2a26 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -109,7 +109,7 @@ config 8xx_COPYBACK
config 8xx_GPIO
bool "GPIO API Support"
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
Saying Y here will cause the ports on an MPC8xx processor to be used
with the GPIO API. If you say N here, the kernel needs less memory.
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
index 61cae4c1edb8..333dece79394 100644
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@ -88,8 +88,7 @@ static void __init adder875_setup(void)
static int __init adder875_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "analogue-and-micro,adder875");
+ return of_machine_is_compatible("analogue-and-micro,adder875");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index 2bedeb7d5f8f..cd0d90f1fb1c 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -143,8 +143,7 @@ static void __init ep88xc_setup_arch(void)
static int __init ep88xc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,ep88xc");
+ return of_machine_is_compatible("fsl,ep88xc");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index c289fc77b4ba..b1ab6e96cb31 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -198,7 +198,7 @@ void mpc8xx_get_rtc_time(struct rtc_time *tm)
return;
}
-void mpc8xx_restart(char *cmd)
+void __noreturn mpc8xx_restart(char *cmd)
{
car8xx_t __iomem *clk_r = immr_map(im_clkrst);
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index 78180c5e73ff..8d02f5ff4481 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -118,8 +118,7 @@ static void __init mpc86xads_setup_arch(void)
static int __init mpc86xads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,mpc866ads");
+ return of_machine_is_compatible("fsl,mpc866ads");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 4d62bf9dc789..e821a42d5816 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -193,8 +193,7 @@ static void __init mpc885ads_setup_arch(void)
static int __init mpc885ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,mpc885ads");
+ return of_machine_is_compatible("fsl,mpc885ads");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/mpc8xx.h b/arch/powerpc/platforms/8xx/mpc8xx.h
index 239a243a6161..31cc2ecace42 100644
--- a/arch/powerpc/platforms/8xx/mpc8xx.h
+++ b/arch/powerpc/platforms/8xx/mpc8xx.h
@@ -11,7 +11,7 @@
#ifndef __MPC8xx_H
#define __MPC8xx_H
-extern void mpc8xx_restart(char *cmd);
+extern void __noreturn mpc8xx_restart(char *cmd);
extern void mpc8xx_calibrate_decr(void);
extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index bee47a2b23e6..4cea8b1afa44 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -119,9 +119,7 @@ static void __init tqm8xx_setup_arch(void)
static int __init tqm8xx_probe(void)
{
- unsigned long node = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(node, "tqc,tqm8xx");
+ return of_machine_is_compatible("tqc,tqm8xx");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 46a3533d3acb..3663f71fd913 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -275,7 +275,7 @@ config TAU_AVERAGE
config QE_GPIO
bool "QE GPIO support"
depends on QUICC_ENGINE
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
Say Y here if you're going to use hardware that connects to the
QE GPIOs.
@@ -285,7 +285,7 @@ config CPM2
depends on (FSL_SOC_BOOKE && PPC32) || 8260
select CPM
select PPC_PCI_CHOICE
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
The CPM2 (Communications Processor Module) is a coprocessor on
embedded CPUs made by Freescale. Selecting this option means that
@@ -324,7 +324,7 @@ config OF_RTC
config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"
depends on PPC
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
Say Y here to support simple, memory-mapped GPIO controllers.
These are usually BCSRs used to control board's switches, LEDs,
@@ -334,7 +334,7 @@ config SIMPLE_GPIO
config MCU_MPC8349EMITX
bool "MPC8349E-mITX MCU driver"
depends on I2C=y && PPC_83xx
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
Say Y here to enable soft power-off functionality on the Freescale
boards with the MPC8349E-mITX-compatible MCU chips. This driver will
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 77e9b8d591fb..f32edec13fd1 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,7 +1,6 @@
config PPC64
bool "64-bit kernel"
default n
- select HAVE_VIRT_CPU_ACCOUNTING
select ZLIB_DEFLATE
help
This option selects whether a 32-bit or a 64-bit kernel
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 2fe12046279e..45cb9821173c 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -123,7 +123,7 @@ static int __init request_isa_regions(void)
}
machine_device_initcall(amigaone, request_isa_regions);
-void amigaone_restart(char *cmd)
+void __noreturn amigaone_restart(char *cmd)
{
local_irq_disable();
@@ -143,9 +143,7 @@ void amigaone_restart(char *cmd)
static int __init amigaone_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "eyetech,amigaone")) {
+ if (of_machine_is_compatible("eyetech,amigaone")) {
/*
* Coherent memory access cause complete system lockup! Thus
* disable this CPU feature, even if the CPU needs it.
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 82607d621aca..88301e53f085 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -85,61 +85,57 @@ static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
cancel_delayed_work_sync(&info->work);
}
-static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
+static int spu_gov_start(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- struct spu_gov_info_struct *info, *affected_info;
+ struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+ struct spu_gov_info_struct *affected_info;
int i;
- int ret = 0;
- info = &per_cpu(spu_gov_info, cpu);
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if (!cpu_online(cpu)) {
- printk(KERN_ERR "cpu %d is not online\n", cpu);
- ret = -EINVAL;
- break;
- }
+ if (!cpu_online(cpu)) {
+ printk(KERN_ERR "cpu %d is not online\n", cpu);
+ return -EINVAL;
+ }
- if (!policy->cur) {
- printk(KERN_ERR "no cpu specified in policy\n");
- ret = -EINVAL;
- break;
- }
+ if (!policy->cur) {
+ printk(KERN_ERR "no cpu specified in policy\n");
+ return -EINVAL;
+ }
- /* initialize spu_gov_info for all affected cpus */
- for_each_cpu(i, policy->cpus) {
- affected_info = &per_cpu(spu_gov_info, i);
- affected_info->policy = policy;
- }
+ /* initialize spu_gov_info for all affected cpus */
+ for_each_cpu(i, policy->cpus) {
+ affected_info = &per_cpu(spu_gov_info, i);
+ affected_info->policy = policy;
+ }
- info->poll_int = POLL_TIME;
+ info->poll_int = POLL_TIME;
- /* setup timer */
- spu_gov_init_work(info);
+ /* setup timer */
+ spu_gov_init_work(info);
- break;
+ return 0;
+}
- case CPUFREQ_GOV_STOP:
- /* cancel timer */
- spu_gov_cancel_work(info);
+static void spu_gov_stop(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+ int i;
- /* clean spu_gov_info for all affected cpus */
- for_each_cpu (i, policy->cpus) {
- info = &per_cpu(spu_gov_info, i);
- info->policy = NULL;
- }
+ /* cancel timer */
+ spu_gov_cancel_work(info);
- break;
+ /* clean spu_gov_info for all affected cpus */
+ for_each_cpu (i, policy->cpus) {
+ info = &per_cpu(spu_gov_info, i);
+ info->policy = NULL;
}
-
- return ret;
}
static struct cpufreq_governor spu_governor = {
.name = "spudemand",
- .governor = spu_gov_govern,
+ .start = spu_gov_start,
+ .stop = spu_gov_stop,
.owner = THIS_MODULE,
};
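
The spudemand rework follows the cpufreq core's move away from the single event-multiplexing ->governor() callback: a governor now supplies dedicated ->start() and ->stop() hooks (plus, where needed, ->init()/->exit()/->limits()) in struct cpufreq_governor. A bare-bones sketch of that shape, with placeholder names:

    #include <linux/cpufreq.h>
    #include <linux/module.h>

    static int my_gov_start(struct cpufreq_policy *policy)
    {
            /* set up per-policy state, kick off any polling work */
            return 0;
    }

    static void my_gov_stop(struct cpufreq_policy *policy)
    {
            /* cancel work and tear down per-policy state */
    }

    static struct cpufreq_governor my_governor = {
            .name   = "mygov",              /* placeholder name */
            .start  = my_gov_start,
            .stop   = my_gov_stop,
            .owner  = THIS_MODULE,
    };

A governor of this shape is then registered with cpufreq_register_governor(&my_governor), as spu_governor is in this file's init path.
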
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 14a582b21274..9027d7c48507 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -178,7 +178,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
* default for now.*/
#ifdef CELL_IOMMU_STRICT_PROTECTION
/* to avoid referencing a global, we use a trick here to setup the
- * protection bit. "prot" is setup to be 3 fields of 4 bits apprended
+ * protection bit. "prot" is setup to be 3 fields of 4 bits appended
* together for each of the 3 supported direction values. It is then
* shifted left so that the fields matching the desired direction
* lands on the appropriate bits, and other bits are masked out.
@@ -338,7 +338,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
start_seg = base >> IO_SEGMENT_SHIFT;
segments = size >> IO_SEGMENT_SHIFT;
pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
- /* PTEs for each segment must start on a 4K bounday */
+ /* PTEs for each segment must start on a 4K boundary */
pages_per_segment = max(pages_per_segment,
(1 << 12) / sizeof(unsigned long));
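
The comment fixed above ("appended", "boundary") describes a small trick in tce_build_cell(): the protection bits for the three supported DMA directions are packed as three 4-bit fields in a single constant, and the field for the current direction is picked out by shifting and masking, avoiding a global lookup table. A generic sketch of the idea, with a made-up packed value:

    /* sketch: field for direction i (0..2) lives at bits 4*i .. 4*i+3 */
    #define MY_PACKED_PROT  0x0421UL        /* hypothetical packed fields */

    static unsigned long my_prot_bits(unsigned int direction)
    {
            return (MY_PACKED_PROT >> (direction * 4)) & 0xf;
    }
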
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 36cff28d0293..d3543e68efe8 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -255,13 +255,10 @@ static void __init cell_setup_arch(void)
static int __init cell_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "IBM,CBEA") &&
- !of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ if (!of_machine_is_compatible("IBM,CBEA") &&
+ !of_machine_is_compatible("IBM,CPBW-1.0"))
return 0;
- hpte_init_native();
pm_power_off = rtas_power_off;
return 1;
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 54ee5743cb72..d06dcac66fcb 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -217,7 +217,7 @@ static void spider_irq_cascade(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-/* For hooking up the cascace we have a problem. Our device-tree is
+/* For hooking up the cascade we have a problem. Our device-tree is
* crap and we don't know on which BE iic interrupt we are hooked on at
* least not the "standard" way. We can reconstitute it based on two
* informations though: which BE node we are connected to and whether
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 3cbe38fad609..bb4a8e07c229 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -69,7 +69,7 @@ static DEFINE_SPINLOCK(spu_lock);
* spu_full_list_lock and spu_full_list_mutex held, while iterating
* through it requires either of these locks.
*
- * In addition spu_full_list_lock protects all assignmens to
+ * In addition spu_full_list_lock protects all assignments to
* spu->mm.
*/
static LIST_HEAD(spu_full_list);
@@ -253,7 +253,7 @@ static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
* Setup the SPU kernel SLBs, in preparation for a context save/restore. We
* need to map both the context save area, and the save/restore code.
*
- * Because the lscsa and code may cross segment boundaires, we check to see
+ * Because the lscsa and code may cross segment boundaries, we check to see
* if mappings are required for the start and end of each range. We currently
* assume that the mappings are smaller that one segment - if not, something
* is seriously wrong.
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index c3327f3d8cf7..21b4bfb97200 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -535,8 +535,7 @@ static int __init init_affinity(void)
if (of_has_vicinity()) {
init_affinity_fw();
} else {
- long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ if (of_machine_is_compatible("IBM,CPBW-1.0"))
init_affinity_qs20_harcoded();
else
printk("No affinity configuration found\n");
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 84fb984f29c1..85c85eb3e245 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
if (rc < 0)
goto out;
- skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+ skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
out:
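
The coredump fix above stops using cprm->file->f_pos for the note-alignment calculation and uses cprm->pos, the offset the coredump core itself maintains across dump_emit()/dump_skip() calls (the file position is not reliable for every dump target, e.g. a pipe). The padding being computed is ordinary ELF note alignment: round the running offset up to a 4-byte boundary and skip the difference. Sketch:

    /* sketch: bytes of padding needed to bring pos up to 4-byte alignment */
    static inline loff_t note_pad(loff_t pos)
    {
            return roundup(pos, 4) - pos;
    }
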
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 2936a0044c04..06254467e4dd 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -866,7 +866,7 @@ void spufs_wbox_callback(struct spu *spu)
* - end of the mapped area
*
* If the file is opened without O_NONBLOCK, we wait here until
- * space is availabyl, but return when we have been able to
+ * space is available, but return when we have been able to
* write something.
*/
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 9f79004e6d6f..cfacbee24d7b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -435,7 +435,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
/* Note: we don't need to force_sig SIGTRAP on single-step
* since we have TIF_SINGLESTEP set, thus the kernel will do
- * it upon return from the syscall anyawy
+ * it upon return from the syscall anyway.
*/
if (unlikely(status & SPU_STATUS_SINGLE_STEP))
ret = -ERESTARTSYS;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 998f632e7cce..460f5f31d5cb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -622,7 +622,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
/**
* find_victim - find a lower priority context to preempt
- * @ctx: canidate context for running
+ * @ctx: candidate context for running
*
* Returns the freed physical spu to run the new context on.
*/
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 987d1b8d68e3..bfb300633dfe 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -239,7 +239,7 @@ out:
of_node_put(np);
}
-static void briq_restart(char *cmd)
+static void __noreturn briq_restart(char *cmd)
{
local_irq_disable();
if (briq_SPOR)
@@ -253,7 +253,7 @@ static void briq_restart(char *cmd)
* But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
* the the built-in serial node. Instead, a /failsafe node is created.
*/
-static __init void chrp_init_early(void)
+static __init void chrp_init(void)
{
struct device_node *node;
const char *property;
@@ -587,6 +587,8 @@ static int __init chrp_probe(void)
pm_power_off = rtas_power_off;
+ chrp_init();
+
return 1;
}
@@ -595,7 +597,6 @@ define_machine(chrp) {
.probe = chrp_probe,
.setup_arch = chrp_setup_arch,
.init = chrp_init2,
- .init_early = chrp_init_early,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
.restart = rtas_restart,
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index ebd3963fdf91..d19e4e759597 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -99,7 +99,7 @@ static void c2k_reset_board(void)
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_VALUE_SET, 0x00080004);
}
-static void c2k_restart(char *cmd)
+static void __noreturn c2k_restart(char *cmd)
{
c2k_reset_board();
msleep(100);
@@ -123,15 +123,16 @@ void c2k_show_cpuinfo(struct seq_file *m)
*/
static int __init c2k_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "GEFanuc,C2K"))
+ if (!of_machine_is_compatible("GEFanuc,C2K"))
return 0;
printk(KERN_INFO "Detected a GEFanuc C2K board\n");
_set_L2CR(0);
_set_L2CR(L2CR_L2E | L2CR_L2PE | L2CR_L2I);
+
+ mv64x60_init_early();
+
return 1;
}
@@ -139,7 +140,6 @@ define_machine(c2k) {
.name = "C2K",
.probe = c2k_probe,
.setup_arch = c2k_setup_arch,
- .init_early = mv64x60_init_early,
.show_cpuinfo = c2k_show_cpuinfo,
.init_IRQ = mv64x60_init_irq,
.get_irq = mv64x60_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index fe0ed6ee285e..36789cec957c 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -29,14 +29,14 @@
#include "usbgecko_udbg.h"
-static void gamecube_spin(void)
+static void __noreturn gamecube_spin(void)
{
/* spin until power button pressed */
for (;;)
cpu_relax();
}
-static void gamecube_restart(char *cmd)
+static void __noreturn gamecube_restart(char *cmd)
{
local_irq_disable();
flipper_platform_reset();
@@ -49,26 +49,20 @@ static void gamecube_power_off(void)
gamecube_spin();
}
-static void gamecube_halt(void)
+static void __noreturn gamecube_halt(void)
{
gamecube_restart(NULL);
}
-static void __init gamecube_init_early(void)
-{
- ug_udbg_init();
-}
-
static int __init gamecube_probe(void)
{
- unsigned long dt_root;
-
- dt_root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube"))
+ if (!of_machine_is_compatible("nintendo,gamecube"))
return 0;
pm_power_off = gamecube_power_off;
+ ug_udbg_init();
+
return 1;
}
@@ -80,7 +74,6 @@ static void gamecube_shutdown(void)
define_machine(gamecube) {
.name = "gamecube",
.probe = gamecube_probe,
- .init_early = gamecube_init_early,
.restart = gamecube_restart,
.halt = gamecube_halt,
.init_IRQ = flipper_pic_probe,
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 8c305c7c8977..dafba1057a47 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -193,7 +193,7 @@ void holly_show_cpuinfo(struct seq_file *m)
seq_printf(m, "machine\t\t: PPC750 GX/CL\n");
}
-void holly_restart(char *cmd)
+void __noreturn holly_restart(char *cmd)
{
__be32 __iomem *ocn_bar1 = NULL;
unsigned long bar;
@@ -250,9 +250,7 @@ void holly_halt(void)
*/
static int __init holly_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,holly"))
+ if (!of_machine_is_compatible("ibm,holly"))
return 0;
return 1;
}
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 540eeb58d3f0..f29cf29b11f8 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -100,7 +100,7 @@ static void __init linkstation_init_IRQ(void)
extern void avr_uart_configure(void);
extern void avr_uart_send(const char);
-static void linkstation_restart(char *cmd)
+static void __noreturn linkstation_restart(char *cmd)
{
local_irq_disable();
@@ -113,7 +113,7 @@ static void linkstation_restart(char *cmd)
avr_uart_send('G'); /* "kick" */
}
-static void linkstation_power_off(void)
+static void __noreturn linkstation_power_off(void)
{
local_irq_disable();
@@ -127,7 +127,7 @@ static void linkstation_power_off(void)
/* NOTREACHED */
}
-static void linkstation_halt(void)
+static void __noreturn linkstation_halt(void)
{
linkstation_power_off();
/* NOTREACHED */
@@ -141,11 +141,7 @@ static void linkstation_show_cpuinfo(struct seq_file *m)
static int __init linkstation_probe(void)
{
- unsigned long root;
-
- root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "linkstation"))
+ if (!of_machine_is_compatible("linkstation"))
return 0;
pm_power_off = linkstation_power_off;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index df4ad95f183e..80804f9916ee 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -146,7 +146,7 @@ void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
seq_printf(m, "vendor\t\t: Freescale Semiconductor\n");
}
-void mpc7448_hpc2_restart(char *cmd)
+static void __noreturn mpc7448_hpc2_restart(char *cmd)
{
local_irq_disable();
@@ -161,9 +161,7 @@ void mpc7448_hpc2_restart(char *cmd)
*/
static int __init mpc7448_hpc2_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "mpc74xx"))
+ if (!of_machine_is_compatible("mpc74xx"))
return 0;
return 1;
}
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 8f65aa3747f5..ed7321d6772e 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -177,7 +177,7 @@ static void mvme5100_show_cpuinfo(struct seq_file *m)
seq_puts(m, "Machine\t\t: MVME5100\n");
}
-static void mvme5100_restart(char *cmd)
+static void __noreturn mvme5100_restart(char *cmd)
{
local_irq_disable();
@@ -194,9 +194,7 @@ static void mvme5100_restart(char *cmd)
*/
static int __init mvme5100_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MVME5100");
+ return of_machine_is_compatible("MVME5100");
}
static int __init probe_of_platform_devices(void)
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index d572833ebd00..471a50bcd074 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -96,7 +96,7 @@ static void __init storcenter_init_IRQ(void)
mpic_init(mpic);
}
-static void storcenter_restart(char *cmd)
+static void __noreturn storcenter_restart(char *cmd)
{
local_irq_disable();
@@ -109,9 +109,7 @@ static void storcenter_restart(char *cmd)
static int __init storcenter_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "iomega,storcenter");
+ return of_machine_is_compatible("iomega,storcenter");
}
define_machine(storcenter){
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 352592d3e44e..3fd683e40bc9 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -112,7 +112,7 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
return delta + bl;
}
-static void wii_spin(void)
+static void __noreturn wii_spin(void)
{
local_irq_disable();
for (;;)
@@ -160,7 +160,7 @@ static void __init wii_setup_arch(void)
}
}
-static void wii_restart(char *cmd)
+static void __noreturn wii_restart(char *cmd)
{
local_irq_disable();
@@ -185,18 +185,13 @@ static void wii_power_off(void)
wii_spin();
}
-static void wii_halt(void)
+static void __noreturn wii_halt(void)
{
if (ppc_md.restart)
ppc_md.restart(NULL);
wii_spin();
}
-static void __init wii_init_early(void)
-{
- ug_udbg_init();
-}
-
static void __init wii_pic_probe(void)
{
flipper_pic_probe();
@@ -205,14 +200,13 @@ static void __init wii_pic_probe(void)
static int __init wii_probe(void)
{
- unsigned long dt_root;
-
- dt_root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(dt_root, "nintendo,wii"))
+ if (!of_machine_is_compatible("nintendo,wii"))
return 0;
pm_power_off = wii_power_off;
+ ug_udbg_init();
+
return 1;
}
@@ -225,7 +219,6 @@ static void wii_shutdown(void)
define_machine(wii) {
.name = "wii",
.probe = wii_probe,
- .init_early = wii_init_early,
.setup_arch = wii_setup_arch,
.restart = wii_restart,
.halt = wii_halt,
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index a923230e575b..a2f89e6326ce 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -568,6 +568,26 @@ void maple_pci_irq_fixup(struct pci_dev *dev)
DBG(" <- maple_pci_irq_fixup\n");
}
+static int maple_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_controller *hose = pci_bus_to_host(bridge->bus);
+ struct device_node *np, *child;
+
+ if (hose != u3_agp)
+ return 0;
+
+ /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
+ * assume there is no P2P bridge on the AGP bus, which should be a
+ * safe assumptions hopefully.
+ */
+ np = hose->dn;
+ PCI_DN(np)->busno = 0xf0;
+ for_each_child_of_node(np, child)
+ PCI_DN(child)->busno = 0xf0;
+
+ return 0;
+}
+
void __init maple_pci_init(void)
{
struct device_node *np, *root;
@@ -605,19 +625,7 @@ void __init maple_pci_init(void)
if (ht && maple_add_bridge(ht) != 0)
of_node_put(ht);
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
-
- /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
- * assume there is no P2P bridge on the AGP bus, which should be a
- * safe assumptions hopefully.
- */
- if (u3_agp) {
- struct device_node *np = u3_agp->dn;
- PCI_DN(np)->busno = 0xf0;
- for (np = np->child; np; np = np->sibling)
- PCI_DN(np)->busno = 0xf0;
- }
+ ppc_md.pcibios_root_bridge_prepare = maple_pci_root_bridge_prepare;
/* Tell pci.c to not change any resource allocations. */
pci_add_flags(PCI_PROBE_ONLY);
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index a837188544c8..3c30c7a4534d 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -94,7 +94,7 @@ static unsigned long maple_find_nvram_base(void)
return result;
}
-static void maple_restart(char *cmd)
+static void __noreturn maple_restart(char *cmd)
{
unsigned int maple_nvram_base;
const unsigned int *maple_nvram_offset, *maple_nvram_command;
@@ -119,9 +119,10 @@ static void maple_restart(char *cmd)
for (;;) ;
fail:
printk(KERN_EMERG "Maple: Manual Restart Required\n");
+ for (;;) ;
}
-static void maple_power_off(void)
+static void __noreturn maple_power_off(void)
{
unsigned int maple_nvram_base;
const unsigned int *maple_nvram_offset, *maple_nvram_command;
@@ -146,9 +147,10 @@ static void maple_power_off(void)
for (;;) ;
fail:
printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
+ for (;;) ;
}
-static void maple_halt(void)
+static void __noreturn maple_halt(void)
{
maple_power_off();
}
@@ -196,18 +198,6 @@ void __init maple_setup_arch(void)
mmio_nvram_init();
}
-/*
- * Early initialization.
- */
-static void __init maple_init_early(void)
-{
- DBG(" -> maple_init_early\n");
-
- iommu_init_early_dart(&maple_pci_controller_ops);
-
- DBG(" <- maple_init_early\n");
-}
-
/*
* This is almost identical to pSeries and CHRP. We need to make that
* code generic at one point, with appropriate bits in the device-tree to
@@ -298,22 +288,14 @@ static void __init maple_progress(char *s, unsigned short hex)
*/
static int __init maple_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "Momentum,Maple") &&
- !of_flat_dt_is_compatible(root, "Momentum,Apache"))
+ if (!of_machine_is_compatible("Momentum,Maple") &&
+ !of_machine_is_compatible("Momentum,Apache"))
return 0;
- /*
- * On U3, the DART (iommu) must be allocated now since it
- * has an impact on htab_initialize (due to the large page it
- * occupies having to be broken up so the DART itself is not
- * part of the cacheable linar mapping
- */
- alloc_dart_table();
- hpte_init_native();
pm_power_off = maple_power_off;
+ iommu_init_early_dart(&maple_pci_controller_ops);
+
return 1;
}
@@ -321,7 +303,6 @@ define_machine(maple) {
.name = "Maple",
.probe = maple_probe,
.setup_arch = maple_setup_arch,
- .init_early = maple_init_early,
.init_IRQ = maple_init_IRQ,
.pci_irq_fixup = maple_pci_irq_fixup,
.pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index c929644e74a6..43dd3fb514e0 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -202,6 +202,11 @@ int __init iob_init(struct device_node *dn)
pr_debug(" -> %s\n", __func__);
+ /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
+ iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
+
+ printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
+
/* Allocate a spare page to map all invalid IOTLB pages. */
tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
if (!tmp)
@@ -260,13 +265,3 @@ void __init iommu_init_early_pasemi(void)
set_pci_dma_ops(&dma_iommu_ops);
}
-void __init alloc_iobmap_l2(void)
-{
-#ifndef CONFIG_PPC_PASEMI_IOMMU
- return;
-#endif
- /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
- iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
-
- printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
-}
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index 11f230a48227..74cbcb357612 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -8,7 +8,6 @@ extern void pas_pci_dma_dev_setup(struct pci_dev *dev);
extern void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
-extern void __init alloc_iobmap_l2(void);
extern void __init pasemi_map_registers(void);
/* Power savings modes, implemented in asm */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index f3a68a0fef23..10c4e8fc6ea9 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -229,9 +229,6 @@ void __init pas_pci_init(void)
of_node_get(np);
of_node_put(root);
-
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
}
void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index d71b2c7e8403..e86c1bd08f1f 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -62,7 +62,7 @@ static int num_mce_regs;
static int nmi_virq = NO_IRQ;
-static void pas_restart(char *cmd)
+static void __noreturn pas_restart(char *cmd)
{
/* Need to put others cpu in hold loop so they're not sleeping */
smp_send_stop();
@@ -339,11 +339,6 @@ out:
return !!(srr1 & 0x2);
}
-static void __init pas_init_early(void)
-{
- iommu_init_early_pasemi();
-}
-
#ifdef CONFIG_PCMCIA
static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
void *data)
@@ -420,15 +415,11 @@ machine_device_initcall(pasemi, pasemi_publish_devices);
*/
static int __init pas_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "PA6T-1682M") &&
- !of_flat_dt_is_compatible(root, "pasemi,pwrficient"))
+ if (!of_machine_is_compatible("PA6T-1682M") &&
+ !of_machine_is_compatible("pasemi,pwrficient"))
return 0;
- hpte_init_native();
-
- alloc_iobmap_l2();
+ iommu_init_early_pasemi();
return 1;
}
@@ -437,7 +428,6 @@ define_machine(pasemi) {
.name = "PA Semi PWRficient",
.probe = pas_probe,
.setup_arch = pas_setup_arch,
- .init_early = pas_init_early,
.init_IRQ = pas_init_IRQ,
.get_irq = mpic_get_irq,
.restart = pas_restart,
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 7553b6a77c64..6d6f277477aa 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -15,7 +15,7 @@
* This file thus provides a simple low level unified i2c interface for
* powermac that covers the various types of i2c busses used in Apple machines.
* For now, keywest, PMU and SMU, though we could add Cuda, or other bit
- * banging busses found on older chipstes in earlier machines if we ever need
+ * banging busses found on older chipsets in earlier machines if we ever need
* one of them.
*
* The drivers in this file are synchronous/blocking. In addition, the
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 59ab16fa600f..6e06c3be2e9a 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -878,6 +878,29 @@ void pmac_pci_irq_fixup(struct pci_dev *dev)
#endif /* CONFIG_PPC32 */
}
+#ifdef CONFIG_PPC64
+static int pmac_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_controller *hose = pci_bus_to_host(bridge->bus);
+ struct device_node *np, *child;
+
+ if (hose != u3_agp)
+ return 0;
+
+ /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
+ * assume there is no P2P bridge on the AGP bus, which should be a
+ * safe assumption for now. We should do something better in the
+ * future, though.
+ */
+ np = hose->dn;
+ PCI_DN(np)->busno = 0xf0;
+ for_each_child_of_node(np, child)
+ PCI_DN(child)->busno = 0xf0;
+
+ return 0;
+}
+#endif /* CONFIG_PPC64 */
+
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
@@ -914,20 +937,7 @@ void __init pmac_pci_init(void)
if (ht && pmac_add_bridge(ht) != 0)
of_node_put(ht);
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
-
- /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
- * assume there is no P2P bridge on the AGP bus, which should be a
- * safe assumptions for now. We should do something better in the
- * future though
- */
- if (u3_agp) {
- struct device_node *np = u3_agp->dn;
- PCI_DN(np)->busno = 0xf0;
- for (np = np->child; np; np = np->sibling)
- PCI_DN(np)->busno = 0xf0;
- }
+ ppc_md.pcibios_root_bridge_prepare = pmac_pci_root_bridge_prepare;
/* pmac_check_ht_link(); */
#else /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 8dd78f4e1af4..3de4a7c85140 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -52,7 +52,6 @@
#include <linux/suspend.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
#include <asm/reg.h>
#include <asm/sections.h>
@@ -97,11 +96,6 @@ int sccdbg;
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);
-#ifdef CONFIG_PMAC_SMU
-unsigned long smu_cmdbuf_abs;
-EXPORT_SYMBOL(smu_cmdbuf_abs);
-#endif
-
static void pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
@@ -325,7 +319,6 @@ static void __init pmac_setup_arch(void)
defined(CONFIG_PPC64)
pmac_nvram_init();
#endif
-
#ifdef CONFIG_PPC32
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
@@ -383,7 +376,7 @@ void __init_refok note_bootable_part(dev_t dev, int part, int goodness)
}
#ifdef CONFIG_ADB_CUDA
-static void cuda_restart(void)
+static void __noreturn cuda_restart(void)
{
struct adb_request req;
@@ -392,7 +385,7 @@ static void cuda_restart(void)
cuda_poll();
}
-static void cuda_shutdown(void)
+static void __noreturn cuda_shutdown(void)
{
struct adb_request req;
@@ -416,7 +409,7 @@ static void cuda_shutdown(void)
#define smu_shutdown()
#endif
-static void pmac_restart(char *cmd)
+static void __noreturn pmac_restart(char *cmd)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
@@ -430,9 +423,10 @@ static void pmac_restart(char *cmd)
break;
default: ;
}
+ while (1) ;
}
-static void pmac_power_off(void)
+static void __noreturn pmac_power_off(void)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
@@ -446,9 +440,10 @@ static void pmac_power_off(void)
break;
default: ;
}
+ while (1) ;
}
-static void
+static void __noreturn
pmac_halt(void)
{
pmac_power_off();
@@ -457,7 +452,7 @@ pmac_halt(void)
/*
* Early initialization.
*/
-static void __init pmac_init_early(void)
+static void __init pmac_init(void)
{
/* Enable early btext debug if requested */
if (strstr(boot_command_line, "btextdbg")) {
@@ -489,9 +484,6 @@ static int __init pmac_declare_of_platform_devices(void)
{
struct device_node *np;
- if (machine_is(chrp))
- return -1;
-
np = of_find_node_by_name(NULL, "valkyrie");
if (np) {
of_platform_device_create(np, "valkyrie", NULL);
@@ -598,24 +590,10 @@ console_initcall(check_pmac_serial_console);
*/
static int __init pmac_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "Power Macintosh") &&
- !of_flat_dt_is_compatible(root, "MacRISC"))
+ if (!of_machine_is_compatible("Power Macintosh") &&
+ !of_machine_is_compatible("MacRISC"))
return 0;
-#ifdef CONFIG_PPC64
- /*
- * On U3, the DART (iommu) must be allocated now since it
- * has an impact on htab_initialize (due to the large page it
- * occupies having to be broken up so the DART itself is not
- * part of the cacheable linar mapping
- */
- alloc_dart_table();
-
- hpte_init_native();
-#endif
-
#ifdef CONFIG_PPC32
/* isa_io_base gets set in pmac_pci_init */
ISA_DMA_THRESHOLD = ~0L;
@@ -623,17 +601,10 @@ static int __init pmac_probe(void)
DMA_MODE_WRITE = 2;
#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PMAC_SMU
- /*
- * SMU based G5s need some memory below 2Gb, at least the current
- * driver needs that. We have to allocate it now. We allocate 4k
- * (1 small page) for now.
- */
- smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
-#endif /* CONFIG_PMAC_SMU */
-
pm_power_off = pmac_power_off;
+ pmac_init();
+
return 1;
}
@@ -641,7 +612,6 @@ define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
.setup_arch = pmac_setup_arch,
- .init_early = pmac_init_early,
.show_cpuinfo = pmac_show_cpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = NULL, /* changed later */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 28a147ca32ba..834868b9fdc9 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -857,9 +857,8 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
{
int rc;
- switch(action) {
+ switch(action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
/* Open i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host) {
rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
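The smp_core99_cpu_notify() hunk above folds the *_FROZEN hotplug events into their plain counterparts by masking off CPU_TASKS_FROZEN before the switch. A minimal sketch of that pattern, with a hypothetical notifier (example_cpu_notify is not part of this patch):

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int example_cpu_notify(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
	{
		/* Strip the FROZEN bit once so CPU_UP_PREPARE and
		 * CPU_UP_PREPARE_FROZEN (suspend/resume) share a case.
		 */
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			/* set up per-cpu state here */
			break;
		case CPU_DEAD:
			/* tear down per-cpu state here */
			break;
		default:
			break;
		}
		return NOTIFY_OK;
	}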
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index cd9711e72df6..b5d98cb3f482 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -6,6 +6,7 @@ obj-y += opal-kmsg.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o
+obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 9226df11bf39..86544ea85dc3 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -36,6 +36,7 @@
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>
+#include <asm/pnv-pci.h>
#include "powernv.h"
#include "pci.h"
@@ -717,12 +718,12 @@ static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
return ret;
}
-static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
+static s64 pnv_eeh_poll(unsigned long id)
{
s64 rc = OPAL_HARDWARE;
while (1) {
- rc = opal_pci_poll(phb->opal_id);
+ rc = opal_pci_poll(id);
if (rc <= 0)
break;
@@ -762,7 +763,7 @@ int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
* reset followed by hot reset on root bus. So we also
* need the PCI bus settlement delay.
*/
- rc = pnv_eeh_phb_poll(phb);
+ rc = pnv_eeh_poll(phb->opal_id);
if (option == EEH_RESET_DEACTIVATE) {
if (system_state < SYSTEM_RUNNING)
udelay(1000 * EEH_PE_RST_SETTLE_TIME);
@@ -805,7 +806,7 @@ static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
goto out;
/* Poll state of the PHB until the request is done */
- rc = pnv_eeh_phb_poll(phb);
+ rc = pnv_eeh_poll(phb->opal_id);
if (option == EEH_RESET_DEACTIVATE)
msleep(EEH_PE_RST_SETTLE_TIME);
out:
@@ -815,7 +816,7 @@ out:
return 0;
}
-static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
+static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
@@ -866,6 +867,44 @@ static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
return 0;
}
+static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct device_node *dn = pci_device_to_OF_node(pdev);
+ uint64_t id = PCI_SLOT_ID(phb->opal_id,
+ (pdev->bus->number << 8) | pdev->devfn);
+ uint8_t scope;
+ int64_t rc;
+
+ /* Hot reset to the bus if firmware cannot handle it */
+ if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
+ return __pnv_eeh_bridge_reset(pdev, option);
+
+ switch (option) {
+ case EEH_RESET_FUNDAMENTAL:
+ scope = OPAL_RESET_PCI_FUNDAMENTAL;
+ break;
+ case EEH_RESET_HOT:
+ scope = OPAL_RESET_PCI_HOT;
+ break;
+ case EEH_RESET_DEACTIVATE:
+ return 0;
+ default:
+ dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
+ __func__, option);
+ return -EINVAL;
+ }
+
+ rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
+ if (rc <= OPAL_SUCCESS)
+ goto out;
+
+ rc = pnv_eeh_poll(id);
+out:
+ return (rc == OPAL_SUCCESS) ? 0 : -EIO;
+}
+
void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index fcc8b6861b63..479c25601612 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -27,9 +27,12 @@
#include "powernv.h"
#include "subcore.h"
+/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
+#define MAX_STOP_STATE 0xF
+
static u32 supported_cpuidle_states;
-int pnv_save_sprs_for_winkle(void)
+static int pnv_save_sprs_for_deep_states(void)
{
int cpu;
int rc;
@@ -50,15 +53,19 @@ int pnv_save_sprs_for_winkle(void)
uint64_t pir = get_hard_smp_processor_id(cpu);
uint64_t hsprg0_val = (uint64_t)&paca[cpu];
- /*
- * HSPRG0 is used to store the cpu's pointer to paca. Hence last
- * 3 bits are guaranteed to be 0. Program slw to restore HSPRG0
- * with 63rd bit set, so that when a thread wakes up at 0x100 we
- * can use this bit to distinguish between fastsleep and
- * deep winkle.
- */
- hsprg0_val |= 1;
-
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * HSPRG0 is used to store the cpu's pointer to paca.
+ * Hence last 3 bits are guaranteed to be 0. Program
+ * slw to restore HSPRG0 with 63rd bit set, so that
+ * when a thread wakes up at 0x100 we can use this bit
+ * to distinguish between fastsleep and deep winkle.
+ * This is not necessary with stop/psscr since PLS
+ * field of psscr indicates which state we are waking
+ * up from.
+ */
+ hsprg0_val |= 1;
+ }
rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
if (rc != 0)
return rc;
@@ -130,8 +137,8 @@ static void pnv_alloc_idle_core_states(void)
update_subcore_sibling_mask();
- if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
- pnv_save_sprs_for_winkle();
+ if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
+ pnv_save_sprs_for_deep_states();
}
u32 pnv_get_supported_cpuidle_states(void)
@@ -230,43 +237,162 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
show_fastsleep_workaround_applyonce,
store_fastsleep_workaround_applyonce);
-static int __init pnv_init_idle_states(void)
+
+/*
+ * Used for ppc_md.power_save which needs a function with no parameters
+ */
+static void power9_idle(void)
{
- struct device_node *power_mgt;
- int dt_idle_states;
- u32 *flags;
- int i;
+ /* Requesting stop state 0 */
+ power9_idle_stop(0);
+}
+/*
+ * First deep stop state. Used to figure out when to save/restore
+ * hypervisor context.
+ */
+u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
- supported_cpuidle_states = 0;
+/*
+ * Deepest stop idle state. Used when a cpu is offlined
+ */
+u64 pnv_deepest_stop_state;
- if (cpuidle_disable != IDLE_NO_OVERRIDE)
- goto out;
+/*
+ * Power ISA 3.0 idle initialization.
+ *
+ * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
+ * Register (PSSCR) to control idle behavior.
+ *
+ * PSSCR layout:
+ * ----------------------------------------------------------
+ * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
+ * ----------------------------------------------------------
+ * 0 4 41 42 43 44 48 54 56 60
+ *
+ * PSSCR key fields:
+ * Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
+ * lowest power-saving state the thread entered since the stop
+ * instruction was last executed.
+ *
+ * Bit 41 - Status Disable(SD)
+ * 0 - Shows PLS entries
+ * 1 - PLS entries are all 0
+ *
+ * Bit 42 - Enable State Loss
+ * 0 - No state is lost irrespective of other fields
+ * 1 - Allows state loss
+ *
+ * Bit 43 - Exit Criterion
+ * 0 - Exit from power-save mode on any interrupt
+ * 1 - Exit from power-save mode controlled by LPCR's PECE bits
+ *
+ * Bits 44:47 - Power-Saving Level Limit
+ * This limits the power-saving level that can be entered into.
+ *
+ * Bits 60:63 - Requested Level
+ * Used to specify which power-saving level must be entered on executing
+ * stop instruction
+ *
+ * @np: /ibm,opal/power-mgt device node
+ * @flags: cpu-idle-state-flags array
+ * @dt_idle_states: Number of idle state entries
+ * Returns 0 on success
+ */
+static int __init pnv_arch300_idle_init(struct device_node *np, u32 *flags,
+ int dt_idle_states)
+{
+ u64 *psscr_val = NULL;
+ int rc = 0, i;
- if (!firmware_has_feature(FW_FEATURE_OPAL))
+ psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val),
+ GFP_KERNEL);
+ if (!psscr_val) {
+ rc = -1;
+ goto out;
+ }
+ if (of_property_read_u64_array(np,
+ "ibm,cpu-idle-state-psscr",
+ psscr_val, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-states-psscr in DT\n");
+ rc = -1;
goto out;
+ }
- power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
- if (!power_mgt) {
+ /*
+ * Set pnv_first_deep_stop_state and pnv_deepest_stop_state.
+ * pnv_first_deep_stop_state should be set to the first stop
+ * level that causes hypervisor state loss.
+ * pnv_deepest_stop_state should be set to the deepest
+ * stop state.
+ */
+ pnv_first_deep_stop_state = MAX_STOP_STATE;
+ for (i = 0; i < dt_idle_states; i++) {
+ u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;
+
+ if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
+ (pnv_first_deep_stop_state > psscr_rl))
+ pnv_first_deep_stop_state = psscr_rl;
+
+ if (pnv_deepest_stop_state < psscr_rl)
+ pnv_deepest_stop_state = psscr_rl;
+ }
+
+out:
+ kfree(psscr_val);
+ return rc;
+}
+
+/*
+ * Probe device tree for supported idle states
+ */
+static void __init pnv_probe_idle_states(void)
+{
+ struct device_node *np;
+ int dt_idle_states;
+ u32 *flags = NULL;
+ int i;
+
+ np = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!np) {
pr_warn("opal: PowerMgmt Node not found\n");
goto out;
}
- dt_idle_states = of_property_count_u32_elems(power_mgt,
+ dt_idle_states = of_property_count_u32_elems(np,
"ibm,cpu-idle-state-flags");
if (dt_idle_states < 0) {
pr_warn("cpuidle-powernv: no idle states found in the DT\n");
goto out;
}
- flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
- if (of_property_read_u32_array(power_mgt,
+ flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);
+
+ if (of_property_read_u32_array(np,
"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
- goto out_free;
+ goto out;
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (pnv_arch300_idle_init(np, flags, dt_idle_states))
+ goto out;
}
for (i = 0; i < dt_idle_states; i++)
supported_cpuidle_states |= flags[i];
+out:
+ kfree(flags);
+}
+static int __init pnv_init_idle_states(void)
+{
+
+ supported_cpuidle_states = 0;
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ goto out;
+
+ pnv_probe_idle_states();
+
if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
patch_instruction(
(unsigned int *)pnv_fastsleep_workaround_at_entry,
@@ -285,8 +411,12 @@ static int __init pnv_init_idle_states(void)
}
pnv_alloc_idle_core_states();
-out_free:
- kfree(flags);
+
+ if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
+ ppc_md.power_save = power7_idle;
+ else if (supported_cpuidle_states & OPAL_PM_STOP_INST_FAST)
+ ppc_md.power_save = power9_idle;
+
out:
return 0;
}
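Given the PSSCR layout documented in the comment above, the Requested Level and Power-Saving Level Status fields can be pulled out with simple masks. A minimal sketch, assuming IBM bit numbering (bit 0 is the most significant bit of the 64-bit SPR); the EX_-prefixed names are illustrative, not the kernel's own PSSCR_* macros:

	/* Bits 60:63 - Requested Level (RL) */
	#define EX_PSSCR_RL_MASK	0xFull
	/* Bits 0:3 - Power-Saving Level Status (PLS) */
	#define EX_PSSCR_PLS_SHIFT	60
	#define EX_PSSCR_PLS_MASK	(0xFull << EX_PSSCR_PLS_SHIFT)

	static inline unsigned long ex_psscr_requested_level(unsigned long psscr)
	{
		return psscr & EX_PSSCR_RL_MASK;
	}

	static inline unsigned long ex_psscr_power_saving_level(unsigned long psscr)
	{
		return (psscr & EX_PSSCR_PLS_MASK) >> EX_PSSCR_PLS_SHIFT;
	}

The deep-state bookkeeping in pnv_arch300_idle_init() then reduces to tracking the lowest RL that loses full context (pnv_first_deep_stop_state) and the highest RL seen (pnv_deepest_stop_state).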
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 0459e100b4e7..4383a5ff82ba 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -180,7 +180,7 @@ long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
return rc;
}
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(phb, false);
/* Add the table to the list so its TCE cache will get invalidated */
pnv_pci_link_table_and_group(phb->hose->node, num,
@@ -204,7 +204,7 @@ long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
return rc;
}
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(phb, false);
pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
&npe->table_group);
@@ -270,7 +270,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
0 /* bypass base */, top);
if (rc == OPAL_SUCCESS)
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(phb, false);
return rc;
}
@@ -334,7 +334,7 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
return;
}
- pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(npe->phb, false);
}
struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index bdc8c0c71d15..83bebeec0fea 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -117,6 +117,11 @@ int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
return -EINVAL;
}
+ /* Wake up the poller before we wait for events, to speed things
+ * up on platforms or simulators where the interrupts aren't
+ * functional.
+ */
+ opal_wake_poller();
wait_event(opal_async_wait, test_bit(token, opal_async_complete_map));
memcpy(msg, &opal_async_responses[token], sizeof(*msg));
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
index 00a29432be39..4495f428b500 100644
--- a/arch/powerpc/platforms/powernv/opal-memory-errors.c
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -44,7 +44,7 @@ static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
{
uint64_t paddr_start, paddr_end;
- pr_debug("%s: Retrived memory error event, type: 0x%x\n",
+ pr_debug("%s: Retrieved memory error event, type: 0x%x\n",
__func__, merr_evt->type);
switch (merr_evt->type) {
case OPAL_MEM_ERR_TYPE_RESILIENCE:
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index a06059df9239..308efd170c27 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -55,7 +55,7 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
goto out_token;
}
- ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ ret = opal_error_code(opal_get_async_rc(msg));
*sensor_data = be32_to_cpu(data);
break;
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
index afe66c576a38..23fb6647dced 100644
--- a/arch/powerpc/platforms/powernv/opal-sysparam.c
+++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
@@ -67,7 +67,7 @@ static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
- ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ ret = opal_error_code(opal_get_async_rc(msg));
out_token:
opal_async_release_token(token);
@@ -103,7 +103,7 @@ static int opal_set_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
- ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ ret = opal_error_code(opal_get_async_rc(msg));
out_token:
opal_async_release_token(token);
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index e11273b2386d..1e496b780efd 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -1,6 +1,7 @@
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
+#include <asm/asm-prototypes.h>
#ifdef HAVE_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e45b88a5d7e0..cf928bba4d9a 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -59,7 +59,7 @@ END_FTR_SECTION(0, 1); \
#define OPAL_CALL(name, token) \
_GLOBAL_TOC(name); \
mflr r0; \
- std r0,16(r1); \
+ std r0,PPC_LR_STKOFF(r1); \
li r0,token; \
OPAL_BRANCH(opal_tracepoint_entry) \
mfcr r12; \
@@ -92,7 +92,7 @@ opal_return:
FIXUP_ENDIAN
ld r2,PACATOC(r13);
lwz r4,8(r1);
- ld r5,16(r1);
+ ld r5,PPC_LR_STKOFF(r1);
ld r6,PACASAVEDMSR(r13);
mtspr SPRN_SRR0,r5;
mtspr SPRN_SRR1,r6;
@@ -157,43 +157,37 @@ opal_tracepoint_return:
blr
#endif
-/*
- * Make opal call in realmode. This is a generic function to be called
- * from realmode. It handles endianness.
- *
- * r13 - paca pointer
- * r1 - stack pointer
- * r0 - opal token
- */
-_GLOBAL(opal_call_realmode)
- mflr r12
- std r12,PPC_LR_STKOFF(r1)
- ld r2,PACATOC(r13)
- /* Set opal return address */
- LOAD_REG_ADDR(r12,return_from_opal_call)
- mtlr r12
-
- mfmsr r12
-#ifdef __LITTLE_ENDIAN__
- /* Handle endian-ness */
- li r11,MSR_LE
- andc r12,r12,r11
-#endif
- mtspr SPRN_HSRR1,r12
- LOAD_REG_ADDR(r11,opal)
- ld r12,8(r11)
- ld r2,0(r11)
- mtspr SPRN_HSRR0,r12
+#define OPAL_CALL_REAL(name, token) \
+ _GLOBAL_TOC(name); \
+ mflr r0; \
+ std r0,PPC_LR_STKOFF(r1); \
+ li r0,token; \
+ mfcr r12; \
+ stw r12,8(r1); \
+ \
+ /* Set opal return address */ \
+ LOAD_REG_ADDR(r11, opal_return_realmode); \
+ mtlr r11; \
+ mfmsr r12; \
+ li r11,MSR_LE; \
+ andc r12,r12,r11; \
+ mtspr SPRN_HSRR1,r12; \
+ LOAD_REG_ADDR(r11,opal); \
+ ld r12,8(r11); \
+ ld r2,0(r11); \
+ mtspr SPRN_HSRR0,r12; \
hrfid
-return_from_opal_call:
-#ifdef __LITTLE_ENDIAN__
+opal_return_realmode:
FIXUP_ENDIAN
-#endif
+ ld r2,PACATOC(r13);
+ lwz r11,8(r1);
ld r12,PPC_LR_STKOFF(r1)
+ mtcr r11;
mtlr r12
blr
+
OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL);
OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
@@ -271,6 +265,7 @@ OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE);
+OPAL_CALL_REAL(opal_rm_resync_timebase, OPAL_RESYNC_TIMEBASE);
OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN);
OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT);
OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO);
@@ -278,6 +273,7 @@ OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2);
OPAL_CALL(opal_dump_read, OPAL_DUMP_READ);
OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK);
OPAL_CALL(opal_get_msg, OPAL_GET_MSG);
+OPAL_CALL(opal_write_oppanel_async, OPAL_WRITE_OPPANEL_ASYNC);
OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION);
OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND);
OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
@@ -285,7 +281,9 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
+OPAL_CALL_REAL(opal_rm_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE);
+OPAL_CALL_REAL(opal_rm_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
@@ -302,3 +300,13 @@ OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG);
OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR);
OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR);
OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH);
+OPAL_CALL(opal_get_device_tree, OPAL_GET_DEVICE_TREE);
+OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE);
+OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE);
+OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE);
+OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR);
+OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR);
+OPAL_CALL(opal_int_eoi, OPAL_INT_EOI);
+OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR);
+OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL);
+OPAL_CALL_REAL(opal_rm_pci_tce_kill, OPAL_PCI_TCE_KILL);
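The OPAL_CALL_REAL() entries above emit separate opal_rm_* stubs for callers that are already in real mode. A hedged sketch of how a caller might pick between the two flavours; example_tce_kill is an illustrative wrapper, while the opal_pci_tce_kill() argument list matches the call made in the pci-ioda.c changes later in this diff:

	static inline int64_t example_tce_kill(struct pnv_phb *phb, bool real_mode,
					       uint32_t kill_type, uint32_t pe_number)
	{
		/* Real-mode code must use the opal_rm_* stub; the normal stub
		 * returns through the virtual-mode opal_return path.
		 */
		if (real_mode)
			return opal_rm_pci_tce_kill(phb->opal_id, kill_type,
						    pe_number, 0, 0, 0);

		return opal_pci_tce_kill(phb->opal_id, kill_type,
					 pe_number, 0, 0, 0);
	}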
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 0256d0729252..8b4fc68cebcb 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -55,8 +55,9 @@ struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static uint32_t opal_heartbeat;
+static struct task_struct *kopald_tsk;
-static void opal_reinit_cores(void)
+void opal_configure_cores(void)
{
/* Do the actual re-init, This will clobber all FPRs, VRs, etc...
*
@@ -69,6 +70,10 @@ static void opal_reinit_cores(void)
#else
opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
+
+ /* Restore some bits */
+ if (cur_cpu_spec->cpu_restore)
+ cur_cpu_spec->cpu_restore();
}
int __init early_init_dt_scan_opal(unsigned long node,
@@ -105,13 +110,6 @@ int __init early_init_dt_scan_opal(unsigned long node,
panic("OPAL != V3 detected, no longer supported.\n");
}
- /* Reinit all cores with the right endian */
- opal_reinit_cores();
-
- /* Restore some bits */
- if (cur_cpu_spec->cpu_restore)
- cur_cpu_spec->cpu_restore();
-
return 1;
}
@@ -653,6 +651,7 @@ static void opal_i2c_create_devs(void)
static int kopald(void *unused)
{
+ unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;
__be64 events;
set_freezable();
@@ -660,12 +659,18 @@ static int kopald(void *unused)
try_to_freeze();
opal_poll_events(&events);
opal_handle_events(be64_to_cpu(events));
- msleep_interruptible(opal_heartbeat);
+ schedule_timeout_interruptible(timeout);
} while (!kthread_should_stop());
return 0;
}
+void opal_wake_poller(void)
+{
+ if (kopald_tsk)
+ wake_up_process(kopald_tsk);
+}
+
static void opal_init_heartbeat(void)
{
/* Old firwmware, we assume the HVC heartbeat is sufficient */
@@ -674,7 +679,7 @@ static void opal_init_heartbeat(void)
opal_heartbeat = 0;
if (opal_heartbeat)
- kthread_run(kopald, NULL, "kopald");
+ kopald_tsk = kthread_run(kopald, NULL, "kopald");
}
static int __init opal_init(void)
@@ -751,6 +756,9 @@ static int __init opal_init(void)
opal_pdev_init(opal_node, "ibm,opal-flash");
opal_pdev_init(opal_node, "ibm,opal-prd");
+ /* Initialise platform device: oppanel interface */
+ opal_pdev_init(opal_node, "ibm,opal-oppanel");
+
/* Initialise OPAL kmsg dumper for flushing console on panic */
opal_kmsg_init();
@@ -885,3 +893,5 @@ EXPORT_SYMBOL_GPL(opal_i2c_request);
/* Export these symbols for PowerNV LED class driver */
EXPORT_SYMBOL_GPL(opal_leds_get_ind);
EXPORT_SYMBOL_GPL(opal_leds_set_ind);
+/* Export this symbol for PowerNV Operator Panel class driver */
+EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
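The kopald()/opal_wake_poller() changes above are an instance of a common kernel pattern: a polling kthread sleeps in schedule_timeout_interruptible() and other code can shorten the wait with wake_up_process(). A minimal sketch of that pattern; the example_* names are hypothetical and not part of this patch:

	#include <linux/err.h>
	#include <linux/jiffies.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static struct task_struct *example_poller_tsk;

	static int example_poller(void *unused)
	{
		while (!kthread_should_stop()) {
			/* do one round of polling work here */
			schedule_timeout_interruptible(msecs_to_jiffies(100));
		}
		return 0;
	}

	/* Make the poller run now rather than after its timeout expires */
	void example_wake_poller(void)
	{
		if (example_poller_tsk)
			wake_up_process(example_poller_tsk);
	}

	static int __init example_init(void)
	{
		example_poller_tsk = kthread_run(example_poller, NULL, "example_poller");
		return PTR_ERR_OR_ZERO(example_poller_tsk);
	}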
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
new file mode 100644
index 000000000000..1349a099c74c
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2014-2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <asm/pci-bridge.h>
+#include <asm/pnv-pci.h>
+#include <asm/opal.h>
+#include <misc/cxl.h>
+
+#include "pci.h"
+
+struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+ return of_node_get(hose->dn);
+}
+EXPORT_SYMBOL(pnv_pci_get_phb_node);
+
+int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pe;
+ int rc;
+
+ pe = pnv_ioda_get_pe(dev);
+ if (!pe)
+ return -ENODEV;
+
+ pe_info(pe, "Switching PHB to CXL\n");
+
+ rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
+ if (rc == OPAL_UNSUPPORTED)
+ dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
+ else if (rc)
+ dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
+
+/* Find the PHB for the cxl device and allocate MSI hwirqs.
+ * Returns the absolute hardware IRQ number.
+ */
+int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
+
+ if (hwirq < 0) {
+ dev_warn(&dev->dev, "Failed to find a free MSI\n");
+ return -ENOSPC;
+ }
+
+ return phb->msi_base + hwirq;
+}
+EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
+
+void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
+}
+EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
+
+void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int i, hwirq;
+
+ for (i = 1; i < CXL_IRQ_RANGES; i++) {
+ if (!irqs->range[i])
+ continue;
+ pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
+ i, irqs->offset[i],
+ irqs->range[i]);
+ hwirq = irqs->offset[i] - phb->msi_base;
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
+ irqs->range[i]);
+ }
+}
+EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
+
+int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int i, hwirq, try;
+
+ memset(irqs, 0, sizeof(struct cxl_irq_ranges));
+
+ /* 0 is reserved for the multiplexed PSL DSI interrupt */
+ for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
+ try = num;
+ while (try) {
+ hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
+ if (hwirq >= 0)
+ break;
+ try /= 2;
+ }
+ if (!try)
+ goto fail;
+
+ irqs->offset[i] = phb->msi_base + hwirq;
+ irqs->range[i] = try;
+ pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
+ i, irqs->offset[i], irqs->range[i]);
+ num -= try;
+ }
+ if (num)
+ goto fail;
+
+ return 0;
+fail:
+ pnv_cxl_release_hwirq_ranges(irqs, dev);
+ return -ENOSPC;
+}
+EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
+
+int pnv_cxl_get_irq_count(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ return phb->msi_bmp.irq_count;
+}
+EXPORT_SYMBOL(pnv_cxl_get_irq_count);
+
+int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+ unsigned int virq)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ unsigned int xive_num = hwirq - phb->msi_base;
+ struct pnv_ioda_pe *pe;
+ int rc;
+
+ if (!(pe = pnv_ioda_get_pe(dev)))
+ return -ENODEV;
+
+ /* Assign XIVE to PE */
+ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
+ if (rc) {
+ pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
+ "hwirq 0x%x XIVE 0x%x PE\n",
+ pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
+ return -EIO;
+ }
+ pnv_set_msi_irq_chip(phb, virq);
+
+ return 0;
+}
+EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
+
+#if IS_MODULE(CONFIG_CXL)
+static inline int get_cxl_module(void)
+{
+ struct module *cxl_module;
+
+ mutex_lock(&module_mutex);
+
+ cxl_module = find_module("cxl");
+ if (cxl_module)
+ __module_get(cxl_module);
+
+ mutex_unlock(&module_mutex);
+
+ if (!cxl_module)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static inline int get_cxl_module(void) { return 0; }
+#endif
+
+/*
+ * Sets flags and switches the controller ops to enable the cxl kernel api.
+ * Originally the cxl kernel API operated on a virtual PHB, but certain cards
+ * such as the Mellanox CX4 use a peer model instead, and for these cards
+ * the cxl kernel api operates on the real PHB.
+ */
+int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
+{
+ struct pnv_phb *phb = hose->private_data;
+ int rc;
+
+ if (!enable) {
+ /*
+ * Once cxl mode is enabled on the PHB, there is currently no
+ * known safe method to disable it again, and trying risks a
+ * checkstop. If we can find a way to safely disable cxl mode
+ * in the future we can revisit this, but for now the only sane
+ * thing to do is to refuse to disable cxl mode:
+ */
+ return -EPERM;
+ }
+
+ /*
+ * Hold a reference to the cxl module since several PHB operations now
+ * depend on it, and it would be insane to allow it to be removed so
+ * long as we are in this mode (and since we can't safely disable this
+ * mode once enabled...).
+ */
+ rc = get_cxl_module();
+ if (rc)
+ return rc;
+
+ phb->flags |= PNV_PHB_FLAG_CXL;
+ hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);
+
+bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ return !!(phb->flags & PNV_PHB_FLAG_CXL);
+}
+EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);
+
+struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
+{
+ struct pnv_phb *phb = hose->private_data;
+
+ return (struct cxl_afu *)phb->cxl_afu;
+}
+EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);
+
+void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ phb->cxl_afu = afu;
+}
+EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);
+
+/*
+ * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
+ * by other functions on the device for memory access and interrupts. When the
+ * other functions are enabled we explicitly take a reference on the cxl
+ * function since they will use it, and allocate a default context associated
+ * with that function just like the vPHB model of the cxl kernel API.
+ */
+bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct cxl_afu *afu = phb->cxl_afu;
+
+ if (!pnv_pci_enable_device_hook(dev))
+ return false;
+
+
+ /* No special handling for the cxl function, which is always PF 0 */
+ if (PCI_FUNC(dev->devfn) == 0)
+ return true;
+
+ if (!afu) {
+ dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
+ return false;
+ }
+
+ dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");
+
+ /* Make sure the peer AFU can't go away while this device is active */
+ cxl_afu_get(afu);
+
+ return cxl_pci_associate_default_context(dev, afu);
+}
+
+void pnv_cxl_disable_device(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct cxl_afu *afu = phb->cxl_afu;
+
+ /* No special handling for cxl function: */
+ if (PCI_FUNC(dev->devfn) == 0)
+ return;
+
+ cxl_pci_disable_device(dev);
+ cxl_afu_put(afu);
+}
+
+/*
+ * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
+ * function handles setting up the IVTE entries for the XSL to use.
+ *
+ * We are not currently filling out the MSIX table, since the only
+ * supported adapter (CX4) uses a custom MSIX table format in cxl mode and
+ * it is up to its driver to fill that out. In the future we may fill out the
+ * MSIX table (and change the IVTE entries to be an index to the MSIX table)
+ * for adapters implementing the Full MSI-X mode described in the CAIA.
+ */
+int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct msi_desc *entry;
+ struct cxl_context *ctx = NULL;
+ unsigned int virq;
+ int hwirq;
+ int afu_irq = 0;
+ int rc;
+
+ if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
+ return -ENODEV;
+
+ if (pdev->no_64bit_msi && !phb->msi32_support)
+ return -ENODEV;
+
+ rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
+ if (rc)
+ return rc;
+
+ for_each_pci_msi_entry(entry, pdev) {
+ if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
+ pr_warn("%s: Supports only 64-bit MSIs\n",
+ pci_name(pdev));
+ return -ENXIO;
+ }
+
+ hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
+ if (WARN_ON(hwirq <= 0))
+ return (hwirq ? hwirq : -ENOMEM);
+
+ virq = irq_create_mapping(NULL, hwirq);
+ if (virq == NO_IRQ) {
+ pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
+ pci_name(pdev));
+ return -ENOMEM;
+ }
+
+ rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
+ if (rc) {
+ pr_warn("%s: Failed to setup cxl mode MSI\n", pci_name(pdev));
+ irq_dispose_mapping(virq);
+ return rc;
+ }
+
+ irq_set_msi_desc(virq, entry);
+ }
+
+ return 0;
+}
+
+void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct msi_desc *entry;
+ irq_hw_number_t hwirq;
+
+ if (WARN_ON(!phb))
+ return;
+
+ for_each_pci_msi_entry(entry, pdev) {
+ if (entry->irq == NO_IRQ)
+ continue;
+ hwirq = virq_to_hw(entry->irq);
+ irq_set_msi_desc(entry->irq, NULL);
+ irq_dispose_mapping(entry->irq);
+ }
+
+ cxl_cx4_teardown_msi_irqs(pdev);
+}
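The hwirq allocation helpers exported from pci-cxl.c above are intended to be called by the cxl driver against a capi-capable device. A brief usage sketch with a hypothetical caller (example_setup_one_irq); only the pnv_cxl_* calls come from this file:

	static int example_setup_one_irq(struct pci_dev *dev)
	{
		int hwirq = pnv_cxl_alloc_hwirqs(dev, 1);

		if (hwirq < 0)
			return hwirq;

		/* ... map hwirq to a Linux virq and request a handler ... */

		pnv_cxl_release_hwirqs(dev, hwirq, 1);
		return 0;
	}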
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 3a5ea8236db8..891fc4a453df 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -55,6 +55,7 @@
#define POWERNV_IOMMU_DEFAULT_LEVELS 1
#define POWERNV_IOMMU_MAX_LEVELS 5
+static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU" };
static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
@@ -141,16 +142,14 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
- unsigned long pe;
+ unsigned long pe = phb->ioda.total_pe_num - 1;
- do {
- pe = find_next_zero_bit(phb->ioda.pe_alloc,
- phb->ioda.total_pe_num, 0);
- if (pe >= phb->ioda.total_pe_num)
- return NULL;
- } while(test_and_set_bit(pe, phb->ioda.pe_alloc));
+ for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
+ if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
+ return pnv_ioda_init_pe(phb, pe);
+ }
- return pnv_ioda_init_pe(phb, pe);
+ return NULL;
}
static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
@@ -192,18 +191,15 @@ static int pnv_ioda2_init_m64(struct pnv_phb *phb)
goto fail;
}
- /* Mark the M64 BAR assigned */
- set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);
-
/*
- * Strip off the segment used by the reserved PE, which is
- * expected to be 0 or last one of PE capabicity.
+ * Exclude the segments for the reserved and root bus PEs, which
+ * are the first or last two PEs.
*/
r = &phb->hose->mem_resources[1];
if (phb->ioda.reserved_pe_idx == 0)
- r->start += phb->ioda.m64_segsize;
+ r->start += (2 * phb->ioda.m64_segsize);
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
- r->end -= phb->ioda.m64_segsize;
+ r->end -= (2 * phb->ioda.m64_segsize);
else
pr_warn(" Cannot strip M64 segment for reserved PE#%d\n",
phb->ioda.reserved_pe_idx);
@@ -283,14 +279,14 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
}
/*
- * Exclude the segment used by the reserved PE, which
- * is expected to be 0 or last supported PE#.
+ * Exclude the segments for the reserved and root bus PEs, which
+ * are the first or last two PEs.
*/
r = &phb->hose->mem_resources[1];
if (phb->ioda.reserved_pe_idx == 0)
- r->start += phb->ioda.m64_segsize;
+ r->start += (2 * phb->ioda.m64_segsize);
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
- r->end -= phb->ioda.m64_segsize;
+ r->end -= (2 * phb->ioda.m64_segsize);
else
WARN(1, "Wrong reserved PE#%d on PHB#%d\n",
phb->ioda.reserved_pe_idx, phb->hose->global_number);
@@ -405,6 +401,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
struct pci_controller *hose = phb->hose;
struct device_node *dn = hose->dn;
struct resource *res;
+ u32 m64_range[2], i;
const u32 *r;
u64 pci_addr;
@@ -425,6 +422,30 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
return;
}
+ /*
+ * Find the available M64 BAR range and pick up the last one to
+ * cover the whole 64-bit space. We support only one range.
+ */
+ if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
+ m64_range, 2)) {
+ /* In absence of the property, assume 0..15 */
+ m64_range[0] = 0;
+ m64_range[1] = 16;
+ }
+ /* We only support 64 bits in our allocator */
+ if (m64_range[1] > 63) {
+ pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
+ __func__, m64_range[1], phb->hose->global_number);
+ m64_range[1] = 63;
+ }
+ /* Empty range, no m64 */
+ if (m64_range[1] <= m64_range[0]) {
+ pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
+ __func__, phb->hose->global_number);
+ return;
+ }
+
+ /* Configure M64 information */
res = &hose->mem_resources[1];
res->name = dn->full_name;
res->start = of_translate_address(dn, r + 2);
@@ -437,11 +458,28 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
phb->ioda.m64_base = pci_addr;
- pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
- res->start, res->end, pci_addr);
+ /* This lines up nicely with the display from processing OF ranges */
+ pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
+ res->start, res->end, pci_addr, m64_range[0],
+ m64_range[0] + m64_range[1] - 1);
+
+ /* Mark all M64 used up by default */
+ phb->ioda.m64_bar_alloc = (unsigned long)-1;
/* Use last M64 BAR to cover M64 window */
- phb->ioda.m64_bar_idx = 15;
+ m64_range[1]--;
+ phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];
+
+ pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);
+
+ /* Mark remaining ones free */
+ for (i = m64_range[0]; i < m64_range[1]; i++)
+ clear_bit(i, &phb->ioda.m64_bar_alloc);
+
+ /*
+ * Set up the init functions for M64 based on the IODA version; IODA3
+ * uses the IODA2 code.
+ */
if (phb->type == PNV_PHB_IODA1)
phb->init_m64 = pnv_ioda1_init_m64;
else
@@ -596,7 +634,7 @@ static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
* but in the meantime, we need to protect them to avoid warnings
*/
#ifdef CONFIG_PCI_MSI
-static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
+struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -714,7 +752,6 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
return 0;
}
-#ifdef CONFIG_PCI_IOV
static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
struct pci_dev *parent;
@@ -749,9 +786,11 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
}
rid_end = pe->rid + (count << 8);
} else {
+#ifdef CONFIG_PCI_IOV
if (pe->flags & PNV_IODA_PE_VF)
parent = pe->parent_dev;
else
+#endif
parent = pe->pdev->bus->self;
bcomp = OpalPciBusAll;
dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
@@ -761,7 +800,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
/* Clear the reverse map */
for (rid = pe->rid; rid < rid_end; rid++)
- phb->ioda.pe_rmap[rid] = 0;
+ phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;
/* Release from all parents PELT-V */
while (parent) {
@@ -789,11 +828,12 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
pe->pbus = NULL;
pe->pdev = NULL;
+#ifdef CONFIG_PCI_IOV
pe->parent_dev = NULL;
+#endif
return 0;
}
-#endif /* CONFIG_PCI_IOV */
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
@@ -1024,6 +1064,16 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pci_name(dev));
continue;
}
+
+ /*
+ * In partial hotplug case, the PCI device might be still
+ * associated with the PE and needn't attach it to the PE
+ * again.
+ */
+ if (pdn->pe_number != IODA_INVALID_PE)
+ continue;
+
+ pe->device_count++;
pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -1042,9 +1092,26 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
struct pci_controller *hose = pci_bus_to_host(bus);
struct pnv_phb *phb = hose->private_data;
struct pnv_ioda_pe *pe = NULL;
+ unsigned int pe_num;
+
+ /*
+ * In partial hotplug case, the PE instance might be still alive.
+ * We should reuse it instead of allocating a new one.
+ */
+ pe_num = phb->ioda.pe_rmap[bus->number << 8];
+ if (pe_num != IODA_INVALID_PE) {
+ pe = &phb->ioda.pe_array[pe_num];
+ pnv_ioda_setup_same_PE(bus, pe);
+ return NULL;
+ }
+
+ /* PE number for root bus should have been reserved */
+ if (pci_is_root_bus(bus) &&
+ phb->ioda.root_pe_idx != IODA_INVALID_PE)
+ pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
/* Check if PE is determined by M64 */
- if (phb->pick_m64_pe)
+ if (!pe && phb->pick_m64_pe)
pe = phb->pick_m64_pe(bus, all);
/* The PE number isn't pinned by M64 */
@@ -1156,30 +1223,6 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
pnv_ioda_setup_npu_PE(pdev);
}
-static void pnv_ioda_setup_PEs(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- pnv_ioda_setup_bus_PE(bus, false);
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->subordinate) {
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
- pnv_ioda_setup_bus_PE(dev->subordinate, true);
- else
- pnv_ioda_setup_PEs(dev->subordinate);
- }
- }
-}
-
-/*
- * Configure PEs so that the downstream PCI buses and devices
- * could have their associated PE#. Unfortunately, we didn't
- * figure out the way to identify the PLX bridge yet. So we
- * simply put the PCI bus and the subordinate behind the root
- * port to PE# here. The game rule here is expected to be changed
- * as soon as we can detected PLX bridge correctly.
- */
static void pnv_pci_ioda_setup_PEs(void)
{
struct pci_controller *hose, *tmp;
@@ -1187,22 +1230,11 @@ static void pnv_pci_ioda_setup_PEs(void)
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
phb = hose->private_data;
-
- /* M64 layout might affect PE allocation */
- if (phb->reserve_m64_pe)
- phb->reserve_m64_pe(hose->bus, NULL, true);
-
- /*
- * On NPU PHB, we expect separate PEs for individual PCI
- * functions. PCI bus dependent PEs are required for the
- * remaining types of PHBs.
- */
if (phb->type == PNV_PHB_NPU) {
/* PE#0 is needed for error reporting */
pnv_ioda_reserve_pe(phb, 0);
pnv_ioda_setup_npu_PEs(hose->bus);
- } else
- pnv_ioda_setup_PEs(hose->bus);
+ }
}
}
@@ -1728,7 +1760,14 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
}
}
-static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
+static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
+ bool real_mode)
+{
+ return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
+ (phb->regs + 0x210);
+}
+
+static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
unsigned long index, unsigned long npages, bool rm)
{
struct iommu_table_group_link *tgl = list_first_entry_or_null(
@@ -1736,33 +1775,17 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
next);
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
- __be64 __iomem *invalidate = rm ?
- (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
- pe->phb->ioda.tce_inval_reg;
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
unsigned long start, end, inc;
- const unsigned shift = tbl->it_page_shift;
start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
npages - 1);
- /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
- if (tbl->it_busno) {
- start <<= shift;
- end <<= shift;
- inc = 128ull << shift;
- start |= tbl->it_busno;
- end |= tbl->it_busno;
- } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
- /* p7ioc-style invalidation, 2 TCEs per write */
- start |= (1ull << 63);
- end |= (1ull << 63);
- inc = 16;
- } else {
- /* Default (older HW) */
- inc = 128;
- }
-
+ /* p7ioc-style invalidation, 2 TCEs per write */
+ start |= (1ull << 63);
+ end |= (1ull << 63);
+ inc = 16;
end |= inc - 1; /* round up end to be different than start */
mb(); /* Ensure above stores are visible */
@@ -1788,8 +1811,8 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
attrs);
- if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
- pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
+ if (!ret)
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
return ret;
}
@@ -1800,9 +1823,8 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
{
long ret = pnv_tce_xchg(tbl, index, hpa, direction);
- if (!ret && (tbl->it_type &
- (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
- pnv_pci_ioda1_tce_invalidate(tbl, index, 1, false);
+ if (!ret)
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);
return ret;
}
@@ -1813,8 +1835,7 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
}
static struct iommu_table_ops pnv_ioda1_iommu_ops = {
@@ -1826,45 +1847,42 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
.get = pnv_tce_get,
};
-#define TCE_KILL_INVAL_ALL PPC_BIT(0)
-#define TCE_KILL_INVAL_PE PPC_BIT(1)
-#define TCE_KILL_INVAL_TCE PPC_BIT(2)
+#define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0)
+#define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1)
+#define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2)
-void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
+void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
- const unsigned long val = TCE_KILL_INVAL_ALL;
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
+ const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
mb(); /* Ensure previous TCE table stores are visible */
if (rm)
- __raw_rm_writeq(cpu_to_be64(val),
- (__be64 __iomem *)
- phb->ioda.tce_inval_reg_phys);
+ __raw_rm_writeq(cpu_to_be64(val), invalidate);
else
- __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+ __raw_writeq(cpu_to_be64(val), invalidate);
}
-static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
+static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
/* 01xb - invalidate TCEs that match the specified PE# */
- unsigned long val = TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
- struct pnv_phb *phb = pe->phb;
-
- if (!phb->ioda.tce_inval_reg)
- return;
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+ unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
mb(); /* Ensure above stores are visible */
- __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+ __raw_writeq(cpu_to_be64(val), invalidate);
}
-static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
- __be64 __iomem *invalidate, unsigned shift,
- unsigned long index, unsigned long npages)
+static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
+ unsigned shift, unsigned long index,
+ unsigned long npages)
{
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
unsigned long start, end, inc;
/* We'll invalidate DMA address in PE scope */
- start = TCE_KILL_INVAL_TCE;
- start |= (pe_number & 0xFF);
+ start = PHB3_TCE_KILL_INVAL_ONE;
+ start |= (pe->pe_number & 0xFF);
end = start;
/* Figure out the start, end and step */
@@ -1882,6 +1900,17 @@ static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
}
}
+static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb;
+
+ if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
+ pnv_pci_phb3_tce_invalidate_pe(pe);
+ else
+ opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
+ pe->pe_number, 0, 0, 0);
+}
+
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
unsigned long index, unsigned long npages, bool rm)
{
@@ -1890,22 +1919,31 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
- __be64 __iomem *invalidate = rm ?
- (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
- pe->phb->ioda.tce_inval_reg;
+ struct pnv_phb *phb = pe->phb;
+ unsigned int shift = tbl->it_page_shift;
- if (pe->phb->type == PNV_PHB_NPU) {
+ if (phb->type == PNV_PHB_NPU) {
/*
* The NVLink hardware does not support TCE kill
* per TCE entry so we have to invalidate
* the entire cache for it.
*/
- pnv_pci_ioda2_tce_invalidate_entire(pe->phb, rm);
+ pnv_pci_phb3_tce_invalidate_entire(phb, rm);
continue;
}
- pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
- invalidate, tbl->it_page_shift,
- index, npages);
+ if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
+ pnv_pci_phb3_tce_invalidate(pe, rm, shift,
+ index, npages);
+ else if (rm)
+ opal_rm_pci_tce_kill(phb->opal_id,
+ OPAL_PCI_TCE_KILL_PAGES,
+ pe->pe_number, 1u << shift,
+ index << shift, npages);
+ else
+ opal_pci_tce_kill(phb->opal_id,
+ OPAL_PCI_TCE_KILL_PAGES,
+ pe->pe_number, 1u << shift,
+ index << shift, npages);
}
}
@@ -1917,7 +1955,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
attrs);
- if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
+ if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
return ret;
@@ -1929,8 +1967,7 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
{
long ret = pnv_tce_xchg(tbl, index, hpa, direction);
- if (!ret && (tbl->it_type &
- (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
+ if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
return ret;
@@ -1942,8 +1979,7 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}
static void pnv_ioda2_table_free(struct iommu_table *tbl)
@@ -2112,12 +2148,6 @@ found:
base * PNV_IODA1_DMA32_SEGSIZE,
IOMMU_PAGE_SHIFT_4K);
- /* OPAL variant of P7IOC SW invalidated TCEs */
- if (phb->ioda.tce_inval_reg)
- tbl->it_type |= (TCE_PCI_SWINV_CREATE |
- TCE_PCI_SWINV_FREE |
- TCE_PCI_SWINV_PAIR);
-
tbl->it_ops = &pnv_ioda1_iommu_ops;
pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
@@ -2179,7 +2209,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
pnv_pci_link_table_and_group(phb->hose->node, num,
tbl, &pe->table_group);
- pnv_pci_ioda2_tce_invalidate_pe(pe);
+ pnv_pci_phb3_tce_invalidate_pe(pe);
return 0;
}
@@ -2240,8 +2270,6 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
}
tbl->it_ops = &pnv_ioda2_iommu_ops;
- if (pe->phb->ioda.tce_inval_reg)
- tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
*ptbl = tbl;
@@ -2290,10 +2318,6 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_set_bypass(pe, true);
- /* OPAL variant of PHB3 invalidated TCEs */
- if (pe->phb->ioda.tce_inval_reg)
- tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
-
/*
* Setting table base here only for carrying iommu_group
* further down to let iommu_add_device() do the job.
@@ -2323,7 +2347,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
if (ret)
pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
else
- pnv_pci_ioda2_tce_invalidate_pe(pe);
+ pnv_pci_phb3_tce_invalidate_pe(pe);
pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
@@ -2504,19 +2528,6 @@ static void pnv_pci_ioda_setup_iommu_api(void)
static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif
-static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
-{
- const __be64 *swinvp;
-
- /* OPAL variant of PHB3 invalidated TCEs */
- swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
- if (!swinvp)
- return;
-
- phb->ioda.tce_inval_reg_phys = be64_to_cpup(swinvp);
- phb->ioda.tce_inval_reg = ioremap(phb->ioda.tce_inval_reg_phys, 8);
-}
-
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
unsigned levels, unsigned long limit,
unsigned long *current_offset, unsigned long *total_allocated)
@@ -2657,6 +2668,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
{
int64_t rc;
+ if (!pnv_pci_ioda_pe_dma_weight(pe))
+ return;
+
/* TVE #1 is selected by PCI address bit 59 */
pe->tce_bypass_base = 1ull << 59;
@@ -2688,49 +2702,6 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
pnv_ioda_setup_bus_dma(pe, pe->pbus);
}
-static void pnv_ioda_setup_dma(struct pnv_phb *phb)
-{
- struct pci_controller *hose = phb->hose;
- struct pnv_ioda_pe *pe;
- unsigned int weight;
-
- /* If we have more PE# than segments available, hand out one
- * per PE until we run out and let the rest fail. If not,
- * then we assign at least one segment per PE, plus more based
- * on the amount of devices under that PE
- */
- pr_info("PCI: Domain %04x has %d available 32-bit DMA segments\n",
- hose->global_number, phb->ioda.dma32_count);
-
- pnv_pci_ioda_setup_opal_tce_kill(phb);
-
- /* Walk our PE list and configure their DMA segments */
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- weight = pnv_pci_ioda_pe_dma_weight(pe);
- if (!weight)
- continue;
-
- /*
- * For IODA2 compliant PHB3, we needn't care about the weight.
- * The all available 32-bits DMA space will be assigned to
- * the specific PE.
- */
- if (phb->type == PNV_PHB_IODA1) {
- pnv_pci_ioda1_setup_dma_pe(phb, pe);
- } else if (phb->type == PNV_PHB_IODA2) {
- pe_info(pe, "Assign DMA32 space\n");
- pnv_pci_ioda2_setup_dma_pe(phb, pe);
- } else if (phb->type == PNV_PHB_NPU) {
- /*
- * We initialise the DMA space for an NPU PHB
- * after setup of the PHB is complete as we
- * point the NPU TVT to the the same location
- * as the PHB3 TVT.
- */
- }
- }
-}
-
#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
@@ -2747,12 +2718,13 @@ static void pnv_ioda2_msi_eoi(struct irq_data *d)
}
-static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
+void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
struct irq_data *idata;
struct irq_chip *ichip;
- if (phb->type != PNV_PHB_IODA2)
+ /* The MSI EOI OPAL call is only needed on PHB3 */
+ if (phb->model != PNV_PHB_MODEL_PHB3)
return;
if (!phb->ioda.irq_chip_init) {
@@ -2769,157 +2741,6 @@ static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
irq_set_chip(virq, &phb->ioda.irq_chip);
}
-#ifdef CONFIG_CXL_BASE
-
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- return of_node_get(hose->dn);
-}
-EXPORT_SYMBOL(pnv_pci_get_phb_node);
-
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pe;
- int rc;
-
- pe = pnv_ioda_get_pe(dev);
- if (!pe)
- return -ENODEV;
-
- pe_info(pe, "Switching PHB to CXL\n");
-
- rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
- if (rc)
- dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
-
- return rc;
-}
-EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
-
-/* Find PHB for cxl dev and allocate MSI hwirqs?
- * Returns the absolute hardware IRQ number
- */
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
-
- if (hwirq < 0) {
- dev_warn(&dev->dev, "Failed to find a free MSI\n");
- return -ENOSPC;
- }
-
- return phb->msi_base + hwirq;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
-
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
-
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq;
-
- for (i = 1; i < CXL_IRQ_RANGES; i++) {
- if (!irqs->range[i])
- continue;
- pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
- i, irqs->offset[i],
- irqs->range[i]);
- hwirq = irqs->offset[i] - phb->msi_base;
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
- irqs->range[i]);
- }
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
-
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq, try;
-
- memset(irqs, 0, sizeof(struct cxl_irq_ranges));
-
- /* 0 is reserved for the multiplexed PSL DSI interrupt */
- for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
- try = num;
- while (try) {
- hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
- if (hwirq >= 0)
- break;
- try /= 2;
- }
- if (!try)
- goto fail;
-
- irqs->offset[i] = phb->msi_base + hwirq;
- irqs->range[i] = try;
- pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
- i, irqs->offset[i], irqs->range[i]);
- num -= try;
- }
- if (num)
- goto fail;
-
- return 0;
-fail:
- pnv_cxl_release_hwirq_ranges(irqs, dev);
- return -ENOSPC;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
-
-int pnv_cxl_get_irq_count(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- return phb->msi_bmp.irq_count;
-}
-EXPORT_SYMBOL(pnv_cxl_get_irq_count);
-
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- unsigned int xive_num = hwirq - phb->msi_base;
- struct pnv_ioda_pe *pe;
- int rc;
-
- if (!(pe = pnv_ioda_get_pe(dev)))
- return -ENODEV;
-
- /* Assign XIVE to PE */
- rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
- if (rc) {
- pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
- "hwirq 0x%x XIVE 0x%x PE\n",
- pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
- return -EIO;
- }
- set_msi_irq_chip(phb, virq);
-
- return 0;
-}
-EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
-#endif
-
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg)
@@ -2976,7 +2797,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
msg->data = be32_to_cpu(data);
- set_msi_irq_chip(phb, virq);
+ pnv_set_msi_irq_chip(phb, virq);
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
" address=%x_%08x data=%x PE# %d\n",
@@ -3197,41 +3018,6 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
}
}
-static void pnv_pci_ioda_setup_seg(void)
-{
- struct pci_controller *tmp, *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- phb = hose->private_data;
-
- /* NPU PHB does not support IO or MMIO segmentation */
- if (phb->type == PNV_PHB_NPU)
- continue;
-
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- pnv_ioda_setup_pe_seg(pe);
- }
- }
-}
-
-static void pnv_pci_ioda_setup_DMA(void)
-{
- struct pci_controller *hose, *tmp;
- struct pnv_phb *phb;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- pnv_ioda_setup_dma(hose->private_data);
-
- /* Mark the PHB initialization done */
- phb = hose->private_data;
- phb->initialized = 1;
- }
-
- pnv_pci_ioda_setup_iommu_api();
-}
-
static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
@@ -3242,6 +3028,9 @@ static void pnv_pci_ioda_create_dbgfs(void)
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
phb = hose->private_data;
+ /* Mark the PHB initialization as done */
+ phb->initialized = 1;
+
sprintf(name, "PCI%04x", hose->global_number);
phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
if (!phb->dbgfs)
@@ -3254,9 +3043,7 @@ static void pnv_pci_ioda_create_dbgfs(void)
static void pnv_pci_ioda_fixup(void)
{
pnv_pci_ioda_setup_PEs();
- pnv_pci_ioda_setup_seg();
- pnv_pci_ioda_setup_DMA();
-
+ pnv_pci_ioda_setup_iommu_api();
pnv_pci_ioda_create_dbgfs();
#ifdef CONFIG_EEH
@@ -3306,6 +3093,115 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
return phb->ioda.io_segsize;
}
+/*
+ * Update the root port, or the upstream port of the bridge behind
+ * the root port, with the PHB's windows so that changes in required
+ * resources can be accommodated during PCI (slot) hotplug. The
+ * hotplugged slot is connected to either the root port or one of the
+ * downstream ports of the PCIe switch behind the root port.
+ */
+static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
+ unsigned long type)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dev *bridge = bus->self;
+ struct resource *r, *w;
+ bool msi_region = false;
+ int i;
+
+ /* Check if we need to apply the fixup to the bridge's windows */
+ if (!pci_is_root_bus(bridge->bus) &&
+ !pci_is_root_bus(bridge->bus->self->bus))
+ return;
+
+ /* Fixup the resources */
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
+ if (!r->flags || !r->parent)
+ continue;
+
+ w = NULL;
+ if (r->flags & type & IORESOURCE_IO)
+ w = &hose->io_resource;
+ else if (pnv_pci_is_mem_pref_64(r->flags) &&
+ (type & IORESOURCE_PREFETCH) &&
+ phb->ioda.m64_segsize)
+ w = &hose->mem_resources[1];
+ else if (r->flags & type & IORESOURCE_MEM) {
+ w = &hose->mem_resources[0];
+ msi_region = true;
+ }
+
+ r->start = w->start;
+ r->end = w->end;
+
+ /* The 64KB 32-bit MSI region shouldn't be included in
+ * the 32-bit bridge window. Otherwise, we can see strange
+ * issues such as an EEH error observed on Garrison.
+ *
+ * Exclude the top 1MB region, which is the minimal alignment
+ * of the 32-bit bridge window.
+ */
+ if (msi_region) {
+ r->end += 0x10000;
+ r->end -= 0x100000;
+ }
+ }
+}
+
+static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dev *bridge = bus->self;
+ struct pnv_ioda_pe *pe;
+ bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
+
+ /* Extend bridge's windows if necessary */
+ pnv_pci_fixup_bridge_resources(bus, type);
+
+ /* The PE for the root bus should be realized before any others */
+ if (!phb->ioda.root_pe_populated) {
+ pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
+ if (pe) {
+ phb->ioda.root_pe_idx = pe->pe_number;
+ phb->ioda.root_pe_populated = true;
+ }
+ }
+
+ /* Don't assign a PE to a PCI bus that has no subordinate devices */
+ if (list_empty(&bus->devices))
+ return;
+
+ /* Reserve PEs according to used M64 resources */
+ if (phb->reserve_m64_pe)
+ phb->reserve_m64_pe(bus, NULL, all);
+
+ /*
+ * Assign a PE. We might get here because of partial hotplug;
+ * in that case we just pick up the existing PE and must not
+ * allocate resources again.
+ */
+ pe = pnv_ioda_setup_bus_PE(bus, all);
+ if (!pe)
+ return;
+
+ pnv_ioda_setup_pe_seg(pe);
+ switch (phb->type) {
+ case PNV_PHB_IODA1:
+ pnv_pci_ioda1_setup_dma_pe(phb, pe);
+ break;
+ case PNV_PHB_IODA2:
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ break;
+ default:
+ pr_warn("%s: No DMA for PHB#%d (type %d)\n",
+ __func__, phb->hose->global_number, phb->type);
+ }
+}
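(A minimal orientation sketch, not part of the patch: the setup_bridge
controller op added here is expected to be reached through a
pcibios_setup_bridge() dispatcher when bridge windows are assigned; the
exact hook name and wiring are assumptions, roughly along these lines.)

/* Hypothetical dispatcher; hook name and placement are assumed. */
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->controller_ops.setup_bridge)
		hose->controller_ops.setup_bridge(bus, type);
}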
+
#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
int resno)
@@ -3345,7 +3241,7 @@ static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
-static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
+bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -3366,6 +3262,178 @@ static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
return true;
}
+static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
+ int num)
+{
+ struct pnv_ioda_pe *pe = container_of(table_group,
+ struct pnv_ioda_pe, table_group);
+ struct pnv_phb *phb = pe->phb;
+ unsigned int idx;
+ long rc;
+
+ pe_info(pe, "Removing DMA window #%d\n", num);
+ for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
+ if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
+ continue;
+
+ rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ idx, 0, 0ul, 0ul, 0ul);
+ if (rc != OPAL_SUCCESS) {
+ pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
+ rc, idx);
+ return rc;
+ }
+
+ phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
+ }
+
+ pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
+ return OPAL_SUCCESS;
+}
+
+static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
+{
+ unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
+ struct iommu_table *tbl = pe->table_group.tables[0];
+ int64_t rc;
+
+ if (!weight)
+ return;
+
+ rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
+ if (rc != OPAL_SUCCESS)
+ return;
+
+ pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
+ if (pe->table_group.group) {
+ iommu_group_put(pe->table_group.group);
+ WARN_ON(pe->table_group.group);
+ }
+
+ free_pages(tbl->it_base, get_order(tbl->it_size << 3));
+ iommu_free_table(tbl, "pnv");
+}
+
+static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
+{
+ struct iommu_table *tbl = pe->table_group.tables[0];
+ unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
+#ifdef CONFIG_IOMMU_API
+ int64_t rc;
+#endif
+
+ if (!weight)
+ return;
+
+#ifdef CONFIG_IOMMU_API
+ rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+#endif
+
+ pnv_pci_ioda2_set_bypass(pe, false);
+ if (pe->table_group.group) {
+ iommu_group_put(pe->table_group.group);
+ WARN_ON(pe->table_group.group);
+ }
+
+ pnv_pci_ioda2_table_free_pages(tbl);
+ iommu_free_table(tbl, "pnv");
+}
+
+static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
+ unsigned short win,
+ unsigned int *map)
+{
+ struct pnv_phb *phb = pe->phb;
+ int idx;
+ int64_t rc;
+
+ for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
+ if (map[idx] != pe->pe_number)
+ continue;
+
+ if (win == OPAL_M64_WINDOW_TYPE)
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ phb->ioda.reserved_pe_idx, win,
+ idx / PNV_IODA1_M64_SEGS,
+ idx % PNV_IODA1_M64_SEGS);
+ else
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ phb->ioda.reserved_pe_idx, win, 0, idx);
+
+ if (rc != OPAL_SUCCESS)
+ pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
+ rc, win, idx);
+
+ map[idx] = IODA_INVALID_PE;
+ }
+}
+
+static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb;
+
+ if (phb->type == PNV_PHB_IODA1) {
+ pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
+ phb->ioda.io_segmap);
+ pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
+ phb->ioda.m32_segmap);
+ pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
+ phb->ioda.m64_segmap);
+ } else if (phb->type == PNV_PHB_IODA2) {
+ pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
+ phb->ioda.m32_segmap);
+ }
+}
+
+static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb;
+ struct pnv_ioda_pe *slave, *tmp;
+
+ /* Release slave PEs in compound PE */
+ if (pe->flags & PNV_IODA_PE_MASTER) {
+ list_for_each_entry_safe(slave, tmp, &pe->slaves, list)
+ pnv_ioda_release_pe(slave);
+ }
+
+ list_del(&pe->list);
+ switch (phb->type) {
+ case PNV_PHB_IODA1:
+ pnv_pci_ioda1_release_pe_dma(pe);
+ break;
+ case PNV_PHB_IODA2:
+ pnv_pci_ioda2_release_pe_dma(pe);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ pnv_ioda_release_pe_seg(pe);
+ pnv_ioda_deconfigure_pe(pe->phb, pe);
+ pnv_ioda_free_pe(pe);
+}
+
+static void pnv_pci_release_device(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+
+ if (pdev->is_virtfn)
+ return;
+
+ if (!pdn || pdn->pe_number == IODA_INVALID_PE)
+ return;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ WARN_ON(--pe->device_count < 0);
+ if (pe->device_count == 0)
+ pnv_ioda_release_pe(pe);
+}
+
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
@@ -3382,7 +3450,9 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.teardown_msi_irqs = pnv_teardown_msi_irqs,
#endif
.enable_device_hook = pnv_pci_enable_device_hook,
+ .release_device = pnv_pci_release_device,
.window_alignment = pnv_pci_window_alignment,
+ .setup_bridge = pnv_pci_setup_bridge,
.reset_secondary_bus = pnv_pci_reset_secondary_bus,
.dma_set_mask = pnv_pci_ioda_dma_set_mask,
.dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
@@ -3410,6 +3480,26 @@ static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
.shutdown = pnv_pci_ioda_shutdown,
};
+#ifdef CONFIG_CXL_BASE
+const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
+ .dma_dev_setup = pnv_pci_dma_dev_setup,
+ .dma_bus_setup = pnv_pci_dma_bus_setup,
+#ifdef CONFIG_PCI_MSI
+ .setup_msi_irqs = pnv_cxl_cx4_setup_msi_irqs,
+ .teardown_msi_irqs = pnv_cxl_cx4_teardown_msi_irqs,
+#endif
+ .enable_device_hook = pnv_cxl_enable_device_hook,
+ .disable_device = pnv_cxl_disable_device,
+ .release_device = pnv_pci_release_device,
+ .window_alignment = pnv_pci_window_alignment,
+ .setup_bridge = pnv_pci_setup_bridge,
+ .reset_secondary_bus = pnv_pci_reset_secondary_bus,
+ .dma_set_mask = pnv_pci_ioda_dma_set_mask,
+ .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
+ .shutdown = pnv_pci_ioda_shutdown,
+};
+#endif
+
static void __init pnv_pci_init_ioda_phb(struct device_node *np,
u64 hub_id, int ioda_type)
{
@@ -3417,6 +3507,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
struct pnv_phb *phb;
unsigned long size, m64map_off, m32map_off, pemap_off;
unsigned long iomap_off = 0, dma32map_off = 0;
+ struct resource r;
const __be64 *prop64;
const __be32 *prop32;
int len;
@@ -3425,7 +3516,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
void *aux;
long rc;
- pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
+ if (!of_device_is_available(np))
+ return;
+
+ pr_info("Initializing %s PHB (%s)\n",
+ pnv_phb_names[ioda_type], of_node_full_name(np));
prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
if (!prop64) {
@@ -3476,9 +3571,12 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
/* Get registers */
- phb->regs = of_iomap(np, 0);
- if (phb->regs == NULL)
- pr_err(" Failed to map registers !\n");
+ if (!of_address_to_resource(np, 0, &r)) {
+ phb->regs_phys = r.start;
+ phb->regs = ioremap(r.start, resource_size(&r));
+ if (phb->regs == NULL)
+ pr_err(" Failed to map registers !\n");
+ }
/* Initialize more IODA stuff */
phb->ioda.total_pe_num = 1;
@@ -3489,6 +3587,10 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
if (prop32)
phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
+ /* Invalidate RID to PE# mapping */
+ for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
+ phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;
+
/* Parse 64-bit MMIO range */
pnv_ioda_parse_m64_window(phb);
@@ -3540,7 +3642,22 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
}
phb->ioda.pe_array = aux + pemap_off;
- set_bit(phb->ioda.reserved_pe_idx, phb->ioda.pe_alloc);
+
+ /*
+ * Choose the PE number for the root bus, which shouldn't have
+ * M64 resources consumed by its child devices. Pick the PE
+ * number adjacent to the reserved one if possible.
+ */
+ pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
+ if (phb->ioda.reserved_pe_idx == 0) {
+ phb->ioda.root_pe_idx = 1;
+ pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
+ } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
+ phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
+ pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
+ } else {
+ phb->ioda.root_pe_idx = IODA_INVALID_PE;
+ }
INIT_LIST_HEAD(&phb->ioda.pe_list);
mutex_init(&phb->ioda.pe_list_mutex);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 1d92bd93bcd9..6701dd5ded20 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -26,6 +26,7 @@
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
+#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
@@ -36,8 +37,124 @@
#include "powernv.h"
#include "pci.h"
-/* Delay in usec */
-#define PCI_RESET_DELAY_US 3000000
+int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
+{
+ struct device_node *parent = np;
+ u32 bdfn;
+ u64 phbid;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &bdfn);
+ if (ret)
+ return -ENXIO;
+
+ bdfn = ((bdfn & 0x00ffff00) >> 8);
+ while ((parent = of_get_parent(parent))) {
+ if (!PCI_DN(parent)) {
+ of_node_put(parent);
+ break;
+ }
+
+ if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
+ of_node_put(parent);
+ continue;
+ }
+
+ ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
+ if (ret) {
+ of_node_put(parent);
+ return -ENXIO;
+ }
+
+ *id = PCI_SLOT_ID(phbid, bdfn);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
+
+int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
+{
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_GET_DEVICE_TREE))
+ return -ENXIO;
+
+ rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
+ if (rc < OPAL_SUCCESS)
+ return -EIO;
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
+
+int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
+{
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
+ return -ENXIO;
+
+ rc = opal_pci_get_presence_state(id, (uint64_t)state);
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
+
+int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
+{
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
+ return -ENXIO;
+
+ rc = opal_pci_get_power_state(id, (uint64_t)state);
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
+
+int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
+{
+ struct opal_msg m;
+ int token, ret;
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
+ return -ENXIO;
+
+ token = opal_async_get_token_interruptible();
+ if (unlikely(token < 0))
+ return token;
+
+ rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
+ if (rc == OPAL_SUCCESS) {
+ ret = 0;
+ goto exit;
+ } else if (rc != OPAL_ASYNC_COMPLETION) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ ret = opal_async_wait_response(token, &m);
+ if (ret < 0)
+ goto exit;
+
+ if (msg) {
+ ret = 1;
+ memcpy(msg, &m, sizeof(m));
+ }
+
+exit:
+ opal_async_release_token(token);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
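(A minimal usage sketch, not part of the patch: a hypothetical hotplug
caller combining the two exported helpers above; the caller name and
the choice of @state value are assumptions.)

/* Hypothetical caller: resolve the slot ID, then set its power state. */
static int example_set_slot_power(struct device_node *np, uint8_t state)
{
	struct opal_msg msg;
	uint64_t id;
	int ret;

	ret = pnv_pci_get_slot_id(np, &id);
	if (ret)
		return ret;

	/* Returns 1 and fills @msg when OPAL completes asynchronously */
	ret = pnv_pci_set_power_state(id, state, &msg);
	return ret < 0 ? ret : 0;
}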
#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -620,8 +737,8 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
if (newtce & TCE_PCI_WRITE)
newtce |= TCE_PCI_READ;
- oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
- *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
+ *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
*direction = iommu_tce_direction(oldtce);
return 0;
@@ -815,13 +932,14 @@ void __init pnv_pci_init(void)
for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
pnv_pci_init_ioda2_phb(np);
+ /* Look for ioda3 built-in PHB4s; we treat them as IODA2 */
+ for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
+ pnv_pci_init_ioda2_phb(np);
+
/* Look for NPU PHBs */
for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
pnv_pci_init_npu_phb(np);
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
-
/* Configure IOMMU DMA hooks */
set_pci_dma_ops(&dma_iommu_ops);
}
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7dee25e304db..d088d4f06116 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -1,6 +1,10 @@
#ifndef __POWERNV_PCI_H
#define __POWERNV_PCI_H
+#include <linux/iommu.h>
+#include <asm/iommu.h>
+#include <asm/msi_bitmap.h>
+
struct pci_dn;
enum pnv_phb_type {
@@ -30,6 +34,7 @@ struct pnv_phb;
struct pnv_ioda_pe {
unsigned long flags;
struct pnv_phb *phb;
+ int device_count;
/* A PE can be associated with a single device or an
* entire bus (& children). In the former case, pdev
@@ -71,6 +76,7 @@ struct pnv_ioda_pe {
};
#define PNV_PHB_FLAG_EEH (1 << 0)
+#define PNV_PHB_FLAG_CXL (1 << 1) /* Real PHB supporting the cxl kernel API */
struct pnv_phb {
struct pci_controller *hose;
@@ -80,6 +86,7 @@ struct pnv_phb {
u64 opal_id;
int flags;
void __iomem *regs;
+ u64 regs_phys;
int initialized;
spinlock_t lock;
@@ -110,6 +117,8 @@ struct pnv_phb {
/* Global bridge info */
unsigned int total_pe_num;
unsigned int reserved_pe_idx;
+ unsigned int root_pe_idx;
+ bool root_pe_populated;
/* 32-bit MMIO window */
unsigned int m32_size;
@@ -152,17 +161,8 @@ struct pnv_phb {
struct list_head pe_list;
struct mutex pe_list_mutex;
- /* Reverse map of PEs, will have to extend if
- * we are to support more than 256 PEs, indexed
- * bus { bus, devfn }
- */
- unsigned char pe_rmap[0x10000];
-
- /* TCE cache invalidate registers (physical and
- * remapped)
- */
- phys_addr_t tce_inval_reg_phys;
- __be64 __iomem *tce_inval_reg;
+ /* Reverse map of PEs, indexed by {bus, devfn} */
+ unsigned int pe_rmap[0x10000];
} ioda;
/* PHB and hub status structure */
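(A minimal lookup sketch, assuming the {bus, devfn} indexing convention
stated in the comment above; the helper name is hypothetical.)

static inline unsigned int example_pe_for_bdfn(struct pnv_phb *phb,
					       unsigned int bus,
					       unsigned int devfn)
{
	/* Widened entries allow PE numbers above 255 and IODA_INVALID_PE */
	return phb->ioda.pe_rmap[((bus & 0xff) << 8) | (devfn & 0xff)];
}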
@@ -173,6 +173,9 @@ struct pnv_phb {
struct OpalIoP7IOCErrorData hub_diag;
} diag;
+#ifdef CONFIG_CXL_BASE
+ struct cxl_afu *cxl_afu;
+#endif
};
extern struct pci_ops pnv_pci_ops;
@@ -203,8 +206,6 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
-extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
- __be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
@@ -212,6 +213,9 @@ extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
+extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
+extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
+extern bool pnv_pci_enable_device_hook(struct pci_dev *dev);
extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...);
@@ -224,7 +228,7 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
/* Nvlink functions */
extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
-extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
+extern void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
struct iommu_table *tbl);
@@ -232,4 +236,15 @@ extern long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num);
extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe);
extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
+
+/* cxl functions */
+extern bool pnv_cxl_enable_device_hook(struct pci_dev *dev);
+extern void pnv_cxl_disable_device(struct pci_dev *dev);
+extern int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
+extern void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
+
+
+/* phb ops (cxl switches these when enabling the kernel api on the phb) */
+extern const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops;
+
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 6dbc0a1da1f6..da7c843ac7f1 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -18,6 +18,7 @@ static inline void pnv_pci_shutdown(void) { }
#endif
extern u32 pnv_get_supported_cpuidle_states(void);
+extern u64 pnv_deepest_stop_state;
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index ee6430bedcc3..efe8b6bb168b 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -58,7 +58,7 @@ static void __init pnv_setup_arch(void)
/* XXX PMCS */
}
-static void __init pnv_init_early(void)
+static void __init pnv_init(void)
{
/*
* Initialize the LPC bus now so that legacy serial
@@ -268,21 +268,16 @@ static void __init pnv_setup_machdep_opal(void)
static int __init pnv_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,powernv"))
+ if (!of_machine_is_compatible("ibm,powernv"))
return 0;
- if (IS_ENABLED(CONFIG_PPC_RADIX_MMU) && radix_enabled())
- radix_init_native();
- else if (IS_ENABLED(CONFIG_PPC_STD_MMU_64))
- hpte_init_native();
-
if (firmware_has_feature(FW_FEATURE_OPAL))
pnv_setup_machdep_opal();
pr_debug("PowerNV detected !\n");
+ pnv_init();
+
return 1;
}
@@ -308,14 +303,13 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
define_machine(powernv) {
.name = "PowerNV",
.probe = pnv_probe,
- .init_early = pnv_init_early,
.setup_arch = pnv_setup_arch,
.init_IRQ = pnv_init_IRQ,
.show_cpuinfo = pnv_show_cpuinfo,
.get_proc_freq = pnv_get_proc_freq,
.progress = pnv_progress,
.machine_shutdown = pnv_shutdown,
- .power_save = power7_idle,
+ .power_save = NULL,
.calibrate_decr = generic_calibrate_decr,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index ad7b1a3dbed0..c789258ae1e1 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -182,7 +182,9 @@ static void pnv_smp_cpu_kill_self(void)
ppc64_runlatch_off();
- if (idle_states & OPAL_PM_WINKLE_ENABLED)
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ srr1 = power9_idle_stop(pnv_deepest_stop_state);
+ else if (idle_states & OPAL_PM_WINKLE_ENABLED)
srr1 = power7_winkle();
else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
(idle_states & OPAL_PM_SLEEP_ENABLED_ER1))
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index c9a3e677192a..cb3c50328de8 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -195,12 +195,12 @@ static void ps3_hpte_clear(void)
void __init ps3_hpte_init(unsigned long htab_size)
{
- ppc_md.hpte_invalidate = ps3_hpte_invalidate;
- ppc_md.hpte_updatepp = ps3_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
- ppc_md.hpte_insert = ps3_hpte_insert;
- ppc_md.hpte_remove = ps3_hpte_remove;
- ppc_md.hpte_clear_all = ps3_hpte_clear;
+ mmu_hash_ops.hpte_invalidate = ps3_hpte_invalidate;
+ mmu_hash_ops.hpte_updatepp = ps3_hpte_updatepp;
+ mmu_hash_ops.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
+ mmu_hash_ops.hpte_insert = ps3_hpte_insert;
+ mmu_hash_ops.hpte_remove = ps3_hpte_remove;
+ mmu_hash_ops.hpte_clear_all = ps3_hpte_clear;
ppc64_pft_size = __ilog2(htab_size);
}
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index bfccdc7cb85f..814a7eaa7769 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -1198,7 +1198,7 @@ int ps3_repository_delete_highmem_info(unsigned int region_index)
return result ? -1 : 0;
}
-#endif /* defined(CONFIG_PS3_WRITE_REPOSITORY) */
+#endif /* defined(CONFIG_PS3_REPOSITORY_WRITE) */
#if defined(DEBUG)
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 799c8580ab09..3a487e7f4a5e 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -80,7 +80,7 @@ static void ps3_power_save(void)
lv1_pause(0);
}
-static void ps3_restart(char *cmd)
+static void __noreturn ps3_restart(char *cmd)
{
DBG("%s:%d cmd '%s'\n", __func__, __LINE__, cmd);
@@ -96,7 +96,7 @@ static void ps3_power_off(void)
ps3_sys_manager_power_off(); /* never returns */
}
-static void ps3_halt(void)
+static void __noreturn ps3_halt(void)
{
DBG("%s:%d\n", __func__, __LINE__);
@@ -226,23 +226,24 @@ static void __init ps3_progress(char *s, unsigned short hex)
printk("*** %04x : %s\n", hex, s ? s : "");
}
-static int __init ps3_probe(void)
+void __init ps3_early_mm_init(void)
{
unsigned long htab_size;
- unsigned long dt_root;
+ ps3_mm_init();
+ ps3_mm_vas_create(&htab_size);
+ ps3_hpte_init(htab_size);
+}
+
+static int __init ps3_probe(void)
+{
DBG(" -> %s:%d\n", __func__, __LINE__);
- dt_root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(dt_root, "sony,ps3"))
+ if (!of_machine_is_compatible("sony,ps3"))
return 0;
- powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
-
ps3_os_area_save_params();
- ps3_mm_init();
- ps3_mm_vas_create(&htab_size);
- ps3_hpte_init(htab_size);
+
pm_power_off = ps3_power_off;
DBG(" <- %s:%d\n", __func__, __LINE__);
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index fc44ad0475f8..66e7227469b8 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -574,7 +574,7 @@ static int cmm_mem_going_offline(void *arg)
cmm_dbg("Failed to allocate memory for list "
"management. Memory hotplug "
"failed.\n");
- return ENOMEM;
+ return -ENOMEM;
}
memcpy(npa, pa_curr, PAGE_SIZE);
if (pa_curr == cmm_page_list)
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 2b93ae8d557a..4748124faa10 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -27,6 +27,15 @@
#include <asm/uaccess.h>
#include <asm/rtas.h>
+struct workqueue_struct *pseries_hp_wq;
+
+struct pseries_hp_work {
+ struct work_struct work;
+ struct pseries_hp_errorlog *errlog;
+ struct completion *hp_completion;
+ int *rc;
+};
+
struct cc_workarea {
__be32 drc_index;
__be32 zero;
@@ -368,10 +377,51 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
return rc;
}
+void pseries_hp_work_fn(struct work_struct *work)
+{
+ struct pseries_hp_work *hp_work =
+ container_of(work, struct pseries_hp_work, work);
+
+ if (hp_work->rc)
+ *(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
+ else
+ handle_dlpar_errorlog(hp_work->errlog);
+
+ if (hp_work->hp_completion)
+ complete(hp_work->hp_completion);
+
+ kfree(hp_work->errlog);
+ kfree((void *)work);
+}
+
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
+ struct completion *hotplug_done, int *rc)
+{
+ struct pseries_hp_work *work;
+ struct pseries_hp_errorlog *hp_errlog_copy;
+
+ hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog),
+ GFP_KERNEL);
+ memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog));
+
+ work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
+ if (work) {
+ INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
+ work->errlog = hp_errlog_copy;
+ work->hp_completion = hotplug_done;
+ work->rc = rc;
+ queue_work(pseries_hp_wq, (struct work_struct *)work);
+ } else {
+ *rc = -ENOMEM;
+ complete(hotplug_done);
+ }
+}
+
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
struct pseries_hp_errorlog *hp_elog;
+ struct completion hotplug_done;
const char *arg;
int rc;
@@ -439,7 +489,9 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
goto dlpar_store_out;
}
- rc = handle_dlpar_errorlog(hp_elog);
+ init_completion(&hotplug_done);
+ queue_hotplug_event(hp_elog, &hotplug_done, &rc);
+ wait_for_completion(&hotplug_done);
dlpar_store_out:
kfree(hp_elog);
@@ -450,6 +502,8 @@ static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
static int __init pseries_dlpar_init(void)
{
+ pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
+ WQ_UNBOUND, 1);
return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, pseries_dlpar_init);
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ac3ffd97e059..1c428f06b14c 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -2,7 +2,7 @@
* The file intends to implement the platform dependent EEH operations on pseries.
* Actually, the pseries platform is built based on RTAS heavily. That means the
* pseries platform dependent EEH operations will be built on RTAS calls. The functions
- * are devired from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
+ * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
* been done.
*
* Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
@@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
static int ibm_configure_pe;
/*
@@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
ibm_configure_pe = rtas_token("ibm,configure-pe");
- ibm_configure_bridge = rtas_token("ibm,configure-bridge");
+
+ /*
+ * ibm,configure-pe and ibm,configure-bridge have the same semantics;
+ * however, ibm,configure-pe can be faster. If we can't find
+ * ibm,configure-pe then fall back to using ibm,configure-bridge.
+ */
+ if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+ ibm_configure_pe = rtas_token("ibm,configure-bridge");
/*
* Necessary sanity check. We needn't check "get-config-addr-info"
@@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
(ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
- (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
- ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+ ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
pr_info("EEH functionality not supported\n");
return -EINVAL;
}
@@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
int config_addr;
int ret;
+ /* Wait a maximum of 0.2s before skipping configuration */
+ int max_wait = 200;
/* Figure out the PE address */
config_addr = pe->config_addr;
if (pe->addr)
config_addr = pe->addr;
- /* Use new configure-pe function, if supported */
- if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+ while (max_wait > 0) {
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
- } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid));
- } else {
- return -EFAULT;
- }
- if (ret)
- pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
- __func__, pe->phb->global_number, pe->addr, ret);
+ if (!ret)
+ return ret;
+
+ /*
+ * If RTAS returns a delay value that's above 100ms, cut it
+ * down to 100ms in case firmware made a mistake. For more
+ * on how these delay values work, see rtas_busy_delay_time().
+ */
+ if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+ ret <= RTAS_EXTENDED_DELAY_MAX)
+ ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+ max_wait -= rtas_busy_delay_time(ret);
+
+ if (max_wait < 0)
+ break;
+
+ rtas_busy_delay(ret);
+ }
+ pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+ __func__, pe->phb->global_number, pe->addr, ret);
return ret;
}
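(A worked example of the delay capping above, assuming the usual RTAS
encoding where extended delay statuses 9900..9905 request a wait of
10^(status - 9900) milliseconds; the helper is illustrative only.)

static unsigned int example_extended_delay_ms(int status)
{
	unsigned int ms = 1;
	int i;

	/* Assumed encoding: each step above 9900 multiplies the wait by 10 */
	for (i = RTAS_EXTENDED_DELAY_MIN; i < status; i++)
		ms *= 10;

	return ms;	/* e.g. RTAS_EXTENDED_DELAY_MIN + 2 -> 100ms */
}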
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 18380e8f6dfe..a6ddca833119 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -26,48 +26,21 @@ void request_event_sources_irqs(struct device_node *np,
{
int i, index, count = 0;
struct of_phandle_args oirq;
- const u32 *opicprop;
- unsigned int opicplen;
unsigned int virqs[16];
- /* Check for obsolete "open-pic-interrupt" property. If present, then
- * map those interrupts using the default interrupt host and default
- * trigger
- */
- opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
- if (opicprop) {
- opicplen /= sizeof(u32);
- for (i = 0; i < opicplen; i++) {
- if (count > 15)
- break;
- virqs[count] = irq_create_mapping(NULL, *(opicprop++));
- if (virqs[count] == NO_IRQ) {
- pr_err("event-sources: Unable to allocate "
- "interrupt number for %s\n",
- np->full_name);
- WARN_ON(1);
- }
- else
- count++;
-
- }
- }
- /* Else use normal interrupt tree parsing */
- else {
- /* First try to do a proper OF tree parsing */
- for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
- index++) {
- if (count > 15)
- break;
- virqs[count] = irq_create_of_mapping(&oirq);
- if (virqs[count] == NO_IRQ) {
- pr_err("event-sources: Unable to allocate "
- "interrupt number for %s\n",
- np->full_name);
- WARN_ON(1);
- }
- else
- count++;
+ /* First try to do a proper OF tree parsing */
+ for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
+ index++) {
+ if (count > 15)
+ break;
+ virqs[count] = irq_create_of_mapping(&oirq);
+ if (virqs[count] == NO_IRQ) {
+ pr_err("event-sources: Unable to allocate "
+ "interrupt number for %s\n",
+ np->full_name);
+ WARN_ON(1);
+ } else {
+ count++;
}
}
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 8c80588abacc..ea7f09bd73b1 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -22,6 +22,7 @@
*/
+#include <linux/of_fdt.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
@@ -69,7 +70,8 @@ hypertas_fw_features_table[] = {
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
-void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len)
+static void __init fw_hypertas_feature_init(const char *hypertas,
+ unsigned long len)
{
const char *s;
int i;
@@ -113,7 +115,7 @@ vec5_fw_features_table[] = {
{FW_FEATURE_PRRN, OV5_PRRN},
};
-void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
+static void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
{
unsigned int index, feat;
int i;
@@ -131,3 +133,45 @@ void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
pr_debug(" <- fw_vec5_feature_init()\n");
}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init probe_fw_features(unsigned long node, const char *uname, int
+ depth, void *data)
+{
+ const char *prop;
+ int len;
+ static int hypertas_found;
+ static int vec5_found;
+
+ if (depth != 1)
+ return 0;
+
+ if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
+ prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
+ &len);
+ if (prop) {
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+ fw_hypertas_feature_init(prop, len);
+ }
+
+ hypertas_found = 1;
+ }
+
+ if (!strcmp(uname, "chosen")) {
+ prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
+ &len);
+ if (prop)
+ fw_vec5_feature_init(prop, len);
+
+ vec5_found = 1;
+ }
+
+ return hypertas_found && vec5_found;
+}
+
+void __init pseries_probe_fw_features(void)
+{
+ of_scan_flat_dt(probe_fw_features, NULL);
+}
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 282837a1d74b..a1b63e00b2f7 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -903,8 +903,6 @@ static int parse_cede_parameters(void)
static int __init pseries_cpu_hotplug_init(void)
{
- struct device_node *np;
- const char *typep;
int cpu;
int qcss_tok;
@@ -913,17 +911,6 @@ static int __init pseries_cpu_hotplug_init(void)
ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
- for_each_node_by_name(np, "interrupt-controller") {
- typep = of_get_property(np, "compatible", NULL);
- if (strstr(typep, "open-pic")) {
- of_node_put(np);
-
- printk(KERN_INFO "CPU Hotplug not supported on "
- "systems using MPIC\n");
- return 0;
- }
- }
-
rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2ce138542083..43f7beb2902d 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -69,13 +69,36 @@ unsigned long pseries_memory_block_size(void)
return memblock_size;
}
-static void dlpar_free_drconf_property(struct property *prop)
+static void dlpar_free_property(struct property *prop)
{
kfree(prop->name);
kfree(prop->value);
kfree(prop);
}
+static struct property *dlpar_clone_property(struct property *prop,
+ u32 prop_size)
+{
+ struct property *new_prop;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return NULL;
+
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->value = kzalloc(prop_size, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value) {
+ dlpar_free_property(new_prop);
+ return NULL;
+ }
+
+ memcpy(new_prop->value, prop->value, prop->length);
+ new_prop->length = prop_size;
+
+ of_property_set_flag(new_prop, OF_DYNAMIC);
+ return new_prop;
+}
+
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
struct property *prop, *new_prop;
@@ -87,19 +110,10 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
if (!prop)
return NULL;
- new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ new_prop = dlpar_clone_property(prop, prop->length);
if (!new_prop)
return NULL;
- new_prop->name = kstrdup(prop->name, GFP_KERNEL);
- new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
- if (!new_prop->name || !new_prop->value) {
- dlpar_free_drconf_property(new_prop);
- return NULL;
- }
-
- new_prop->length = prop->length;
-
/* Convert the property to cpu endian-ness */
p = new_prop->value;
*p = be32_to_cpu(*p);
@@ -177,14 +191,74 @@ static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
return 0;
}
+static u32 find_aa_index(struct device_node *dr_node,
+ struct property *ala_prop, const u32 *lmb_assoc)
+{
+ u32 *assoc_arrays;
+ u32 aa_index;
+ int aa_arrays, aa_array_entries, aa_array_sz;
+ int i, index;
+
+ /*
+ * The ibm,associativity-lookup-arrays property is defined to be
+ * a 32-bit value specifying the number of associativity arrays
+ * followed by a 32-bit value specifying the number of entries per
+ * array, followed by the associativity arrays.
+ */
+ assoc_arrays = ala_prop->value;
+
+ aa_arrays = be32_to_cpu(assoc_arrays[0]);
+ aa_array_entries = be32_to_cpu(assoc_arrays[1]);
+ aa_array_sz = aa_array_entries * sizeof(u32);
+
+ aa_index = -1;
+ for (i = 0; i < aa_arrays; i++) {
+ index = (i * aa_array_entries) + 2;
+
+ if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
+ continue;
+
+ aa_index = i;
+ break;
+ }
+
+ if (aa_index == -1) {
+ struct property *new_prop;
+ u32 new_prop_size;
+
+ new_prop_size = ala_prop->length + aa_array_sz;
+ new_prop = dlpar_clone_property(ala_prop, new_prop_size);
+ if (!new_prop)
+ return -1;
+
+ assoc_arrays = new_prop->value;
+
+ /* increment the number of entries in the lookup array */
+ assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
+
+ /* copy the new associativity into the lookup array */
+ index = aa_arrays * aa_array_entries + 2;
+ memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
+
+ of_update_property(dr_node, new_prop);
+
+ /*
+ * The associativity lookup array index for this lmb is
+ * number of entries - 1 since we added its associativity
+ * to the end of the lookup array.
+ */
+ aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
+ }
+
+ return aa_index;
+}
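(A decoding sketch for the property layout described above: cell 0 is
the number of arrays, cell 1 the entries per array, and array i starts
at cell i * entries + 2, matching the index computed in find_aa_index().
The helper name is hypothetical.)

static const __be32 *example_aa_array(const __be32 *cells, int i)
{
	int entries = be32_to_cpu(cells[1]);

	return &cells[2 + i * entries];
}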
+
static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
struct device_node *parent, *lmb_node, *dr_node;
+ struct property *ala_prop;
const u32 *lmb_assoc;
- const u32 *assoc_arrays;
u32 aa_index;
- int aa_arrays, aa_array_entries, aa_array_sz;
- int i;
parent = of_find_node_by_path("/");
if (!parent)
@@ -208,34 +282,15 @@ static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
return -ENODEV;
}
- assoc_arrays = of_get_property(dr_node,
- "ibm,associativity-lookup-arrays",
- NULL);
- of_node_put(dr_node);
- if (!assoc_arrays) {
+ ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
+ NULL);
+ if (!ala_prop) {
+ of_node_put(dr_node);
dlpar_free_cc_nodes(lmb_node);
return -ENODEV;
}
- /* The ibm,associativity-lookup-arrays property is defined to be
- * a 32-bit value specifying the number of associativity arrays
- * followed by a 32-bitvalue specifying the number of entries per
- * array, followed by the associativity arrays.
- */
- aa_arrays = be32_to_cpu(assoc_arrays[0]);
- aa_array_entries = be32_to_cpu(assoc_arrays[1]);
- aa_array_sz = aa_array_entries * sizeof(u32);
-
- aa_index = -1;
- for (i = 0; i < aa_arrays; i++) {
- int indx = (i * aa_array_entries) + 2;
-
- if (memcmp(&assoc_arrays[indx], &lmb_assoc[1], aa_array_sz))
- continue;
-
- aa_index = i;
- break;
- }
+ aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
dlpar_free_cc_nodes(lmb_node);
return aa_index;
@@ -533,50 +588,11 @@ static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static int dlpar_add_lmb_memory(struct of_drconf_cell *lmb)
+static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
- struct memory_block *mem_block;
unsigned long block_sz;
int nid, rc;
- block_sz = memory_block_size_bytes();
-
- /* Find the node id for this address */
- nid = memory_add_physaddr_to_nid(lmb->base_addr);
-
- /* Add the memory */
- rc = add_memory(nid, lmb->base_addr, block_sz);
- if (rc)
- return rc;
-
- /* Register this block of memory */
- rc = memblock_add(lmb->base_addr, block_sz);
- if (rc) {
- remove_memory(nid, lmb->base_addr, block_sz);
- return rc;
- }
-
- mem_block = lmb_to_memblock(lmb);
- if (!mem_block) {
- remove_memory(nid, lmb->base_addr, block_sz);
- return -EINVAL;
- }
-
- rc = device_online(&mem_block->dev);
- put_device(&mem_block->dev);
- if (rc) {
- remove_memory(nid, lmb->base_addr, block_sz);
- return rc;
- }
-
- lmb->flags |= DRCONF_MEM_ASSIGNED;
- return 0;
-}
-
-static int dlpar_add_lmb(struct of_drconf_cell *lmb)
-{
- int rc;
-
if (lmb->flags & DRCONF_MEM_ASSIGNED)
return -EINVAL;
@@ -592,10 +608,18 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb)
return rc;
}
- rc = dlpar_add_lmb_memory(lmb);
+ block_sz = memory_block_size_bytes();
+
+ /* Find the node id for this address */
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ /* Add the memory */
+ rc = add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
dlpar_remove_device_tree_lmb(lmb);
dlpar_release_drc(lmb->drc_index);
+ } else {
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
}
return rc;
@@ -748,7 +772,7 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
break;
}
- dlpar_free_drconf_property(prop);
+ dlpar_free_property(prop);
dlpar_memory_out:
of_node_put(dn);
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
index 0240c4ff878a..f053bda64ee7 100644
--- a/arch/powerpc/platforms/pseries/io_event_irq.c
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -113,7 +113,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
* - The owner of an event is determined by combinations of scope,
* event type, and sub-type. There is no easy way to pre-sort clients
* by scope or event type alone. For example, Torrent ISR route change
- * event is reported with scope 0x00 (Not Applicatable) rather than
+ * event is reported with scope 0x00 (Not Applicable) rather than
* 0x3B (Torrent-hub). It is better to let the clients to identify
* who owns the event.
*/
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index b7dfc1359d01..770a753b52c9 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -120,35 +120,6 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
kfree(table_group);
}
-static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
- __be64 *startp, __be64 *endp)
-{
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
- unsigned long start, end, inc;
-
- start = __pa(startp);
- end = __pa(endp);
- inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */
-
- /* If this is non-zero, change the format. We shift the
- * address and or in the magic from the device tree. */
- if (tbl->it_busno) {
- start <<= 12;
- end <<= 12;
- inc <<= 12;
- start |= tbl->it_busno;
- end |= tbl->it_busno;
- }
-
- end |= inc - 1; /* round up end to be different than start */
-
- mb(); /* Make sure TCEs in memory are written */
- while (start <= end) {
- out_be64(invalidate, start);
- start += inc;
- }
-}
-
static int tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
@@ -173,9 +144,6 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
uaddr += TCE_PAGE_SIZE;
tcep++;
}
-
- if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
return 0;
}
@@ -188,9 +156,6 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
while (npages--)
*(tcep++) = 0;
-
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
@@ -537,7 +502,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
struct iommu_table *tbl)
{
struct device_node *node;
- const unsigned long *basep, *sw_inval;
+ const unsigned long *basep;
const u32 *sizep;
node = phb->dn;
@@ -575,22 +540,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_index = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
-
- sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
- if (sw_inval) {
- /*
- * This property contains information on how to
- * invalidate the TCE entry. The first property is
- * the base MMIO address used to invalidate entries.
- * The second property tells us the format of the TCE
- * invalidate (whether it needs to be shifted) and
- * some magic routing info to add to our invalidate
- * command.
- */
- tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
- tbl->it_busno = sw_inval[1]; /* overload this with magic */
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
- }
}
/*
@@ -927,7 +876,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
- cfg_addr = (pdn->busno << 8) | pdn->devfn;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -956,7 +905,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
- cfg_addr = (pdn->busno << 8) | pdn->devfn;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
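Editorial note: the cfg_addr change in query_ddw() and create_ddw() above repacks the bus/devfn into the config-address layout used by the RTAS PCI calls (bus number in bits 23:16, device/function in bits 15:8, register in bits 7:0). A small sketch with made-up bus/devfn values shows the two encodings side by side; the helper names are illustrative only.

#include <stdint.h>

/* Illustration: bus 0x0a, devfn 0x30 (device 6, function 0). */
static uint32_t pack_old(uint32_t busno, uint32_t devfn)
{
	return (busno << 8) | devfn;		/* 0x00000a30 */
}

static uint32_t pack_new(uint32_t busno, uint32_t devfn)
{
	return (busno << 16) | (devfn << 8);	/* 0x000a3000: bus 23:16, devfn 15:8 */
}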
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 13fa95b3aa8b..6681ac97fb18 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -14,14 +14,13 @@
#include <asm/page.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
-#include <asm/mpic.h>
#include <asm/xics.h>
#include <asm/smp.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
-static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
@@ -51,26 +50,6 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
"(hw %d) failed with %d\n", cpu, hwcpu, ret);
}
}
-}
-
-static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary)
-{
- pseries_kexec_cpu_down(crash_shutdown, secondary);
- mpic_teardown_this_cpu(secondary);
-}
-void __init setup_kexec_cpu_down_mpic(void)
-{
- ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic;
-}
-
-static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
-{
- pseries_kexec_cpu_down(crash_shutdown, secondary);
xics_kexec_teardown_cpu(secondary);
}
-
-void __init setup_kexec_cpu_down_xics(void)
-{
- ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
-}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 7f6100d91b4b..86707e67843f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -45,6 +45,7 @@
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
+#include <asm/asm-prototypes.h>
#include "pseries.h"
@@ -260,24 +261,8 @@ static void pSeries_lpar_hptab_clear(void)
* This is also called on boot when a fadump happens. In that case we
* must not change the exception endian mode.
*/
- if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
- long rc;
-
- rc = pseries_big_endian_exceptions();
- /*
- * At this point it is unlikely panic() will get anything
- * out to the user, but at least this will stop us from
- * continuing on further and creating an even more
- * difficult to debug situation.
- *
- * There is a known problem when kdump'ing, if cpus are offline
- * the above call will fail. Rather than panicking again, keep
- * going and hope the kdump kernel is also little endian, which
- * it usually is.
- */
- if (rc && !kdump_in_progress())
- panic("Could not enable big endian exceptions");
- }
+ if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
+ pseries_big_endian_exceptions();
#endif
}
@@ -604,17 +589,17 @@ static int __init disable_bulk_remove(char *str)
__setup("bulk_remove=", disable_bulk_remove);
-void __init hpte_init_lpar(void)
+void __init hpte_init_pseries(void)
{
- ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
- ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
- ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
- ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
- ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
- ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
+ mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
+ mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp;
+ mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
+ mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert;
+ mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
+ mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
+ mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
+ mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear;
+ mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}
#ifdef CONFIG_PPC_SMLPAR
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8184175c86..79aef8c1c5b3 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -17,8 +17,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/kmsg_dump.h>
-#include <linux/pstore.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
diff --git a/arch/powerpc/platforms/pseries/power.c b/arch/powerpc/platforms/pseries/power.c
index c26eadde434c..a4a0b57d1d81 100644
--- a/arch/powerpc/platforms/pseries/power.c
+++ b/arch/powerpc/platforms/pseries/power.c
@@ -27,6 +27,8 @@
#include <linux/init.h>
#include <asm/machdep.h>
+#include "pseries.h"
+
unsigned long rtas_poweron_auto; /* default and normal state is 0 */
static ssize_t auto_poweron_show(struct kobject *kobj,
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 7aa83f00ac62..b1be7b713fe6 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -20,31 +20,18 @@ extern void request_event_sources_irqs(struct device_node *np,
#include <linux/of.h>
-extern void __init fw_hypertas_feature_init(const char *hypertas,
- unsigned long len);
-extern void __init fw_vec5_feature_init(const char *hypertas,
- unsigned long len);
-
struct pt_regs;
extern int pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);
#ifdef CONFIG_SMP
-extern void smp_init_pseries_mpic(void);
-extern void smp_init_pseries_xics(void);
+extern void smp_init_pseries(void);
#else
-static inline void smp_init_pseries_mpic(void) { };
-static inline void smp_init_pseries_xics(void) { };
+static inline void smp_init_pseries(void) { };
#endif
-#ifdef CONFIG_KEXEC
-extern void setup_kexec_cpu_down_xics(void);
-extern void setup_kexec_cpu_down_mpic(void);
-#else
-static inline void setup_kexec_cpu_down_xics(void) { }
-static inline void setup_kexec_cpu_down_mpic(void) { }
-#endif
+extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
extern void pSeries_final_fixup(void);
@@ -64,6 +51,8 @@ extern int dlpar_detach_node(struct device_node *);
extern int dlpar_acquire_drc(u32 drc_index);
extern int dlpar_release_drc(u32 drc_index);
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
+ struct completion *hotplug_done, int *rc);
#ifdef CONFIG_MEMORY_HOTPLUG
int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
#else
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index 92767791f93b..164a13d3998a 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -208,19 +208,19 @@ static ssize_t percpu_deactivate_hint_show(struct device *dev,
* Per-cpu value of the hint
*/
-struct device_attribute attr_cpu_activate_hint_list =
+static struct device_attribute attr_cpu_activate_hint_list =
__ATTR(pseries_activate_hint_list, 0444,
cpu_activate_hint_list_show, NULL);
-struct device_attribute attr_cpu_deactivate_hint_list =
+static struct device_attribute attr_cpu_deactivate_hint_list =
__ATTR(pseries_deactivate_hint_list, 0444,
cpu_deactivate_hint_list_show, NULL);
-struct device_attribute attr_percpu_activate_hint =
+static struct device_attribute attr_percpu_activate_hint =
__ATTR(pseries_activate_hint, 0444,
percpu_activate_hint_show, NULL);
-struct device_attribute attr_percpu_deactivate_hint =
+static struct device_attribute attr_percpu_deactivate_hint =
__ATTR(pseries_deactivate_hint, 0444,
percpu_deactivate_hint_show, NULL);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 9a3e27b863ce..904a677208d1 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -43,6 +43,7 @@ static int ras_check_exception_token;
/* EPOW events counter variable */
static int num_epow_events;
+static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id);
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
@@ -65,6 +66,14 @@ static int __init init_ras_IRQ(void)
of_node_put(np);
}
+ /* Hotplug Events */
+ np = of_find_node_by_path("/event-sources/hot-plug-events");
+ if (np != NULL) {
+ request_event_sources_irqs(np, ras_hotplug_interrupt,
+ "RAS_HOTPLUG");
+ of_node_put(np);
+ }
+
/* EPOW Events */
np = of_find_node_by_path("/event-sources/epow-events");
if (np != NULL) {
@@ -190,6 +199,36 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log)
num_epow_events++;
}
+static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id)
+{
+ struct pseries_errorlog *pseries_log;
+ struct pseries_hp_errorlog *hp_elog;
+
+ spin_lock(&ras_log_buf_lock);
+
+ rtas_call(ras_check_exception_token, 6, 1, NULL,
+ RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq),
+ RTAS_HOTPLUG_EVENTS, 0, __pa(&ras_log_buf),
+ rtas_get_error_log_max());
+
+ pseries_log = get_pseries_errorlog((struct rtas_error_log *)ras_log_buf,
+ PSERIES_ELOG_SECT_ID_HOTPLUG);
+ hp_elog = (struct pseries_hp_errorlog *)pseries_log->data;
+
+ /*
+ * Since PCI hotplug is not currently supported on pseries, put PCI
+ * hotplug events on the ras_log_buf to be handled by rtas_errd.
+ */
+ if (hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_MEM ||
+ hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU)
+ queue_hotplug_event(hp_elog, NULL, NULL);
+ else
+ log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
+
+ spin_unlock(&ras_log_buf_lock);
+ return IRQ_HANDLED;
+}
+
/* Handle environmental and power warning (EPOW) interrupts. */
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 9883bc7ea007..4ffcaa6f8670 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -57,7 +57,6 @@
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
-#include <asm/mpic.h>
#include <asm/xics.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
@@ -77,8 +76,6 @@ EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
-static struct device_node *pSeries_mpic_node;
-
static void pSeries_show_cpuinfo(struct seq_file *m)
{
struct device_node *root;
@@ -172,48 +169,7 @@ static void __init pseries_setup_i8259_cascade(void)
irq_set_chained_handler(cascade, pseries_8259_cascade);
}
-static void __init pseries_mpic_init_IRQ(void)
-{
- struct device_node *np;
- const unsigned int *opprop;
- unsigned long openpic_addr = 0;
- int naddr, n, i, opplen;
- struct mpic *mpic;
-
- np = of_find_node_by_path("/");
- naddr = of_n_addr_cells(np);
- opprop = of_get_property(np, "platform-open-pic", &opplen);
- if (opprop != NULL) {
- openpic_addr = of_read_number(opprop, naddr);
- printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
- }
- of_node_put(np);
-
- BUG_ON(openpic_addr == 0);
-
- /* Setup the openpic driver */
- mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
- MPIC_NO_RESET, 16, 0, " MPIC ");
- BUG_ON(mpic == NULL);
-
- /* Add ISUs */
- opplen /= sizeof(u32);
- for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
- unsigned long isuaddr = of_read_number(opprop + i, naddr);
- mpic_assign_isu(mpic, n, isuaddr);
- }
-
- /* Setup top-level get_irq */
- ppc_md.get_irq = mpic_get_irq;
-
- /* All ISUs are setup, complete initialization */
- mpic_init(mpic);
-
- /* Look for cascade */
- pseries_setup_i8259_cascade();
-}
-
-static void __init pseries_xics_init_IRQ(void)
+static void __init pseries_init_irq(void)
{
xics_init();
pseries_setup_i8259_cascade();
@@ -228,32 +184,6 @@ static void pseries_lpar_enable_pmcs(void)
plpar_hcall_norets(H_PERFMON, set, reset);
}
-static void __init pseries_discover_pic(void)
-{
- struct device_node *np;
- const char *typep;
-
- for_each_node_by_name(np, "interrupt-controller") {
- typep = of_get_property(np, "compatible", NULL);
- if (!typep)
- continue;
- if (strstr(typep, "open-pic")) {
- pSeries_mpic_node = of_node_get(np);
- ppc_md.init_IRQ = pseries_mpic_init_IRQ;
- setup_kexec_cpu_down_mpic();
- smp_init_pseries_mpic();
- return;
- } else if (strstr(typep, "ppc-xicp")) {
- ppc_md.init_IRQ = pseries_xics_init_IRQ;
- setup_kexec_cpu_down_xics();
- smp_init_pseries_xics();
- return;
- }
- }
- printk(KERN_ERR "pSeries_discover_pic: failed to recognize"
- " interrupt-controller\n");
-}
-
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct of_reconfig_data *rd = data;
@@ -265,11 +195,8 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
case OF_RECONFIG_ATTACH_NODE:
parent = of_get_parent(np);
pdn = parent ? PCI_DN(parent) : NULL;
- if (pdn) {
- /* Create pdn and EEH device */
+ if (pdn)
pci_add_device_node_info(pdn->phb, np);
- eeh_dev_init(PCI_DN(np), pdn->phb);
- }
of_node_put(parent);
break;
@@ -367,7 +294,7 @@ static void pseries_lpar_idle(void)
{
/*
* Default handler to go into low thread priority and possibly
- * low power mode by cedeing processor to hypervisor
+ * low power mode by ceding processor to hypervisor
*/
/* Indicate to hypervisor that we are idle. */
@@ -392,15 +319,23 @@ static void pseries_lpar_idle(void)
* to ever be a problem in practice we can move this into a kernel thread to
* finish off the process later in boot.
*/
-long pSeries_enable_reloc_on_exc(void)
+void pseries_enable_reloc_on_exc(void)
{
long rc;
unsigned int delay, total_delay = 0;
while (1) {
rc = enable_reloc_on_exceptions();
- if (!H_IS_LONG_BUSY(rc))
- return rc;
+ if (!H_IS_LONG_BUSY(rc)) {
+ if (rc == H_P2) {
+ pr_info("Relocation on exceptions not"
+ " supported\n");
+ } else if (rc != H_SUCCESS) {
+ pr_warn("Unable to enable relocation"
+ " on exceptions: %ld\n", rc);
+ }
+ break;
+ }
delay = get_longbusy_msecs(rc);
total_delay += delay;
@@ -408,66 +343,81 @@ long pSeries_enable_reloc_on_exc(void)
pr_warn("Warning: Giving up waiting to enable "
"relocation on exceptions (%u msec)!\n",
total_delay);
- return rc;
+ return;
}
mdelay(delay);
}
}
-EXPORT_SYMBOL(pSeries_enable_reloc_on_exc);
+EXPORT_SYMBOL(pseries_enable_reloc_on_exc);
-long pSeries_disable_reloc_on_exc(void)
+void pseries_disable_reloc_on_exc(void)
{
long rc;
while (1) {
rc = disable_reloc_on_exceptions();
if (!H_IS_LONG_BUSY(rc))
- return rc;
+ break;
mdelay(get_longbusy_msecs(rc));
}
+ if (rc != H_SUCCESS)
+ pr_warning("Warning: Failed to disable relocation on "
+ "exceptions: %ld\n", rc);
}
-EXPORT_SYMBOL(pSeries_disable_reloc_on_exc);
+EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
#ifdef CONFIG_KEXEC
static void pSeries_machine_kexec(struct kimage *image)
{
- long rc;
-
- if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
- rc = pSeries_disable_reloc_on_exc();
- if (rc != H_SUCCESS)
- pr_warning("Warning: Failed to disable relocation on "
- "exceptions: %ld\n", rc);
- }
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ pseries_disable_reloc_on_exc();
default_machine_kexec(image);
}
#endif
#ifdef __LITTLE_ENDIAN__
-long pseries_big_endian_exceptions(void)
+void pseries_big_endian_exceptions(void)
{
long rc;
while (1) {
rc = enable_big_endian_exceptions();
if (!H_IS_LONG_BUSY(rc))
- return rc;
+ break;
mdelay(get_longbusy_msecs(rc));
}
+
+ /*
+ * At this point it is unlikely panic() will get anything
+ * out to the user, since this is called very late in kexec
+ * but at least this will stop us from continuing on further
+ * and creating an even more difficult to debug situation.
+ *
+ * There is a known problem when kdump'ing, if cpus are offline
+ * the above call will fail. Rather than panicking again, keep
+ * going and hope the kdump kernel is also little endian, which
+ * it usually is.
+ */
+ if (rc && !kdump_in_progress())
+ panic("Could not enable big endian exceptions");
}
-static long pseries_little_endian_exceptions(void)
+void pseries_little_endian_exceptions(void)
{
long rc;
while (1) {
rc = enable_little_endian_exceptions();
if (!H_IS_LONG_BUSY(rc))
- return rc;
+ break;
mdelay(get_longbusy_msecs(rc));
}
+ if (rc) {
+ ppc_md.progress("H_SET_MODE LE exception fail", 0);
+ panic("Could not enable little endian exceptions");
+ }
}
#endif
@@ -492,7 +442,6 @@ static void __init find_and_init_phbs(void)
}
of_node_put(root);
- pci_devs_phb_init();
/*
* PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
@@ -506,7 +455,8 @@ static void __init pSeries_setup_arch(void)
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Discover PIC type and setup ppc_md accordingly */
- pseries_discover_pic();
+ smp_init_pseries();
+
/* openpic global configuration register (64-bit format). */
/* openpic Interrupt Source Unit pointer (64-bit format). */
@@ -537,18 +487,6 @@ static void __init pSeries_setup_arch(void)
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-
- if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
- long rc;
-
- rc = pSeries_enable_reloc_on_exc();
- if (rc == H_P2) {
- pr_info("Relocation on exceptions not supported\n");
- } else if (rc != H_SUCCESS) {
- pr_warn("Unable to enable relocation on exceptions: "
- "%ld\n", rc);
- }
- }
}
static int __init pSeries_init_panel(void)
@@ -682,9 +620,9 @@ static void pSeries_cmo_feature_init(void)
/*
* Early initialization. Relocation is on but do not reference unbolted pages
*/
-static void __init pSeries_init_early(void)
+static void __init pseries_init(void)
{
- pr_debug(" -> pSeries_init_early()\n");
+ pr_debug(" -> pseries_init()\n");
#ifdef CONFIG_HVC_CONSOLE
if (firmware_has_feature(FW_FEATURE_LPAR))
@@ -701,7 +639,7 @@ static void __init pSeries_init_early(void)
pSeries_cmo_feature_init();
iommu_init_early_pSeries();
- pr_debug(" <- pSeries_init_early()\n");
+ pr_debug(" <- pseries_init()\n");
}
/**
@@ -732,49 +670,9 @@ static void pseries_power_off(void)
for (;;);
}
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-
-static int __init pseries_probe_fw_features(unsigned long node,
- const char *uname, int depth,
- void *data)
-{
- const char *prop;
- int len;
- static int hypertas_found;
- static int vec5_found;
-
- if (depth != 1)
- return 0;
-
- if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
- prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
- &len);
- if (prop) {
- powerpc_firmware_features |= FW_FEATURE_LPAR;
- fw_hypertas_feature_init(prop, len);
- }
-
- hypertas_found = 1;
- }
-
- if (!strcmp(uname, "chosen")) {
- prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
- &len);
- if (prop)
- fw_vec5_feature_init(prop, len);
-
- vec5_found = 1;
- }
-
- return hypertas_found && vec5_found;
-}
-
static int __init pSeries_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
+ const char *dtype = of_get_property(of_root, "device_type", NULL);
if (dtype == NULL)
return 0;
@@ -784,41 +682,17 @@ static int __init pSeries_probe(void)
/* Cell blades firmware claims to be chrp while it's not. Until this
* is fixed, we need to avoid those here.
*/
- if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
- of_flat_dt_is_compatible(root, "IBM,CBEA"))
+ if (of_machine_is_compatible("IBM,CPBW-1.0") ||
+ of_machine_is_compatible("IBM,CBEA"))
return 0;
- pr_debug("pSeries detected, looking for LPAR capability...\n");
-
- /* Now try to figure out if we are running on LPAR */
- of_scan_flat_dt(pseries_probe_fw_features, NULL);
-
-#ifdef __LITTLE_ENDIAN__
- if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
- long rc;
- /*
- * Tell the hypervisor that we want our exceptions to
- * be taken in little endian mode. If this fails we don't
- * want to use BUG() because it will trigger an exception.
- */
- rc = pseries_little_endian_exceptions();
- if (rc) {
- ppc_md.progress("H_SET_MODE LE exception fail", 0);
- panic("Could not enable little endian exceptions");
- }
- }
-#endif
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
- hpte_init_lpar();
- else
- hpte_init_native();
-
pm_power_off = pseries_power_off;
pr_debug("Machine is%s LPAR !\n",
(powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
+ pseries_init();
+
return 1;
}
@@ -837,7 +711,7 @@ define_machine(pseries) {
.name = "pSeries",
.probe = pSeries_probe,
.setup_arch = pSeries_setup_arch,
- .init_early = pSeries_init_early,
+ .init_IRQ = pseries_init_irq,
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
.pcibios_fixup = pSeries_final_fixup,
@@ -853,6 +727,7 @@ define_machine(pseries) {
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
.machine_kexec = pSeries_machine_kexec,
+ .kexec_cpu_down = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
.memory_block_size = pseries_memory_block_size,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 6932ea803e33..f6f83aeccaaa 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -38,7 +38,6 @@
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
-#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
@@ -140,7 +139,7 @@ out:
return 1;
}
-static void smp_xics_setup_cpu(int cpu)
+static void smp_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
xics_setup_cpu();
@@ -207,28 +206,22 @@ static __init void pSeries_smp_probe(void)
}
}
-static struct smp_ops_t pSeries_mpic_smp_ops = {
- .message_pass = smp_mpic_message_pass,
- .probe = smp_mpic_probe,
- .kick_cpu = smp_pSeries_kick_cpu,
- .setup_cpu = smp_mpic_setup_cpu,
-};
-
-static struct smp_ops_t pSeries_xics_smp_ops = {
+static struct smp_ops_t pseries_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
.probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
- .setup_cpu = smp_xics_setup_cpu,
+ .setup_cpu = smp_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
};
/* This is called very early */
-static void __init smp_init_pseries(void)
+void __init smp_init_pseries(void)
{
int i;
pr_debug(" -> smp_init_pSeries()\n");
+ smp_ops = &pseries_smp_ops;
alloc_bootmem_cpumask_var(&of_spin_mask);
@@ -258,17 +251,3 @@ static void __init smp_init_pseries(void)
pr_debug(" <- smp_init_pSeries()\n");
}
-
-void __init smp_init_pseries_mpic(void)
-{
- smp_ops = &pSeries_mpic_smp_ops;
-
- smp_init_pseries();
-}
-
-void __init smp_init_pseries_xics(void)
-{
- smp_ops = &pSeries_xics_smp_ops;
-
- smp_init_pseries();
-}
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ff75d70f7285..9144204442eb 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -143,12 +143,12 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
*/
static long
axon_ram_direct_access(struct block_device *device, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn, long size)
+ void **kaddr, pfn_t *pfn, long size)
{
struct axon_ram_bank *bank = device->bd_disk->private_data;
loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
- *kaddr = (void __pmem __force *) bank->io_addr + offset;
+ *kaddr = (void *) bank->io_addr + offset;
*pfn = phys_to_pfn_t(bank->ph_addr + offset, PFN_DEV);
return bank->size - offset;
}
@@ -223,7 +223,6 @@ static int axon_ram_probe(struct platform_device *device)
bank->disk->first_minor = azfs_minor;
bank->disk->fops = &axon_ram_devops;
bank->disk->private_data = bank;
- bank->disk->driverfs_dev = &device->dev;
sprintf(bank->disk->disk_name, "%s%d",
AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
@@ -238,7 +237,7 @@ static int axon_ram_probe(struct platform_device *device)
set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
- add_disk(bank->disk);
+ device_add_disk(&device->dev, bank->disk);
bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
if (bank->irq_id == NO_IRQ) {
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 0ac12e5fd8ab..911456d17713 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -28,6 +28,7 @@
#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/cpm.h>
+#include <asm/fixmap.h>
#include <soc/fsl/qe/qe.h>
#include <mm/mmu_decl.h>
@@ -37,25 +38,36 @@
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-static u32 __iomem *cpm_udbg_txdesc =
- (u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
+static u32 __iomem *cpm_udbg_txdesc;
+static u8 __iomem *cpm_udbg_txbuf;
static void udbg_putc_cpm(char c)
{
- u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
-
if (c == '\n')
udbg_putc_cpm('\r');
while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
;
- out_8(txbuf, c);
+ out_8(cpm_udbg_txbuf, c);
out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
}
void __init udbg_init_cpm(void)
{
+#ifdef CONFIG_PPC_8xx
+ cpm_udbg_txdesc = (u32 __iomem __force *)
+ (CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
+ VIRT_IMMR_BASE);
+ cpm_udbg_txbuf = (u8 __iomem __force *)
+ (in_be32(&cpm_udbg_txdesc[1]) - PHYS_IMMR_BASE +
+ VIRT_IMMR_BASE);
+#else
+ cpm_udbg_txdesc = (u32 __iomem __force *)
+ CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
+ cpm_udbg_txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
+#endif
+
if (cpm_udbg_txdesc) {
#ifdef CONFIG_CPM2
setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index b7348637eae0..26904f4879ec 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -48,16 +48,10 @@
#include "dart.h"
-/* Physical base address and size of the DART table */
-unsigned long dart_tablebase; /* exported to htab_initialize */
+/* DART table address and size */
+static u32 *dart_tablebase;
static unsigned long dart_tablesize;
-/* Virtual base address of the DART table */
-static u32 *dart_vbase;
-#ifdef CONFIG_PM
-static u32 *dart_copy;
-#endif
-
/* Mapped base address for the dart */
static unsigned int __iomem *dart;
@@ -151,6 +145,34 @@ wait_more:
spin_unlock_irqrestore(&invalidate_lock, flags);
}
+static void dart_cache_sync(unsigned int *base, unsigned int count)
+{
+ /*
+ * We add 1 to the number of entries to flush, following a
+ * comment in Darwin indicating that the memory controller
+ * can prefetch unmapped memory under some circumstances.
+ */
+ unsigned long start = (unsigned long)base;
+ unsigned long end = start + (count + 1) * sizeof(unsigned int);
+ unsigned int tmp;
+
+ /* Perform a standard cache flush */
+ flush_inval_dcache_range(start, end);
+
+ /*
+ * Perform the sequence described in the CPC925 manual to
+ * ensure all the data gets to a point the cache incoherent
+ * DART hardware will see.
+ */
+ asm volatile(" sync;"
+ " isync;"
+ " dcbf 0,%1;"
+ " sync;"
+ " isync;"
+ " lwz %0,0(%1);"
+ " isync" : "=r" (tmp) : "r" (end) : "memory");
+}
+
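Editorial note: the (count + 1) in dart_cache_sync() is easy to misread; a short worked example of the range computation, with illustrative numbers only:

/*
 * Illustration: base = 0x1000, count = 4 TCE entries of 4 bytes each.
 *   start = 0x1000
 *   end   = 0x1000 + (4 + 1) * 4 = 0x1014
 * One extra entry past the last one written is flushed, covering the
 * memory-controller prefetch mentioned in the comment above.
 */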
static void dart_flush(struct iommu_table *tbl)
{
mb();
@@ -165,13 +187,13 @@ static int dart_build(struct iommu_table *tbl, long index,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- unsigned int *dp;
+ unsigned int *dp, *orig_dp;
unsigned int rpn;
long l;
DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
- dp = ((unsigned int*)tbl->it_base) + index;
+ orig_dp = dp = ((unsigned int*)tbl->it_base) + index;
/* On U3, all memory is contiguous, so we can move this
* out of the loop.
@@ -184,11 +206,7 @@ static int dart_build(struct iommu_table *tbl, long index,
uaddr += DART_PAGE_SIZE;
}
-
- /* make sure all updates have reached memory */
- mb();
- in_be32((unsigned __iomem *)dp);
- mb();
+ dart_cache_sync(orig_dp, npages);
if (dart_is_u4) {
rpn = index;
@@ -203,7 +221,8 @@ static int dart_build(struct iommu_table *tbl, long index,
static void dart_free(struct iommu_table *tbl, long index, long npages)
{
- unsigned int *dp;
+ unsigned int *dp, *orig_dp;
+ long orig_npages = npages;
/* We don't worry about flushing the TLB cache. The only drawback of
* not doing it is that we won't catch buggy device drivers doing
@@ -212,34 +231,30 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
DBG("dart: free at: %lx, %lx\n", index, npages);
- dp = ((unsigned int *)tbl->it_base) + index;
+ orig_dp = dp = ((unsigned int *)tbl->it_base) + index;
while (npages--)
*(dp++) = dart_emptyval;
-}
+ dart_cache_sync(orig_dp, orig_npages);
+}
-static int __init dart_init(struct device_node *dart_node)
+static void allocate_dart(void)
{
- unsigned int i;
- unsigned long tmp, base, size;
- struct resource r;
-
- if (dart_tablebase == 0 || dart_tablesize == 0) {
- printk(KERN_INFO "DART: table not allocated, using "
- "direct DMA\n");
- return -ENODEV;
- }
+ unsigned long tmp;
- if (of_address_to_resource(dart_node, 0, &r))
- panic("DART: can't get register base ! ");
+ /* 512 pages (2MB) is max DART tablesize. */
+ dart_tablesize = 1UL << 21;
- /* Make sure nothing from the DART range remains in the CPU cache
- * from a previous mapping that existed before the kernel took
- * over
+ /*
+ * 16MB (1 << 24) alignment. We allocate a full 16MB chunk since we
+ * will blow up an entire large page anyway in the kernel mapping.
*/
- flush_dcache_phys_range(dart_tablebase,
- dart_tablebase + dart_tablesize);
+ dart_tablebase = __va(memblock_alloc_base(1UL<<24,
+ 1UL<<24, 0x80000000L));
+
+ /* There is no point scanning the DART space for leaks */
+ kmemleak_no_scan((void *)dart_tablebase);
/* Allocate a spare page to map all invalid DART pages. We need to do
* that to work around what looks like a problem with the HT bridge
@@ -249,20 +264,51 @@ static int __init dart_init(struct device_node *dart_node)
dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
DARTMAP_RPNMASK);
+ printk(KERN_INFO "DART table allocated at: %p\n", dart_tablebase);
+}
+
+static int __init dart_init(struct device_node *dart_node)
+{
+ unsigned int i;
+ unsigned long base, size;
+ struct resource r;
+
+ /* IOMMU disabled by the user ? bail out */
+ if (iommu_is_off)
+ return -ENODEV;
+
+ /*
+ * Only use the DART if the machine has more than 1GB of RAM
+ * or if requested with iommu=on on cmdline.
+ *
+ * 1GB of RAM is picked as limit because some default devices
+ * (i.e. Airport Extreme) have 30 bit address range limits.
+ */
+
+ if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
+ return -ENODEV;
+
+ /* Get DART registers */
+ if (of_address_to_resource(dart_node, 0, &r))
+ panic("DART: can't get register base ! ");
+
/* Map in DART registers */
dart = ioremap(r.start, resource_size(&r));
if (dart == NULL)
panic("DART: Cannot map registers!");
- /* Map in DART table */
- dart_vbase = ioremap(__pa(dart_tablebase), dart_tablesize);
+ /* Allocate the DART and dummy page */
+ allocate_dart();
/* Fill initial table */
for (i = 0; i < dart_tablesize/4; i++)
- dart_vbase[i] = dart_emptyval;
+ dart_tablebase[i] = dart_emptyval;
+
+ /* Push to memory */
+ dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
/* Initialize DART with table base and enable it. */
- base = dart_tablebase >> DART_PAGE_SHIFT;
+ base = ((unsigned long)dart_tablebase) >> DART_PAGE_SHIFT;
size = dart_tablesize >> DART_PAGE_SHIFT;
if (dart_is_u4) {
size &= DART_SIZE_U4_SIZE_MASK;
@@ -301,7 +347,7 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Initialize the common IOMMU code */
- iommu_table_dart.it_base = (unsigned long)dart_vbase;
+ iommu_table_dart.it_base = (unsigned long)dart_tablebase;
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
iommu_table_dart.it_ops = &iommu_dart_ops;
@@ -404,75 +450,21 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
}
#ifdef CONFIG_PM
-static void iommu_dart_save(void)
-{
- memcpy(dart_copy, dart_vbase, 2*1024*1024);
-}
-
static void iommu_dart_restore(void)
{
- memcpy(dart_vbase, dart_copy, 2*1024*1024);
+ dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
dart_tlb_invalidate_all();
}
static int __init iommu_init_late_dart(void)
{
- unsigned long tbasepfn;
- struct page *p;
-
- /* if no dart table exists then we won't need to save it
- * and the area has also not been reserved */
if (!dart_tablebase)
return 0;
- tbasepfn = __pa(dart_tablebase) >> PAGE_SHIFT;
- register_nosave_region_late(tbasepfn,
- tbasepfn + ((1<<24) >> PAGE_SHIFT));
-
- /* For suspend we need to copy the dart contents because
- * it is not part of the regular mapping (see above) and
- * thus not saved automatically. The memory for this copy
- * must be allocated early because we need 2 MB. */
- p = alloc_pages(GFP_KERNEL, 21 - PAGE_SHIFT);
- BUG_ON(!p);
- dart_copy = page_address(p);
-
- ppc_md.iommu_save = iommu_dart_save;
ppc_md.iommu_restore = iommu_dart_restore;
return 0;
}
late_initcall(iommu_init_late_dart);
-#endif
-
-void __init alloc_dart_table(void)
-{
- /* Only reserve DART space if machine has more than 1GB of RAM
- * or if requested with iommu=on on cmdline.
- *
- * 1GB of RAM is picked as limit because some default devices
- * (i.e. Airport Extreme) have 30 bit address range limits.
- */
-
- if (iommu_is_off)
- return;
-
- if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
- return;
-
- /* 512 pages (2MB) is max DART tablesize. */
- dart_tablesize = 1UL << 21;
- /* 16MB (1 << 24) alignment. We allocate a full 16Mb chuck since we
- * will blow up an entire large page anyway in the kernel mapping
- */
- dart_tablebase = (unsigned long)
- __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
- /*
- * The DART space is later unmapped from the kernel linear mapping and
- * accessing dart_tablebase during kmemleak scanning will fault.
- */
- kmemleak_no_scan((void *)dart_tablebase);
-
- printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
-}
+#endif /* CONFIG_PM */
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index 861cebf9c292..c27058e5df26 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -90,12 +90,8 @@ static int mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
}
l2cache_size = *prop;
- if (get_cache_sram_params(&sram_params)) {
- dev_err(&dev->dev,
- "Entire L2 as cache, provide valid sram offset and size\n");
- return -EINVAL;
- }
-
+ if (get_cache_sram_params(&sram_params))
+ return 0; /* fall back to L2 cache only */
rem = l2cache_size % sram_params.sram_size;
ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size;
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 99269c041615..a09ca704de58 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -204,7 +204,7 @@ static int __init setup_rstcr(void)
arch_initcall(setup_rstcr);
-void fsl_rstcr_restart(char *cmd)
+void __noreturn fsl_rstcr_restart(char *cmd)
{
local_irq_disable();
if (rstcr)
@@ -228,10 +228,11 @@ EXPORT_SYMBOL(diu_ops);
* to initiate a partition restart when we're running under the Freescale
* hypervisor.
*/
-void fsl_hv_restart(char *cmd)
+void __noreturn fsl_hv_restart(char *cmd)
{
pr_info("hv restart\n");
fh_partition_restart(-1);
+ while (1) ;
}
/*
@@ -241,9 +242,10 @@ void fsl_hv_restart(char *cmd)
* function pointers, to shut down the partition when we're running under
* the Freescale hypervisor.
*/
-void fsl_hv_halt(void)
+void __noreturn fsl_hv_halt(void)
{
pr_info("hv exit\n");
fh_partition_stop(-1);
+ while (1) ;
}
#endif
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 4c5a19ef4f0b..433566a5ef19 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -19,7 +19,7 @@ extern u32 fsl_get_sys_freq(void);
struct spi_board_info;
struct device_node;
-extern void fsl_rstcr_restart(char *cmd);
+extern void __noreturn fsl_rstcr_restart(char *cmd);
/* The different ports that the DIU can be connected to */
enum fsl_diu_monitor_port {
@@ -42,8 +42,8 @@ struct platform_diu_data_ops {
extern struct platform_diu_data_ops diu_ops;
-void fsl_hv_restart(char *cmd);
-void fsl_hv_halt(void);
+void __noreturn fsl_hv_restart(char *cmd);
+void __noreturn fsl_hv_halt(void);
#endif
#endif
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
index c606aa8ba60a..5d7f5a6564de 100644
--- a/arch/powerpc/sysdev/xics/Makefile
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -4,4 +4,4 @@ obj-y += xics-common.o
obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
-obj-$(CONFIG_PPC_POWERNV) += ics-opal.o
+obj-$(CONFIG_PPC_POWERNV) += ics-opal.o icp-opal.o
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
new file mode 100644
index 000000000000..57d72f10a97f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2016 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/io.h>
+#include <asm/opal.h>
+
+static void icp_opal_teardown_cpu(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Clear any pending IPI */
+ opal_int_set_mfrr(cpu, 0xff);
+}
+
+static void icp_opal_flush_ipi(void)
+{
+ /*
+ * We take the ipi irq but and never return so we need to EOI the IPI,
+ * but want to leave our priority 0.
+ *
+ * Should we check all the other interrupts too?
+ * Should we be flagging idle loop instead?
+ * Or creating some task to be scheduled?
+ */
+ opal_int_eoi((0x00 << 24) | XICS_IPI);
+}
+
+static unsigned int icp_opal_get_irq(void)
+{
+ unsigned int xirr;
+ unsigned int vec;
+ unsigned int irq;
+ int64_t rc;
+
+ rc = opal_int_get_xirr(&xirr, false);
+ if (rc < 0)
+ return NO_IRQ;
+ xirr = be32_to_cpu(xirr);
+ vec = xirr & 0x00ffffff;
+ if (vec == XICS_IRQ_SPURIOUS)
+ return NO_IRQ;
+
+ irq = irq_find_mapping(xics_host, vec);
+ if (likely(irq != NO_IRQ)) {
+ xics_push_cppr(vec);
+ return irq;
+ }
+
+ /* We don't have a linux mapping, so have rtas mask it. */
+ xics_mask_unknown_vec(vec);
+
+ /* We might learn about it later, so EOI it */
+ opal_int_eoi(xirr);
+
+ return NO_IRQ;
+}
+
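Editorial note: icp_opal_get_irq() and icp_opal_eoi() both depend on the XICS XIRR layout (CPPR in the top byte, interrupt source number in the low 24 bits). A user-space sketch of that packing, with made-up values; the macro and helper names are assumptions for illustration.

#include <stdint.h>

#define XIRR_VEC_MASK	0x00ffffffu

/* Illustration: cppr 0x05, vector 0x1234 -> xirr 0x05001234. */
static uint32_t xirr_pack(uint8_t cppr, uint32_t vec)
{
	return ((uint32_t)cppr << 24) | (vec & XIRR_VEC_MASK);
}

static uint32_t xirr_vec(uint32_t xirr)
{
	return xirr & XIRR_VEC_MASK;	/* what get_irq extracts above */
}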
+static void icp_opal_set_cpu_priority(unsigned char cppr)
+{
+ xics_set_base_cppr(cppr);
+ opal_int_set_cppr(cppr);
+ iosync();
+}
+
+static void icp_opal_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int64_t rc;
+
+ iosync();
+ rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);
+
+ /*
+ * EOI tells us whether there are more interrupts to fetch.
+ *
+ * Some HW implementations might not be able to send us another
+ * external interrupt in that case, so we force a replay.
+ */
+ if (rc > 0)
+ force_external_irq_replay();
+}
+
+#ifdef CONFIG_SMP
+
+static void icp_opal_cause_ipi(int cpu, unsigned long data)
+{
+ opal_int_set_mfrr(cpu, IPI_PRIORITY);
+}
+
+static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+
+ opal_int_set_mfrr(cpu, 0xff);
+
+ return smp_ipi_demux();
+}
+
+#endif /* CONFIG_SMP */
+
+static const struct icp_ops icp_opal_ops = {
+ .get_irq = icp_opal_get_irq,
+ .eoi = icp_opal_eoi,
+ .set_priority = icp_opal_set_cpu_priority,
+ .teardown_cpu = icp_opal_teardown_cpu,
+ .flush_ipi = icp_opal_flush_ipi,
+#ifdef CONFIG_SMP
+ .ipi_action = icp_opal_ipi_action,
+ .cause_ipi = icp_opal_cause_ipi,
+#endif
+};
+
+int icp_opal_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
+ if (!np)
+ return -ENODEV;
+
+ icp_ops = &icp_opal_ops;
+
+ printk("XICS: Using OPAL ICP fallbacks\n");
+
+ return 0;
+}
+
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 47e43b7b076b..a795a5f0301c 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -404,8 +404,11 @@ void __init xics_init(void)
/* First locate ICP */
if (firmware_has_feature(FW_FEATURE_LPAR))
rc = icp_hv_init();
- if (rc < 0)
+ if (rc < 0) {
rc = icp_native_init();
+ if (rc == -ENODEV)
+ rc = icp_opal_init();
+ }
if (rc < 0) {
pr_warning("XICS: Cannot find a Presentation Controller !\n");
return;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c5e155108be5..760545519a0b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -184,9 +184,6 @@ static void dump_tlb_book3e(void);
static int xmon_no_auto_backtrace;
-extern void xmon_enter(void);
-extern void xmon_leave(void);
-
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#else
@@ -1685,9 +1682,78 @@ write_spr(int n, unsigned long val)
catch_spr_faults = 0;
}
-static unsigned long regno;
-extern char exc_prolog;
-extern char dec_exc;
+static void dump_206_sprs(void)
+{
+#ifdef CONFIG_PPC64
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ return;
+
+ /* Actually some of these pre-date 2.06, but whatevs */
+
+ printf("srr0 = %.16x srr1 = %.16x dsisr = %.8x\n",
+ mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));
+ printf("dscr = %.16x ppr = %.16x pir = %.8x\n",
+ mfspr(SPRN_DSCR), mfspr(SPRN_PPR), mfspr(SPRN_PIR));
+
+ if (!(mfmsr() & MSR_HV))
+ return;
+
+ printf("sdr1 = %.16x hdar = %.16x hdsisr = %.8x\n",
+ mfspr(SPRN_SDR1), mfspr(SPRN_HDAR), mfspr(SPRN_HDSISR));
+ printf("hsrr0 = %.16x hsrr1 = %.16x hdec = %.8x\n",
+ mfspr(SPRN_HSRR0), mfspr(SPRN_HSRR1), mfspr(SPRN_HDEC));
+ printf("lpcr = %.16x pcr = %.16x lpidr = %.8x\n",
+ mfspr(SPRN_LPCR), mfspr(SPRN_PCR), mfspr(SPRN_LPID));
+ printf("hsprg0 = %.16x hsprg1 = %.16x\n",
+ mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1));
+ printf("dabr = %.16x dabrx = %.16x\n",
+ mfspr(SPRN_DABR), mfspr(SPRN_DABRX));
+#endif
+}
+
+static void dump_207_sprs(void)
+{
+#ifdef CONFIG_PPC64
+ unsigned long msr;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return;
+
+ printf("dpdes = %.16x tir = %.16x cir = %.8x\n",
+ mfspr(SPRN_DPDES), mfspr(SPRN_TIR), mfspr(SPRN_CIR));
+
+ printf("fscr = %.16x tar = %.16x pspb = %.8x\n",
+ mfspr(SPRN_FSCR), mfspr(SPRN_TAR), mfspr(SPRN_PSPB));
+
+ msr = mfmsr();
+ if (msr & MSR_TM) {
+ /* Only if TM has been enabled in the kernel */
+ printf("tfhar = %.16x tfiar = %.16x texasr = %.16x\n",
+ mfspr(SPRN_TFHAR), mfspr(SPRN_TFIAR),
+ mfspr(SPRN_TEXASR));
+ }
+
+ printf("mmcr0 = %.16x mmcr1 = %.16x mmcr2 = %.16x\n",
+ mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCR2));
+ printf("pmc1 = %.8x pmc2 = %.8x pmc3 = %.8x pmc4 = %.8x\n",
+ mfspr(SPRN_PMC1), mfspr(SPRN_PMC2),
+ mfspr(SPRN_PMC3), mfspr(SPRN_PMC4));
+ printf("mmcra = %.16x siar = %.16x pmc5 = %.8x\n",
+ mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5));
+ printf("sdar = %.16x sier = %.16x pmc6 = %.8x\n",
+ mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6));
+ printf("ebbhr = %.16x ebbrr = %.16x bescr = %.16x\n",
+ mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+
+ if (!(msr & MSR_HV))
+ return;
+
+ printf("hfscr = %.16x dhdes = %.16x rpr = %.16x\n",
+ mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR));
+ printf("dawr = %.16x dawrx = %.16x ciabr = %.16x\n",
+ mfspr(SPRN_DAWR), mfspr(SPRN_DAWRX), mfspr(SPRN_CIABR));
+#endif
+}
static void dump_one_spr(int spr, bool show_unimplemented)
{
@@ -1719,6 +1785,7 @@ static void dump_one_spr(int spr, bool show_unimplemented)
static void super_regs(void)
{
+ static unsigned long regno;
int cmd;
int spr;
@@ -1730,14 +1797,18 @@ static void super_regs(void)
asm("mr %0,1" : "=r" (sp) :);
asm("mr %0,2" : "=r" (toc) :);
- printf("msr = "REG" sprg0= "REG"\n",
+ printf("msr = "REG" sprg0 = "REG"\n",
mfmsr(), mfspr(SPRN_SPRG0));
- printf("pvr = "REG" sprg1= "REG"\n",
+ printf("pvr = "REG" sprg1 = "REG"\n",
mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
- printf("dec = "REG" sprg2= "REG"\n",
+ printf("dec = "REG" sprg2 = "REG"\n",
mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
- printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
- printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
+ printf("sp = "REG" sprg3 = "REG"\n", sp, mfspr(SPRN_SPRG3));
+ printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
+
+ dump_206_sprs();
+ dump_207_sprs();
+
return;
}
case 'w': {
@@ -2213,13 +2284,13 @@ static void dump_one_paca(int cpu)
DUMP(p, subcore_sibling_mask, "x");
#endif
- DUMP(p, user_time, "llx");
- DUMP(p, system_time, "llx");
- DUMP(p, user_time_scaled, "llx");
- DUMP(p, starttime, "llx");
- DUMP(p, starttime_user, "llx");
- DUMP(p, startspurr, "llx");
- DUMP(p, utime_sspurr, "llx");
+ DUMP(p, accounting.user_time, "llx");
+ DUMP(p, accounting.system_time, "llx");
+ DUMP(p, accounting.user_time_scaled, "llx");
+ DUMP(p, accounting.starttime, "llx");
+ DUMP(p, accounting.starttime_user, "llx");
+ DUMP(p, accounting.startspurr, "llx");
+ DUMP(p, accounting.utime_sspurr, "llx");
DUMP(p, stolen_time, "llx");
#undef DUMP
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a8c259059adf..9e607bf2d640 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -72,6 +72,7 @@ config S390
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_KCOV
select ARCH_HAS_SG_CHAIN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
@@ -163,6 +164,7 @@ config S390
select NO_BOOTMEM
select OLD_SIGACTION
select OLD_SIGSUSPEND3
+ select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select TTY
select VIRT_CPU_ACCOUNTING
@@ -477,6 +479,9 @@ config SCHED_MC
config SCHED_BOOK
def_bool n
+config SCHED_DRAWER
+ def_bool n
+
config SCHED_TOPOLOGY
def_bool y
prompt "Topology scheduler support"
@@ -484,6 +489,7 @@ config SCHED_TOPOLOGY
select SCHED_SMT
select SCHED_MC
select SCHED_BOOK
+ select SCHED_DRAWER
help
Topology scheduler support improves the CPU scheduler's decision
making when dealing with machines that have multi-threading,
@@ -605,16 +611,6 @@ config PCI_NR_FUNCTIONS
This allows you to specify the maximum number of PCI functions which
this kernel will support.
-config PCI_NR_MSI
- int "Maximum number of MSI interrupts (64-32768)"
- range 64 32768
- default "256"
- help
- This defines the number of virtual interrupts the kernel will
- provide for MSI interrupts. If you configure your system to have
- too few drivers will fail to allocate MSI interrupts for all
- PCI devices.
-
source "drivers/pci/Kconfig"
endif # PCI
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index edcf2a706942..598df5708501 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -102,7 +102,7 @@ static void appldata_get_mem_data(void *data)
mem_data->totalhigh = P2K(val.totalhigh);
mem_data->freehigh = P2K(val.freehigh);
mem_data->bufferram = P2K(val.bufferram);
- mem_data->cached = P2K(global_page_state(NR_FILE_PAGES)
+ mem_data->cached = P2K(global_node_page_state(NR_FILE_PAGES)
- val.bufferram);
si_swapinfo(&val);
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 1dd210347e12..98ec652cc332 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -4,6 +4,8 @@
# create a compressed vmlinux image from the original vmlinux
#
+KCOV_INSTRUMENT := n
+
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0ac42cc4f880..889ea3450210 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -1,8 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
@@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
@@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
@@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -537,6 +545,8 @@ CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
CONFIG_FRAME_WARN=1024
CONFIG_READABLE_ASM=y
CONFIG_UNUSED_SYMBOLS=y
@@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
@@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
@@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
@@ -664,7 +678,8 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index a31dcd56f7c0..1bcfd764910a 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -1,8 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
@@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
@@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_KPROBE_EVENT is not set
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_RBTREE_TEST=m
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
@@ -610,7 +616,8 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 7b73bf353345..13ff090139c8 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -1,8 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
@@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
@@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
@@ -607,7 +615,8 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1719843a55a2..4366a3e3e754 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,5 +1,5 @@
# CONFIG_SWAP is not set
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
@@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
-# CONFIG_STRICT_DEVMEM is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS_FS is not set
# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 7f0b7cda6259..d1033de4c4ee 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -9,3 +9,6 @@ obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
+obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
+
+crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 7554a8bb2adc..2ea18b050309 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -22,6 +22,7 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
@@ -44,7 +45,7 @@ struct s390_aes_ctx {
long dec;
int key_len;
union {
- struct crypto_blkcipher *blk;
+ struct crypto_skcipher *blk;
struct crypto_cipher *cip;
} fallback;
};
@@ -63,7 +64,7 @@ struct s390_xts_ctx {
long enc;
long dec;
int key_len;
- struct crypto_blkcipher *fallback;
+ struct crypto_skcipher *fallback;
};
/*
@@ -237,16 +238,16 @@ static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
- CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);
+
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
+ CRYPTO_TFM_RES_MASK;
- ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
return ret;
}
@@ -255,15 +256,17 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
unsigned int nbytes)
{
unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_blkcipher *tfm = desc->tfm;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
+ SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
- tfm = desc->tfm;
- desc->tfm = sctx->fallback.blk;
+ skcipher_request_set_tfm(req, sctx->fallback.blk);
+ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+ ret = crypto_skcipher_decrypt(req);
- desc->tfm = tfm;
+ skcipher_request_zero(req);
return ret;
}
@@ -272,15 +275,15 @@ static int fallback_blk_enc(struct blkcipher_desc *desc,
unsigned int nbytes)
{
unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_blkcipher *tfm = desc->tfm;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
+ SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
- tfm = desc->tfm;
- desc->tfm = sctx->fallback.blk;
+ skcipher_request_set_tfm(req, sctx->fallback.blk);
+ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-
- desc->tfm = tfm;
+ ret = crypto_skcipher_encrypt(req);
return ret;
}
@@ -370,8 +373,9 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
const char *name = tfm->__crt_alg->cra_name;
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.blk)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
@@ -386,8 +390,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- crypto_free_blkcipher(sctx->fallback.blk);
- sctx->fallback.blk = NULL;
+ crypto_free_skcipher(sctx->fallback.blk);
}
static struct crypto_alg ecb_aes_alg = {
@@ -536,16 +539,16 @@ static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
- CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
+
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
+ CRYPTO_TFM_RES_MASK;
- ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
return ret;
}
@@ -553,16 +556,18 @@ static int xts_fallback_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
- struct crypto_blkcipher *tfm;
+ struct crypto_blkcipher *tfm = desc->tfm;
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
+ SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
unsigned int ret;
- tfm = desc->tfm;
- desc->tfm = xts_ctx->fallback;
+ skcipher_request_set_tfm(req, xts_ctx->fallback);
+ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+ ret = crypto_skcipher_decrypt(req);
- desc->tfm = tfm;
+ skcipher_request_zero(req);
return ret;
}
@@ -570,16 +575,18 @@ static int xts_fallback_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
- struct crypto_blkcipher *tfm;
+ struct crypto_blkcipher *tfm = desc->tfm;
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
+ SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
unsigned int ret;
- tfm = desc->tfm;
- desc->tfm = xts_ctx->fallback;
+ skcipher_request_set_tfm(req, xts_ctx->fallback);
+ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+ ret = crypto_skcipher_encrypt(req);
- desc->tfm = tfm;
+ skcipher_request_zero(req);
return ret;
}
@@ -700,8 +707,9 @@ static int xts_fallback_init(struct crypto_tfm *tfm)
const char *name = tfm->__crt_alg->cra_name;
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(xts_ctx->fallback)) {
pr_err("Allocating XTS fallback algorithm %s failed\n",
@@ -715,8 +723,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
{
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- crypto_free_blkcipher(xts_ctx->fallback);
- xts_ctx->fallback = NULL;
+ crypto_free_skcipher(xts_ctx->fallback);
}
static struct crypto_alg xts_aes_alg = {
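/*
 * Illustrative sketch (not part of this patch): the conversion above moves
 * the AES and XTS fallbacks from the legacy blkcipher API to the skcipher
 * API. A synchronous fallback call follows this general pattern; the
 * function name and parameters here are placeholders, and the real code
 * obtains the tfm via crypto_alloc_skcipher() in fallback_init_blk().
 */
static int example_skcipher_fallback_encrypt(struct crypto_skcipher *fallback,
					     struct scatterlist *dst,
					     struct scatterlist *src,
					     unsigned int nbytes, void *iv,
					     u32 flags)
{
	SKCIPHER_REQUEST_ON_STACK(req, fallback);	/* request lives on the stack */
	int ret;

	skcipher_request_set_tfm(req, fallback);	/* bind the request to the tfm */
	skcipher_request_set_callback(req, flags, NULL, NULL);	/* no async callback */
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	ret = crypto_skcipher_encrypt(req);		/* or crypto_skcipher_decrypt() */
	skcipher_request_zero(req);			/* wipe request state */
	return ret;
}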
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
new file mode 100644
index 000000000000..577ae1d4ae89
--- /dev/null
+++ b/arch/s390/crypto/crc32-vx.c
@@ -0,0 +1,310 @@
+/*
+ * Crypto-API module for CRC-32 algorithms implemented with the
+ * z/Architecture Vector Extension Facility.
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#define KMSG_COMPONENT "crc32-vx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/crc32.h>
+#include <crypto/internal/hash.h>
+#include <asm/fpu/api.h>
+
+
+#define CRC32_BLOCK_SIZE 1
+#define CRC32_DIGEST_SIZE 4
+
+#define VX_MIN_LEN 64
+#define VX_ALIGNMENT 16L
+#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
+
+struct crc_ctx {
+ u32 key;
+};
+
+struct crc_desc_ctx {
+ u32 crc;
+};
+
+/* Prototypes for functions in assembly files */
+u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+
+/*
+ * DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
+ *
+ * Creates a function to perform a particular CRC-32 computation. Depending
+ * on the message buffer, the hardware-accelerated or software implementation
+ * is used. Note that the message buffer is aligned to improve fetch
+ * operations of VECTOR LOAD MULTIPLE instructions.
+ *
+ */
+#define DEFINE_CRC32_VX(___fname, ___crc32_vx, ___crc32_sw) \
+ static u32 __pure ___fname(u32 crc, \
+ unsigned char const *data, size_t datalen) \
+ { \
+ struct kernel_fpu vxstate; \
+ unsigned long prealign, aligned, remaining; \
+ \
+ if ((unsigned long)data & VX_ALIGN_MASK) { \
+ prealign = VX_ALIGNMENT - \
+ ((unsigned long)data & VX_ALIGN_MASK); \
+ datalen -= prealign; \
+ crc = ___crc32_sw(crc, data, prealign); \
+ data = (void *)((unsigned long)data + prealign); \
+ } \
+ \
+ if (datalen < VX_MIN_LEN) \
+ return ___crc32_sw(crc, data, datalen); \
+ \
+ aligned = datalen & ~VX_ALIGN_MASK; \
+ remaining = datalen & VX_ALIGN_MASK; \
+ \
+ kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW); \
+ crc = ___crc32_vx(crc, data, aligned); \
+ kernel_fpu_end(&vxstate); \
+ \
+ if (remaining) \
+ crc = ___crc32_sw(crc, data + aligned, remaining); \
+ \
+ return crc; \
+ }
+
+DEFINE_CRC32_VX(crc32_le_vx, crc32_le_vgfm_16, crc32_le)
+DEFINE_CRC32_VX(crc32_be_vx, crc32_be_vgfm_16, crc32_be)
+DEFINE_CRC32_VX(crc32c_le_vx, crc32c_le_vgfm_16, __crc32c_le)
+
+
+static int crc32_vx_cra_init_zero(struct crypto_tfm *tfm)
+{
+ struct crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ mctx->key = 0;
+ return 0;
+}
+
+static int crc32_vx_cra_init_invert(struct crypto_tfm *tfm)
+{
+ struct crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ mctx->key = ~0;
+ return 0;
+}
+
+static int crc32_vx_init(struct shash_desc *desc)
+{
+ struct crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = mctx->key;
+ return 0;
+}
+
+static int crc32_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
+ unsigned int newkeylen)
+{
+ struct crc_ctx *mctx = crypto_shash_ctx(tfm);
+
+ if (newkeylen != sizeof(mctx->key)) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ mctx->key = le32_to_cpu(*(__le32 *)newkey);
+ return 0;
+}
+
+static int crc32be_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
+ unsigned int newkeylen)
+{
+ struct crc_ctx *mctx = crypto_shash_ctx(tfm);
+
+ if (newkeylen != sizeof(mctx->key)) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ mctx->key = be32_to_cpu(*(__be32 *)newkey);
+ return 0;
+}
+
+static int crc32le_vx_final(struct shash_desc *desc, u8 *out)
+{
+ struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ *(__le32 *)out = cpu_to_le32p(&ctx->crc);
+ return 0;
+}
+
+static int crc32be_vx_final(struct shash_desc *desc, u8 *out)
+{
+ struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ *(__be32 *)out = cpu_to_be32p(&ctx->crc);
+ return 0;
+}
+
+static int crc32c_vx_final(struct shash_desc *desc, u8 *out)
+{
+ struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ /*
+ * Perform a final XOR with 0xFFFFFFFF to be in sync
+ * with the generic crc32c shash implementation.
+ */
+ *(__le32 *)out = ~cpu_to_le32p(&ctx->crc);
+ return 0;
+}
+
+static int __crc32le_vx_finup(u32 *crc, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ *(__le32 *)out = cpu_to_le32(crc32_le_vx(*crc, data, len));
+ return 0;
+}
+
+static int __crc32be_vx_finup(u32 *crc, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ *(__be32 *)out = cpu_to_be32(crc32_be_vx(*crc, data, len));
+ return 0;
+}
+
+static int __crc32c_vx_finup(u32 *crc, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ /*
+ * Perform a final XOR with 0xFFFFFFFF to be in sync
+ * with the generic crc32c shash implementation.
+ */
+ *(__le32 *)out = ~cpu_to_le32(crc32c_le_vx(*crc, data, len));
+ return 0;
+}
+
+
+#define CRC32_VX_FINUP(alg, func) \
+ static int alg ## _vx_finup(struct shash_desc *desc, const u8 *data, \
+ unsigned int datalen, u8 *out) \
+ { \
+ return __ ## alg ## _vx_finup(shash_desc_ctx(desc), \
+ data, datalen, out); \
+ }
+
+CRC32_VX_FINUP(crc32le, crc32_le_vx)
+CRC32_VX_FINUP(crc32be, crc32_be_vx)
+CRC32_VX_FINUP(crc32c, crc32c_le_vx)
+
+#define CRC32_VX_DIGEST(alg, func) \
+ static int alg ## _vx_digest(struct shash_desc *desc, const u8 *data, \
+ unsigned int len, u8 *out) \
+ { \
+ return __ ## alg ## _vx_finup(crypto_shash_ctx(desc->tfm), \
+ data, len, out); \
+ }
+
+CRC32_VX_DIGEST(crc32le, crc32_le_vx)
+CRC32_VX_DIGEST(crc32be, crc32_be_vx)
+CRC32_VX_DIGEST(crc32c, crc32c_le_vx)
+
+#define CRC32_VX_UPDATE(alg, func) \
+ static int alg ## _vx_update(struct shash_desc *desc, const u8 *data, \
+ unsigned int datalen) \
+ { \
+ struct crc_desc_ctx *ctx = shash_desc_ctx(desc); \
+ ctx->crc = func(ctx->crc, data, datalen); \
+ return 0; \
+ }
+
+CRC32_VX_UPDATE(crc32le, crc32_le_vx)
+CRC32_VX_UPDATE(crc32be, crc32_be_vx)
+CRC32_VX_UPDATE(crc32c, crc32c_le_vx)
+
+
+static struct shash_alg crc32_vx_algs[] = {
+ /* CRC-32 LE */
+ {
+ .init = crc32_vx_init,
+ .setkey = crc32_vx_setkey,
+ .update = crc32le_vx_update,
+ .final = crc32le_vx_final,
+ .finup = crc32le_vx_finup,
+ .digest = crc32le_vx_digest,
+ .descsize = sizeof(struct crc_desc_ctx),
+ .digestsize = CRC32_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "crc32-vx",
+ .cra_priority = 200,
+ .cra_blocksize = CRC32_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crc_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_vx_cra_init_zero,
+ },
+ },
+ /* CRC-32 BE */
+ {
+ .init = crc32_vx_init,
+ .setkey = crc32be_vx_setkey,
+ .update = crc32be_vx_update,
+ .final = crc32be_vx_final,
+ .finup = crc32be_vx_finup,
+ .digest = crc32be_vx_digest,
+ .descsize = sizeof(struct crc_desc_ctx),
+ .digestsize = CRC32_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32be",
+ .cra_driver_name = "crc32be-vx",
+ .cra_priority = 200,
+ .cra_blocksize = CRC32_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crc_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_vx_cra_init_zero,
+ },
+ },
+ /* CRC-32C LE */
+ {
+ .init = crc32_vx_init,
+ .setkey = crc32_vx_setkey,
+ .update = crc32c_vx_update,
+ .final = crc32c_vx_final,
+ .finup = crc32c_vx_finup,
+ .digest = crc32c_vx_digest,
+ .descsize = sizeof(struct crc_desc_ctx),
+ .digestsize = CRC32_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32c",
+ .cra_driver_name = "crc32c-vx",
+ .cra_priority = 200,
+ .cra_blocksize = CRC32_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crc_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_vx_cra_init_invert,
+ },
+ },
+};
+
+
+static int __init crc_vx_mod_init(void)
+{
+ return crypto_register_shashes(crc32_vx_algs,
+ ARRAY_SIZE(crc32_vx_algs));
+}
+
+static void __exit crc_vx_mod_exit(void)
+{
+ crypto_unregister_shashes(crc32_vx_algs, ARRAY_SIZE(crc32_vx_algs));
+}
+
+module_cpu_feature_match(VXRS, crc_vx_mod_init);
+module_exit(crc_vx_mod_exit);
+
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-vx");
+MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-vx");
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
new file mode 100644
index 000000000000..8013989cd2e5
--- /dev/null
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -0,0 +1,207 @@
+/*
+ * Hardware-accelerated CRC-32 variants for Linux on z Systems
+ *
+ * Use the z/Architecture Vector Extension Facility to accelerate the
+ * computing of CRC-32 checksums.
+ *
+ * This CRC-32 implementation algorithm processes the most-significant
+ * bit first (BE).
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/vx-insn.h>
+
+/* Vector register range containing CRC-32 constants */
+#define CONST_R1R2 %v9
+#define CONST_R3R4 %v10
+#define CONST_R5 %v11
+#define CONST_R6 %v12
+#define CONST_RU_POLY %v13
+#define CONST_CRC_POLY %v14
+
+.data
+.align 8
+
+/*
+ * The CRC-32 constant block contains reduction constants to fold and
+ * process particular chunks of the input data stream in parallel.
+ *
+ * For the CRC-32 variants, the constants are precomputed according to
+ * these definitions:
+ *
+ * R1 = x4*128+64 mod P(x)
+ * R2 = x4*128 mod P(x)
+ * R3 = x128+64 mod P(x)
+ * R4 = x128 mod P(x)
+ * R5 = x96 mod P(x)
+ * R6 = x64 mod P(x)
+ *
+ * Barrett reduction constant, u, is defined as floor(x**64 / P(x)).
+ *
+ * where P(x) is the polynomial in the normal domain and P'(x) is the
+ * polynomial in the reversed (bitreflected) domain.
+ *
+ * Note that the constant definitions below are extended in order to compute
+ * intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
+ * The rightmost doubleword can be 0 to prevent contribution to the result or
+ * can be multiplied by 1 to perform an XOR without the need for a separate
+ * VECTOR EXCLUSIVE OR instruction.
+ *
+ * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
+ *
+ * P(x) = 0x04C11DB7
+ * P'(x) = 0xEDB88320
+ */
+
+.Lconstants_CRC_32_BE:
+ .quad 0x08833794c, 0x0e6228b11 # R1, R2
+ .quad 0x0c5b9cd4c, 0x0e8a45605 # R3, R4
+ .quad 0x0f200aa66, 1 << 32 # R5, x32
+ .quad 0x0490d678d, 1 # R6, 1
+ .quad 0x104d101df, 0 # u
+ .quad 0x104C11DB7, 0 # P(x)
+
+.previous
+
+.text
+/*
+ * The CRC-32 function(s) use these calling conventions:
+ *
+ * Parameters:
+ *
+ * %r2: Initial CRC value, typically ~0; and final CRC (return) value.
+ * %r3: Input buffer pointer, performance might be improved if the
+ * buffer is on a doubleword boundary.
+ * %r4: Length of the buffer, must be 64 bytes or greater.
+ *
+ * Register usage:
+ *
+ * %r5: CRC-32 constant pool base pointer.
+ * V0: Initial CRC value and intermediate constants and results.
+ * V1..V4: Data for CRC computation.
+ * V5..V8: Next data chunks that are fetched from the input buffer.
+ *
+ * V9..V14: CRC-32 constants.
+ */
+ENTRY(crc32_be_vgfm_16)
+ /* Load CRC-32 constants */
+ larl %r5,.Lconstants_CRC_32_BE
+ VLM CONST_R1R2,CONST_CRC_POLY,0,%r5
+
+ /* Load the initial CRC value into the leftmost word of V0. */
+ VZERO %v0
+ VLVGF %v0,%r2,0
+
+ /* Load a 64-byte data chunk and XOR with CRC */
+ VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */
+ VX %v1,%v0,%v1 /* V1 ^= CRC */
+ aghi %r3,64 /* BUF = BUF + 64 */
+ aghi %r4,-64 /* LEN = LEN - 64 */
+
+ /* Check remaining buffer size and jump to proper folding method */
+ cghi %r4,64
+ jl .Lless_than_64bytes
+
+.Lfold_64bytes_loop:
+ /* Load the next 64-byte data chunk into V5 to V8 */
+ VLM %v5,%v8,0,%r3
+
+ /*
+ * Perform a GF(2) multiplication of the doublewords in V1 with
+ * the reduction constants in V0. The intermediate result is
+ * then folded (accumulated) with the next data chunk in V5 and
+ * stored in V1. Repeat this step for the register contents
+ * in V2, V3, and V4 respectively.
+ */
+ VGFMAG %v1,CONST_R1R2,%v1,%v5
+ VGFMAG %v2,CONST_R1R2,%v2,%v6
+ VGFMAG %v3,CONST_R1R2,%v3,%v7
+ VGFMAG %v4,CONST_R1R2,%v4,%v8
+
+ /* Adjust buffer pointer and length for next loop */
+ aghi %r3,64 /* BUF = BUF + 64 */
+ aghi %r4,-64 /* LEN = LEN - 64 */
+
+ cghi %r4,64
+ jnl .Lfold_64bytes_loop
+
+.Lless_than_64bytes:
+ /* Fold V1 to V4 into a single 128-bit value in V1 */
+ VGFMAG %v1,CONST_R3R4,%v1,%v2
+ VGFMAG %v1,CONST_R3R4,%v1,%v3
+ VGFMAG %v1,CONST_R3R4,%v1,%v4
+
+ /* Check whether to continue with 64-bit folding */
+ cghi %r4,16
+ jl .Lfinal_fold
+
+.Lfold_16bytes_loop:
+
+ VL %v2,0,,%r3 /* Load next data chunk */
+ VGFMAG %v1,CONST_R3R4,%v1,%v2 /* Fold next data chunk */
+
+ /* Adjust buffer pointer and size for folding next data chunk */
+ aghi %r3,16
+ aghi %r4,-16
+
+ /* Process remaining data chunks */
+ cghi %r4,16
+ jnl .Lfold_16bytes_loop
+
+.Lfinal_fold:
+ /*
+ * The R5 constant is used to fold a 128-bit value into a 96-bit value
+ * that is XORed with the next 96-bit input data chunk. To use a single
+ * VGFMG instruction, multiply the rightmost 64-bit with x^32 (1<<32) to
+ * form an intermediate 96-bit value (with appended zeros) which is then
+ * XORed with the intermediate reduction result.
+ */
+ VGFMG %v1,CONST_R5,%v1
+
+ /*
+ * Further reduce the remaining 96-bit value to a 64-bit value using a
+ * single VGFMG; the rightmost doubleword is multiplied with 0x1. The
+ * intermediate result is then XORed with the product of the leftmost
+ * doubleword with R6. The result is a 64-bit value and is subject to
+ * the Barrett reduction.
+ */
+ VGFMG %v1,CONST_R6,%v1
+
+ /*
+ * The input values to the Barrett reduction are the degree-63 polynomial
+ * in V1 (R(x)), the degree-32 generator polynomial, and the reduction
+ * constant u. The Barrett reduction result is the CRC value of R(x) mod
+ * P(x).
+ *
+ * The Barrett reduction algorithm is defined as:
+ *
+ * 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
+ * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
+ * 3. C(x) = R(x) XOR T2(x) mod x^32
+ *
+ * Note: To compensate for the division by x^32, use the vector unpack
+ * instruction to move the leftmost word into the leftmost doubleword
+ * of the vector register. The rightmost doubleword is multiplied
+ * with zero to not contribute to the intermediate results.
+ */
+
+ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
+ VUPLLF %v2,%v1
+ VGFMG %v2,CONST_RU_POLY,%v2
+
+ /*
+ * Compute the GF(2) product of the CRC polynomial in VO with T1(x) in
+ * V2 and XOR the intermediate result, T2(x), with the value in V1.
+ * The final result is in the rightmost word of V2.
+ */
+ VUPLLF %v2,%v2
+ VGFMAG %v2,CONST_CRC_POLY,%v2,%v1
+
+.Ldone:
+ VLGVF %r2,%v2,3
+ br %r14
+
+.previous
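/*
 * Illustrative sketch (not part of this patch): the Barrett reduction that
 * the code above performs with VGFMG/VGFMAG, written as plain C over a
 * bit-serial carry-less multiplication. The constants are the u and P(x)
 * values from the constant block above; clmul64() is a placeholder helper.
 */
static u64 clmul64(u64 a, u64 b)		/* low 64 bits of the GF(2) product */
{
	u64 prod = 0;
	unsigned int i;

	for (i = 0; i < 64; i++)
		if (b & (1ULL << i))
			prod ^= a << i;
	return prod;
}

static u32 example_barrett_reduce_be(u64 r)	/* R(x), degree <= 63 */
{
	const u64 u = 0x104d101dfULL;		/* floor(x^64 / P(x)) */
	const u64 p = 0x104c11db7ULL;		/* P(x), CRC-32 polynomial */
	u64 t1, t2;

	t1 = clmul64(r >> 32, u);		/* T1(x) = floor(R(x)/x^32) GF2MUL u */
	t2 = clmul64(t1 >> 32, p);		/* T2(x) = floor(T1(x)/x^32) GF2MUL P(x) */
	return (u32)(r ^ t2);			/* C(x) = R(x) XOR T2(x) mod x^32 */
}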
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
new file mode 100644
index 000000000000..17f2504c2633
--- /dev/null
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -0,0 +1,268 @@
+/*
+ * Hardware-accelerated CRC-32 variants for Linux on z Systems
+ *
+ * Use the z/Architecture Vector Extension Facility to accelerate the
+ * computing of bitreflected CRC-32 checksums for IEEE 802.3 Ethernet
+ * and Castagnoli.
+ *
+ * This CRC-32 implementation algorithm is bitreflected and processes
+ * the least-significant bit first (Little-Endian).
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/vx-insn.h>
+
+/* Vector register range containing CRC-32 constants */
+#define CONST_PERM_LE2BE %v9
+#define CONST_R2R1 %v10
+#define CONST_R4R3 %v11
+#define CONST_R5 %v12
+#define CONST_RU_POLY %v13
+#define CONST_CRC_POLY %v14
+
+.data
+.align 8
+
+/*
+ * The CRC-32 constant block contains reduction constants to fold and
+ * process particular chunks of the input data stream in parallel.
+ *
+ * For the CRC-32 variants, the constants are precomputed according to
+ * these definitions:
+ *
+ * R1 = [(x4*128+32 mod P'(x) << 32)]' << 1
+ * R2 = [(x4*128-32 mod P'(x) << 32)]' << 1
+ * R3 = [(x128+32 mod P'(x) << 32)]' << 1
+ * R4 = [(x128-32 mod P'(x) << 32)]' << 1
+ * R5 = [(x64 mod P'(x) << 32)]' << 1
+ * R6 = [(x32 mod P'(x) << 32)]' << 1
+ *
+ * The bitreflected Barrett reduction constant, u', is defined as
+ * the bit reversal of floor(x**64 / P(x)).
+ *
+ * where P(x) is the polynomial in the normal domain and P'(x) is the
+ * polynomial in the reversed (bitreflected) domain.
+ *
+ * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
+ *
+ * P(x) = 0x04C11DB7
+ * P'(x) = 0xEDB88320
+ *
+ * CRC-32C (Castagnoli) polynomials:
+ *
+ * P(x) = 0x1EDC6F41
+ * P'(x) = 0x82F63B78
+ */
+
+.Lconstants_CRC_32_LE:
+ .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
+ .quad 0x1c6e41596, 0x154442bd4 # R2, R1
+ .quad 0x0ccaa009e, 0x1751997d0 # R4, R3
+ .octa 0x163cd6124 # R5
+ .octa 0x1F7011641 # u'
+ .octa 0x1DB710641 # P'(x) << 1
+
+.Lconstants_CRC_32C_LE:
+ .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
+ .quad 0x09e4addf8, 0x740eef02 # R2, R1
+ .quad 0x14cd00bd6, 0xf20c0dfe # R4, R3
+ .octa 0x0dd45aab8 # R5
+ .octa 0x0dea713f1 # u'
+ .octa 0x105ec76f0 # P'(x) << 1
+
+.previous
+
+
+.text
+
+/*
+ * The CRC-32 functions use these calling conventions:
+ *
+ * Parameters:
+ *
+ * %r2: Initial CRC value, typically ~0; and final CRC (return) value.
+ * %r3: Input buffer pointer, performance might be improved if the
+ * buffer is on a doubleword boundary.
+ * %r4: Length of the buffer, must be 64 bytes or greater.
+ *
+ * Register usage:
+ *
+ * %r5: CRC-32 constant pool base pointer.
+ * V0: Initial CRC value and intermediate constants and results.
+ * V1..V4: Data for CRC computation.
+ * V5..V8: Next data chunks that are fetched from the input buffer.
+ * V9: Constant for BE->LE conversion and shift operations
+ *
+ * V10..V14: CRC-32 constants.
+ */
+
+ENTRY(crc32_le_vgfm_16)
+ larl %r5,.Lconstants_CRC_32_LE
+ j crc32_le_vgfm_generic
+
+ENTRY(crc32c_le_vgfm_16)
+ larl %r5,.Lconstants_CRC_32C_LE
+ j crc32_le_vgfm_generic
+
+
+crc32_le_vgfm_generic:
+ /* Load CRC-32 constants */
+ VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
+
+ /*
+ * Load the initial CRC value.
+ *
+ * The CRC value is loaded into the rightmost word of the
+ * vector register and is later XORed with the LSB portion
+ * of the loaded input data.
+ */
+ VZERO %v0 /* Clear V0 */
+ VLVGF %v0,%r2,3 /* Load CRC into rightmost word */
+
+ /* Load a 64-byte data chunk and XOR with CRC */
+ VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */
+ VPERM %v1,%v1,%v1,CONST_PERM_LE2BE
+ VPERM %v2,%v2,%v2,CONST_PERM_LE2BE
+ VPERM %v3,%v3,%v3,CONST_PERM_LE2BE
+ VPERM %v4,%v4,%v4,CONST_PERM_LE2BE
+
+ VX %v1,%v0,%v1 /* V1 ^= CRC */
+ aghi %r3,64 /* BUF = BUF + 64 */
+ aghi %r4,-64 /* LEN = LEN - 64 */
+
+ cghi %r4,64
+ jl .Lless_than_64bytes
+
+.Lfold_64bytes_loop:
+ /* Load the next 64-byte data chunk into V5 to V8 */
+ VLM %v5,%v8,0,%r3
+ VPERM %v5,%v5,%v5,CONST_PERM_LE2BE
+ VPERM %v6,%v6,%v6,CONST_PERM_LE2BE
+ VPERM %v7,%v7,%v7,CONST_PERM_LE2BE
+ VPERM %v8,%v8,%v8,CONST_PERM_LE2BE
+
+ /*
+ * Perform a GF(2) multiplication of the doublewords in V1 with
+ * the R1 and R2 reduction constants in V0. The intermediate result
+ * is then folded (accumulated) with the next data chunk in V5 and
+ * stored in V1. Repeat this step for the register contents
+ * in V2, V3, and V4 respectively.
+ */
+ VGFMAG %v1,CONST_R2R1,%v1,%v5
+ VGFMAG %v2,CONST_R2R1,%v2,%v6
+ VGFMAG %v3,CONST_R2R1,%v3,%v7
+ VGFMAG %v4,CONST_R2R1,%v4,%v8
+
+ aghi %r3,64 /* BUF = BUF + 64 */
+ aghi %r4,-64 /* LEN = LEN - 64 */
+
+ cghi %r4,64
+ jnl .Lfold_64bytes_loop
+
+.Lless_than_64bytes:
+ /*
+ * Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3
+ * and R4, accumulating the next 128-bit chunk until a single 128-bit
+ * value remains.
+ */
+ VGFMAG %v1,CONST_R4R3,%v1,%v2
+ VGFMAG %v1,CONST_R4R3,%v1,%v3
+ VGFMAG %v1,CONST_R4R3,%v1,%v4
+
+ cghi %r4,16
+ jl .Lfinal_fold
+
+.Lfold_16bytes_loop:
+
+ VL %v2,0,,%r3 /* Load next data chunk */
+ VPERM %v2,%v2,%v2,CONST_PERM_LE2BE
+ VGFMAG %v1,CONST_R4R3,%v1,%v2 /* Fold next data chunk */
+
+ aghi %r3,16
+ aghi %r4,-16
+
+ cghi %r4,16
+ jnl .Lfold_16bytes_loop
+
+.Lfinal_fold:
+ /*
+ * Set up a vector register for byte shifts. The shift value must
+ * be loaded in bits 1-4 in byte element 7 of a vector register.
+ * Shift by 8 bytes: 0x40
+ * Shift by 4 bytes: 0x20
+ */
+ VLEIB %v9,0x40,7
+
+ /*
+ * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
+ * to move R4 into the rightmost doubleword and set the leftmost
+ * doubleword to 0x1.
+ */
+ VSRLB %v0,CONST_R4R3,%v9
+ VLEIG %v0,1,0
+
+ /*
+ * Compute GF(2) product of V1 and V0. The rightmost doubleword
+ * of V1 is multiplied with R4. The leftmost doubleword of V1 is
+ * multiplied by 0x1 and is then XORed with the rightmost product.
+ * Implicitly, the intermediate leftmost product becomes padded with zeroes.
+ */
+ VGFMG %v1,%v0,%v1
+
+ /*
+ * Now do the final 32-bit fold by multiplying the rightmost word
+ * in V1 with R5 and XOR the result with the remaining bits in V1.
+ *
+ * To achieve this by a single VGFMAG, right shift V1 by a word
+ * and store the result in V2 which is then accumulated. Use the
+ * vector unpack instruction to load the rightmost half of the
+ * doubleword into the rightmost doubleword element of V1; the other
+ * half is loaded in the leftmost doubleword.
+ * The vector register with CONST_R5 contains the R5 constant in the
+ * rightmost doubleword and the leftmost doubleword is zero to ignore
+ * the leftmost product of V1.
+ */
+ VLEIB %v9,0x20,7 /* Shift by words */
+ VSRLB %v2,%v1,%v9 /* Store remaining bits in V2 */
+ VUPLLF %v1,%v1 /* Split rightmost doubleword */
+ VGFMAG %v1,CONST_R5,%v1,%v2 /* V1 = (V1 * R5) XOR V2 */
+
+ /*
+ * Apply a Barrett reduction to compute the final 32-bit CRC value.
+ *
+ * The input values to the Barrett reduction are the degree-63 polynomial
+ * in V1 (R(x)), the degree-32 generator polynomial, and the reduction
+ * constant u. The Barrett reduction result is the CRC value of R(x) mod
+ * P(x).
+ *
+ * The Barrett reduction algorithm is defined as:
+ *
+ * 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
+ * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
+ * 3. C(x) = R(x) XOR T2(x) mod x^32
+ *
+ * Note: The leftmost doubleword of vector register containing
+ * CONST_RU_POLY is zero and, thus, the intermediate GF(2) product
+ * is zero and does not contribute to the final result.
+ */
+
+ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
+ VUPLLF %v2,%v1
+ VGFMG %v2,CONST_RU_POLY,%v2
+
+ /*
+ * Compute the GF(2) product of the CRC polynomial with T1(x) in
+ * V2 and XOR the intermediate result, T2(x), with the value in V1.
+ * The final result is stored in word element 2 of V2.
+ */
+ VUPLLF %v2,%v2
+ VGFMAG %v2,CONST_CRC_POLY,%v2,%v1
+
+.Ldone:
+ VLGVF %r2,%v2,2
+ br %r14
+
+.previous
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e24f2af4c73b..ccccebeeaaf6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,8 +1,8 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+CONFIG_USELIB=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
@@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
@@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
@@ -61,7 +68,6 @@ CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
-# CONFIG_INET_LRO is not set
CONFIG_L2TP=m
CONFIG_L2TP_DEBUGFS=m
CONFIG_VLAN_8021Q=y
@@ -144,6 +150,9 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
@@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_RCU_TRACE=y
CONFIG_LATENCYTOP=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
@@ -212,17 +222,19 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_CRC32_S390=m
CONFIG_CRC7=m
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 045035796ca7..67d43a0eabb4 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -337,25 +337,27 @@ static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
/* Diagnose 204 functions */
-static inline int __diag204(unsigned long subcode, unsigned long size, void *addr)
+static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
{
- register unsigned long _subcode asm("0") = subcode;
+ register unsigned long _subcode asm("0") = *subcode;
register unsigned long _size asm("1") = size;
asm volatile(
" diag %2,%0,0x204\n"
- "0:\n"
+ "0: nopr %%r7\n"
EX_TABLE(0b,0b)
: "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
- if (_subcode)
- return -1;
+ *subcode = _subcode;
return _size;
}
static int diag204(unsigned long subcode, unsigned long size, void *addr)
{
diag_stat_inc(DIAG_STAT_X204);
- return __diag204(subcode, size, addr);
+ size = __diag204(&subcode, size, addr);
+ if (subcode)
+ return -1;
+ return size;
}
/*
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 44feac38ccfc..012919d9833b 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -70,7 +70,7 @@ static int diag2fc(int size, char* query, void *addr)
diag_stat_inc(DIAG_STAT_X2FC);
asm volatile(
" diag %0,%1,0x2fc\n"
- "0:\n"
+ "0: nopr %%r7\n"
EX_TABLE(0b,0b)
: "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory");
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 911064aa59b2..d28cc2f5b7b2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -93,6 +93,11 @@ static inline int atomic_add_return(int i, atomic_t *v)
return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+}
+
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -114,22 +119,27 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
+#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-#define ATOMIC_OP(op, OP) \
+#define ATOMIC_OPS(op, OP) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
+} \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
}
-ATOMIC_OP(and, AND)
-ATOMIC_OP(or, OR)
-ATOMIC_OP(xor, XOR)
+ATOMIC_OPS(and, AND)
+ATOMIC_OPS(or, OR)
+ATOMIC_OPS(xor, XOR)
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -236,6 +246,11 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+}
+
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -264,17 +279,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
return old;
}
-#define ATOMIC64_OP(op, OP) \
+#define ATOMIC64_OPS(op, OP) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+} \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
+{ \
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
}
-ATOMIC64_OP(and, AND)
-ATOMIC64_OP(or, OR)
-ATOMIC64_OP(xor, XOR)
+ATOMIC64_OPS(and, AND)
+ATOMIC64_OPS(or, OR)
+ATOMIC64_OPS(xor, XOR)
-#undef ATOMIC64_OP
+#undef ATOMIC64_OPS
#undef __ATOMIC64_LOOP
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
@@ -315,6 +334,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
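/*
 * Illustrative sketch (not part of this patch): the new atomic_fetch_*()
 * variants differ from the *_return() forms only in the value they return:
 * the old value instead of the new one.
 */
static inline void example_atomic_fetch_semantics(atomic_t *v)
{
	atomic_set(v, 5);

	WARN_ON(atomic_add_return(3, v) != 8);	/* returns the new value (8) */
	WARN_ON(atomic_fetch_add(3, v) != 8);	/* returns the old value (8); v is now 11 */
	WARN_ON(atomic_read(v) != 11);
}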
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 22da3b34c655..05219a5e0b2f 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,9 +13,6 @@
#define L1_CACHE_SHIFT 8
#define NET_SKB_PAD 32
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
-
-/* Read-only memory is marked before mark_rodata_ro() is called. */
-#define __ro_after_init __read_mostly
+#define __read_mostly __section(.data..read_mostly)
#endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index d1e7b0a0feeb..f7ed88cc066e 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -320,7 +320,7 @@ struct cio_iplinfo {
extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
/* Function from drivers/s390/cio/chsc.c */
-int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
int chsc_sstpi(void *page, void *result, size_t size);
#endif
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 9dd04b9e9782..03516476127b 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -169,16 +169,27 @@ static inline int lcctl(u64 ctl)
}
/* Extract CPU counter */
-static inline int ecctr(u64 ctr, u64 *val)
+static inline int __ecctr(u64 ctr, u64 *content)
{
- register u64 content asm("4") = 0;
+ register u64 _content asm("4") = 0;
int cc;
asm volatile (
" .insn rre,0xb2e40000,%0,%2\n"
" ipm %1\n"
" srl %1,28\n"
- : "=d" (content), "=d" (cc) : "d" (ctr) : "cc");
+ : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc");
+ *content = _content;
+ return cc;
+}
+
+/* Extract CPU counter */
+static inline int ecctr(u64 ctr, u64 *val)
+{
+ u64 content;
+ int cc;
+
+ cc = __ecctr(ctr, &content);
if (!cc)
*val = content;
return cc;
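/*
 * Illustrative sketch (not part of this patch): ecctr() keeps the old
 * behaviour of storing the counter value only on condition code zero,
 * while the new __ecctr() hands both the raw content and the condition
 * code back to callers that want them separately. The counter number
 * below is a placeholder.
 */
static inline u64 example_read_cpu_counter(u64 ctr)
{
	u64 val;

	if (ecctr(ctr, &val))		/* non-zero cc: counter unavailable */
		return 0;
	return val;
}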
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 5fac921c1c42..86cae09e076a 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -49,7 +49,7 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
diag_stat_inc(DIAG_STAT_X010);
asm volatile(
"0: diag %0,%1,0x10\n"
- "1:\n"
+ "1: nopr %%r7\n"
EX_TABLE(0b, 1b)
EX_TABLE(1b, 1b)
: : "a" (start_addr), "a" (end_addr));
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
deleted file mode 100644
index 105f90e63a0e..000000000000
--- a/arch/s390/include/asm/etr.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-#ifndef __S390_ETR_H
-#define __S390_ETR_H
-
-/* ETR attachment control register */
-struct etr_eacr {
- unsigned int e0 : 1; /* port 0 stepping control */
- unsigned int e1 : 1; /* port 1 stepping control */
- unsigned int _pad0 : 5; /* must be 00100 */
- unsigned int dp : 1; /* data port control */
- unsigned int p0 : 1; /* port 0 change recognition control */
- unsigned int p1 : 1; /* port 1 change recognition control */
- unsigned int _pad1 : 3; /* must be 000 */
- unsigned int ea : 1; /* ETR alert control */
- unsigned int es : 1; /* ETR sync check control */
- unsigned int sl : 1; /* switch to local control */
-} __attribute__ ((packed));
-
-/* Port state returned by steai */
-enum etr_psc {
- etr_psc_operational = 0,
- etr_psc_semi_operational = 1,
- etr_psc_protocol_error = 4,
- etr_psc_no_symbols = 8,
- etr_psc_no_signal = 12,
- etr_psc_pps_mode = 13
-};
-
-/* Logical port state returned by stetr */
-enum etr_lpsc {
- etr_lpsc_operational_step = 0,
- etr_lpsc_operational_alt = 1,
- etr_lpsc_semi_operational = 2,
- etr_lpsc_protocol_error = 4,
- etr_lpsc_no_symbol_sync = 8,
- etr_lpsc_no_signal = 12,
- etr_lpsc_pps_mode = 13
-};
-
-/* ETR status words */
-struct etr_esw {
- struct etr_eacr eacr; /* attachment control register */
- unsigned int y : 1; /* stepping mode */
- unsigned int _pad0 : 5; /* must be 00000 */
- unsigned int p : 1; /* stepping port number */
- unsigned int q : 1; /* data port number */
- unsigned int psc0 : 4; /* port 0 state code */
- unsigned int psc1 : 4; /* port 1 state code */
-} __attribute__ ((packed));
-
-/* Second level data register status word */
-struct etr_slsw {
- unsigned int vv1 : 1; /* copy of validity bit data frame 1 */
- unsigned int vv2 : 1; /* copy of validity bit data frame 2 */
- unsigned int vv3 : 1; /* copy of validity bit data frame 3 */
- unsigned int vv4 : 1; /* copy of validity bit data frame 4 */
- unsigned int _pad0 : 19; /* must by all zeroes */
- unsigned int n : 1; /* EAF port number */
- unsigned int v1 : 1; /* validity bit ETR data frame 1 */
- unsigned int v2 : 1; /* validity bit ETR data frame 2 */
- unsigned int v3 : 1; /* validity bit ETR data frame 3 */
- unsigned int v4 : 1; /* validity bit ETR data frame 4 */
- unsigned int _pad1 : 4; /* must be 0000 */
-} __attribute__ ((packed));
-
-/* ETR data frames */
-struct etr_edf1 {
- unsigned int u : 1; /* untuned bit */
- unsigned int _pad0 : 1; /* must be 0 */
- unsigned int r : 1; /* service request bit */
- unsigned int _pad1 : 4; /* must be 0000 */
- unsigned int a : 1; /* time adjustment bit */
- unsigned int net_id : 8; /* ETR network id */
- unsigned int etr_id : 8; /* id of ETR which sends data frames */
- unsigned int etr_pn : 8; /* port number of ETR output port */
-} __attribute__ ((packed));
-
-struct etr_edf2 {
- unsigned int etv : 32; /* Upper 32 bits of TOD. */
-} __attribute__ ((packed));
-
-struct etr_edf3 {
- unsigned int rc : 8; /* failure reason code */
- unsigned int _pad0 : 3; /* must be 000 */
- unsigned int c : 1; /* ETR coupled bit */
- unsigned int tc : 4; /* ETR type code */
- unsigned int blto : 8; /* biased local time offset */
- /* (blto - 128) * 15 = minutes */
- unsigned int buo : 8; /* biased utc offset */
- /* (buo - 128) = leap seconds */
-} __attribute__ ((packed));
-
-struct etr_edf4 {
- unsigned int ed : 8; /* ETS device dependent data */
- unsigned int _pad0 : 1; /* must be 0 */
- unsigned int buc : 5; /* biased ut1 correction */
- /* (buc - 16) * 0.1 seconds */
- unsigned int em : 6; /* ETS error magnitude */
- unsigned int dc : 6; /* ETS drift code */
- unsigned int sc : 6; /* ETS steering code */
-} __attribute__ ((packed));
-
-/*
- * ETR attachment information block, two formats
- * format 1 has 4 reserved words with a size of 64 bytes
- * format 2 has 16 reserved words with a size of 96 bytes
- */
-struct etr_aib {
- struct etr_esw esw;
- struct etr_slsw slsw;
- unsigned long long tsp;
- struct etr_edf1 edf1;
- struct etr_edf2 edf2;
- struct etr_edf3 edf3;
- struct etr_edf4 edf4;
- unsigned int reserved[16];
-} __attribute__ ((packed,aligned(8)));
-
-/* ETR interruption parameter */
-struct etr_irq_parm {
- unsigned int _pad0 : 8;
- unsigned int pc0 : 1; /* port 0 state change */
- unsigned int pc1 : 1; /* port 1 state change */
- unsigned int _pad1 : 3;
- unsigned int eai : 1; /* ETR alert indication */
- unsigned int _pad2 : 18;
-} __attribute__ ((packed));
-
-/* Query TOD offset result */
-struct etr_ptff_qto {
- unsigned long long physical_clock;
- unsigned long long tod_offset;
- unsigned long long logical_tod_offset;
- unsigned long long tod_epoch_difference;
-} __attribute__ ((packed));
-
-/* Inline assembly helper functions */
-static inline int etr_setr(struct etr_eacr *ctrl)
-{
- int rc = -EOPNOTSUPP;
-
- asm volatile(
- " .insn s,0xb2160000,%1\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) : "Q" (*ctrl));
- return rc;
-}
-
-/* Stores a format 1 aib with 64 bytes */
-static inline int etr_stetr(struct etr_aib *aib)
-{
- int rc = -EOPNOTSUPP;
-
- asm volatile(
- " .insn s,0xb2170000,%1\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) : "Q" (*aib));
- return rc;
-}
-
-/* Stores a format 2 aib with 96 bytes for specified port */
-static inline int etr_steai(struct etr_aib *aib, unsigned int func)
-{
- register unsigned int reg0 asm("0") = func;
- int rc = -EOPNOTSUPP;
-
- asm volatile(
- " .insn s,0xb2b30000,%1\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) : "Q" (*aib), "d" (reg0));
- return rc;
-}
-
-/* Function codes for the steai instruction. */
-#define ETR_STEAI_STEPPING_PORT 0x10
-#define ETR_STEAI_ALTERNATE_PORT 0x11
-#define ETR_STEAI_PORT_0 0x12
-#define ETR_STEAI_PORT_1 0x13
-
-static inline int etr_ptff(void *ptff_block, unsigned int func)
-{
- register unsigned int reg0 asm("0") = func;
- register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
- int rc = -EOPNOTSUPP;
-
- asm volatile(
- " .word 0x0104\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "=m" (ptff_block)
- : "d" (reg0), "d" (reg1), "m" (ptff_block) : "cc");
- return rc;
-}
-
-/* Function codes for the ptff instruction. */
-#define ETR_PTFF_QAF 0x00 /* query available functions */
-#define ETR_PTFF_QTO 0x01 /* query tod offset */
-#define ETR_PTFF_QSI 0x02 /* query steering information */
-#define ETR_PTFF_ATO 0x40 /* adjust tod offset */
-#define ETR_PTFF_STO 0x41 /* set tod offset */
-#define ETR_PTFF_SFS 0x42 /* set fine steering rate */
-#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
-
-/* Functions needed by the machine check handler */
-int etr_switch_to_local(void);
-int etr_sync_check(void);
-void etr_queue_work(void);
-
-/* notifier for syncs */
-extern struct atomic_notifier_head s390_epoch_delta_notifier;
-
-/* STP interruption parameter */
-struct stp_irq_parm {
- unsigned int _pad0 : 14;
- unsigned int tsc : 1; /* Timing status change */
- unsigned int lac : 1; /* Link availability change */
- unsigned int tcpc : 1; /* Time control parameter change */
- unsigned int _pad2 : 15;
-} __attribute__ ((packed));
-
-#define STP_OP_SYNC 1
-#define STP_OP_CTRL 3
-
-struct stp_sstpi {
- unsigned int rsvd0;
- unsigned int rsvd1 : 8;
- unsigned int stratum : 8;
- unsigned int vbits : 16;
- unsigned int leaps : 16;
- unsigned int tmd : 4;
- unsigned int ctn : 4;
- unsigned int rsvd2 : 3;
- unsigned int c : 1;
- unsigned int tst : 4;
- unsigned int tzo : 16;
- unsigned int dsto : 16;
- unsigned int ctrl : 16;
- unsigned int rsvd3 : 16;
- unsigned int tto;
- unsigned int rsvd4;
- unsigned int ctnid[3];
- unsigned int rsvd5;
- unsigned int todoff[4];
- unsigned int rsvd6[48];
-} __attribute__ ((packed));
-
-/* Functions needed by the machine check handler */
-int stp_sync_check(void);
-int stp_island_check(void);
-void stp_queue_work(void);
-
-#endif /* __S390_ETR_H */
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index 7ecb92b469b6..04cb4b4bcc5f 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -6,7 +6,7 @@
*/
#ifndef _ASM_S390_FCX_H
-#define _ASM_S390_FCX_H _ASM_S390_FCX_H
+#define _ASM_S390_FCX_H
#include <linux/types.h>
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 5e04f3cbd320..6aba6fc406ad 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -1,6 +1,41 @@
/*
* In-kernel FPU support functions
*
+ *
+ * Consider these guidelines before using in-kernel FPU functions:
+ *
+ * 1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
+ * use of floating-point or vector registers and instructions.
+ *
+ * 2. For kernel_fpu_begin(), specify the vector register range you want to
+ * use with the KERNEL_VXR_* constants. Consider these usage guidelines:
+ *
+ * a) If your function typically runs in process-context, use the lower
+ * half of the vector registers, for example, specify KERNEL_VXR_LOW.
+ * b) If your function typically runs in soft-irq or hard-irq context,
+ * prefer using the upper half of the vector registers, for example,
+ * specify KERNEL_VXR_HIGH.
+ *
+ * If you adhere to these guidelines, an interrupted process context
+ * does not need to save and restore vector registers because the
+ * register ranges are disjoint.
+ *
+ * Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
+ * include logic to save and restore up to 16 vector registers at once.
+ *
+ * 3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
+ * struct kernel_fpu states. Vector registers that are in use by outer
+ * levels are saved and restored. You can minimize the save and restore
+ * effort by choosing disjoint vector register ranges.
+ *
+ * 4. To use vector floating-point instructions, specify the KERNEL_FPC
+ * flag to save and restore floating-point controls in addition to any
+ * vector register range.
+ *
+ * 5. To use floating-point registers and instructions only, specify the
+ * KERNEL_FPR flag. This flag triggers a save and restore of vector
+ * registers V0 to V15 and floating-point controls.
+ *
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
@@ -8,6 +43,8 @@
#ifndef _ASM_S390_FPU_API_H
#define _ASM_S390_FPU_API_H
+#include <linux/preempt.h>
+
void save_fpu_regs(void);
static inline int test_fp_ctl(u32 fpc)
@@ -22,9 +59,47 @@ static inline int test_fp_ctl(u32 fpc)
" la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (orig_fpc)
+ : "=d" (rc), "=&d" (orig_fpc)
: "d" (fpc), "0" (-EINVAL));
return rc;
}
+#define KERNEL_VXR_V0V7 1
+#define KERNEL_VXR_V8V15 2
+#define KERNEL_VXR_V16V23 4
+#define KERNEL_VXR_V24V31 8
+#define KERNEL_FPR 16
+#define KERNEL_FPC 256
+
+#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
+#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
+#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
+
+#define KERNEL_FPU_MASK (KERNEL_VXR_LOW|KERNEL_VXR_HIGH|KERNEL_FPR)
+
+struct kernel_fpu;
+
+/*
+ * Note that the functions below must be called with preemption disabled.
+ * Do not enable preemption before calling __kernel_fpu_end() to prevent
+ * corruption of an existing kernel FPU state.
+ *
+ * Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
+ */
+void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
+void __kernel_fpu_end(struct kernel_fpu *state);
+
+
+static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
+{
+ preempt_disable();
+ __kernel_fpu_begin(state, flags);
+}
+
+static inline void kernel_fpu_end(struct kernel_fpu *state)
+{
+ __kernel_fpu_end(state);
+ preempt_enable();
+}
+
#endif /* _ASM_S390_FPU_API_H */
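A minimal usage sketch for the kernel_fpu_begin()/kernel_fpu_end() pair added above, assuming a caller that only needs the lower half of the vector registers plus the floating-point controls; the function name and the body placeholder are illustrative and not part of the patch:

static void example_vector_user(void)
{
	struct kernel_fpu state;

	/* disable preemption, save in-use state, claim V0..V15 and the FPC */
	kernel_fpu_begin(&state, KERNEL_VXR_LOW | KERNEL_FPC);

	/* ... vector instructions restricted to V0..V15 ... */

	/* restore the previously in-use registers, re-enable preemption */
	kernel_fpu_end(&state);
}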
diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h
index fe937c9b6471..bce255ead72b 100644
--- a/arch/s390/include/asm/fpu/types.h
+++ b/arch/s390/include/asm/fpu/types.h
@@ -24,4 +24,14 @@ struct fpu {
/* VX array structure for address operand constraints in inline assemblies */
struct vx_array { __vector128 _[__NUM_VXRS]; };
+/* In-kernel FPU state structure */
+struct kernel_fpu {
+ u32 mask;
+ u32 fpc;
+ union {
+ freg_t fprs[__NUM_FPRS];
+ __vector128 vxrs[__NUM_VXRS];
+ };
+};
+
#endif /* _ASM_S390_FPU_TYPES_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index d9be7c0c1291..4c7fac75090e 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -41,7 +41,10 @@ static inline int prepare_hugepage_range(struct file *file,
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
+ if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
+ else
+ pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 6fc44dca193e..4da22b2f0521 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -141,11 +141,11 @@ extern void setup_ipl(void);
* DIAG 308 support
*/
enum diag308_subcode {
- DIAG308_REL_HSA = 2,
- DIAG308_IPL = 3,
- DIAG308_DUMP = 4,
- DIAG308_SET = 5,
- DIAG308_STORE = 6,
+ DIAG308_REL_HSA = 2,
+ DIAG308_LOAD_CLEAR = 3,
+ DIAG308_LOAD_NORMAL_DUMP = 4,
+ DIAG308_SET = 5,
+ DIAG308_STORE = 6,
};
enum diag308_ipl_type {
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index f97b055de76a..70c9bce766f5 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -7,11 +7,8 @@
#define NR_IRQS_BASE 3
-#ifdef CONFIG_PCI_NR_MSI
-# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
-#else
-# define NR_IRQS NR_IRQS_BASE
-#endif
+#define NR_IRQS NR_IRQS_BASE
+#define NR_IRQS_LEGACY NR_IRQS_BASE
/* External interruption codes */
#define EXT_IRQ_INTERRUPT_KEY 0x0040
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 7f9fd5e3f1bf..9be198f5ee79 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <linux/stringify.h>
#define JUMP_LABEL_NOP_SIZE 6
#define JUMP_LABEL_NOP_OFFSET 2
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index b47ad3b642cc..591e5a5279b0 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -43,9 +43,9 @@ typedef u16 kprobe_opcode_t;
#define MAX_INSN_SIZE 0x0003
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
- (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+ (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR))) \
? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+ : (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR)))
#define kretprobe_blacklist_size 0
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 37b9017c6a96..ac82e8eb936d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -245,6 +245,7 @@ struct kvm_vcpu_stat {
u32 exit_stop_request;
u32 exit_validity;
u32 exit_instruction;
+ u32 exit_pei;
u32 halt_successful_poll;
u32 halt_attempted_poll;
u32 halt_poll_invalid;
diff --git a/arch/s390/include/asm/mathemu.h b/arch/s390/include/asm/mathemu.h
deleted file mode 100644
index 614dfaf47f71..000000000000
--- a/arch/s390/include/asm/mathemu.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * IEEE floating point emulation.
- *
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-
-#ifndef __MATHEMU__
-#define __MATHEMU__
-
-extern int math_emu_b3(__u8 *, struct pt_regs *);
-extern int math_emu_ed(__u8 *, struct pt_regs *);
-extern int math_emu_ldr(__u8 *);
-extern int math_emu_ler(__u8 *);
-extern int math_emu_std(__u8 *, struct pt_regs *);
-extern int math_emu_ld(__u8 *, struct pt_regs *);
-extern int math_emu_ste(__u8 *, struct pt_regs *);
-extern int math_emu_le(__u8 *, struct pt_regs *);
-extern int math_emu_lfpc(__u8 *, struct pt_regs *);
-extern int math_emu_stfpc(__u8 *, struct pt_regs *);
-extern int math_emu_srnm(__u8 *, struct pt_regs *);
-
-#endif /* __MATHEMU__ */
-
-
-
-
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 081b2ad99d73..18226437a832 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,7 +6,7 @@
typedef struct {
cpumask_t cpu_attach_mask;
- atomic_t attach_count;
+ atomic_t flush_count;
unsigned int flush_mm;
spinlock_t list_lock;
struct list_head pgtable_list;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c837b79b455d..f77c638bf397 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -19,7 +19,7 @@ static inline int init_new_context(struct task_struct *tsk,
INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
- atomic_set(&mm->context.attach_count, 0);
+ atomic_set(&mm->context.flush_count, 0);
mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste;
@@ -90,15 +90,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
- if (MACHINE_HAS_TLB_LC)
- cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+ cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
/* Clear old ASCE by loading the kernel ASCE. */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
- atomic_inc(&next->context.attach_count);
- atomic_dec(&prev->context.attach_count);
- if (MACHINE_HAS_TLB_LC)
- cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -110,10 +107,9 @@ static inline void finish_arch_post_lock_switch(void)
load_kernel_asce();
if (mm) {
preempt_disable();
- while (atomic_read(&mm->context.attach_count) >> 16)
+ while (atomic_read(&mm->context.flush_count))
cpu_relax();
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
if (mm->context.flush_mm)
__tlb_flush_mm(mm);
preempt_enable();
@@ -128,7 +124,6 @@ static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, current);
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
set_user_asce(next);
}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 53eacbd4f09b..b2146c4119b2 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -21,6 +21,7 @@
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE 2
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
@@ -30,11 +31,12 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
+void __storage_key_init_range(unsigned long start, unsigned long end);
+
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
-#if PAGE_DEFAULT_KEY
- __storage_key_init_range(start, end);
-#endif
+ if (PAGE_DEFAULT_KEY)
+ __storage_key_init_range(start, end);
}
#define clear_page(page) memset((page), 0, PAGE_SIZE)
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 1f7ff85c5e4c..c64c0befd3f3 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -86,16 +86,4 @@ struct sf_raw_sample {
u8 padding[]; /* Padding to next multiple of 8 */
} __packed;
-/* Perf hardware reserve and release functions */
-#ifdef CONFIG_PERF_EVENTS
-int perf_reserve_sampling(void);
-void perf_release_sampling(void);
-#else /* CONFIG_PERF_EVENTS */
-static inline int perf_reserve_sampling(void)
-{
- return 0;
-}
-static inline void perf_release_sampling(void) {}
-#endif /* CONFIG_PERF_EVENTS */
-
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 18d2beb89340..ea1533e07271 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -28,12 +28,33 @@
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
+#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
-extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
+extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
+pmd_t *vmem_pmd_alloc(void);
+pte_t *vmem_pte_alloc(void);
+
+enum {
+ PG_DIRECT_MAP_4K = 0,
+ PG_DIRECT_MAP_1M,
+ PG_DIRECT_MAP_2G,
+ PG_DIRECT_MAP_MAX
+};
+
+extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+static inline void update_page_count(int level, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ atomic_long_add(count, &direct_pages_count[level]);
+}
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
/*
* The S390 doesn't have any external MMU info: the kernel page
@@ -270,8 +291,23 @@ static inline int is_module_addr(void *addr)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
-#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
-#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
+#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
+#define _REGION3_ENTRY_ORIGIN ~0x7ffUL /* region third table origin */
+
+#define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */
+#define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */
+#define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
+#define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */
+#define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
+#else
+#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
+#endif
+
+#define _REGION_ENTRY_BITS 0xfffffffffffff227UL
+#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
@@ -297,7 +333,8 @@ static inline int is_module_addr(void *addr)
#endif
/*
- * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
+ * Segment table and region3 table entry encoding
+ * (R = read-only, I = invalid, y = young bit):
* dy..R...I...rw
* prot-none, clean, old 00..1...1...00
* prot-none, clean, young 01..1...1...00
@@ -391,6 +428,33 @@ static inline int is_module_addr(void *addr)
_SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE)
+#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \
+ _SEGMENT_ENTRY_LARGE | \
+ _SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_WRITE | \
+ _SEGMENT_ENTRY_YOUNG | \
+ _SEGMENT_ENTRY_DIRTY)
+#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
+ _SEGMENT_ENTRY_LARGE | \
+ _SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_YOUNG | \
+ _SEGMENT_ENTRY_PROTECT)
+
+/*
+ * Region3 entry (large page) protection definitions.
+ */
+
+#define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \
+ _REGION3_ENTRY_LARGE | \
+ _REGION3_ENTRY_READ | \
+ _REGION3_ENTRY_WRITE | \
+ _REGION3_ENTRY_YOUNG | \
+ _REGION3_ENTRY_DIRTY)
+#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
+ _REGION3_ENTRY_LARGE | \
+ _REGION3_ENTRY_READ | \
+ _REGION3_ENTRY_YOUNG | \
+ _REGION_ENTRY_PROTECT)
static inline int mm_has_pgste(struct mm_struct *mm)
{
@@ -424,6 +488,53 @@ static inline int mm_use_skey(struct mm_struct *mm)
return 0;
}
+static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
+{
+ register unsigned long reg2 asm("2") = old;
+ register unsigned long reg3 asm("3") = new;
+ unsigned long address = (unsigned long)ptr | 1;
+
+ asm volatile(
+ " csp %0,%3"
+ : "+d" (reg2), "+m" (*ptr)
+ : "d" (reg3), "d" (address)
+ : "cc");
+}
+
+static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
+{
+ register unsigned long reg2 asm("2") = old;
+ register unsigned long reg3 asm("3") = new;
+ unsigned long address = (unsigned long)ptr | 1;
+
+ asm volatile(
+ " .insn rre,0xb98a0000,%0,%3"
+ : "+d" (reg2), "+m" (*ptr)
+ : "d" (reg3), "d" (address)
+ : "cc");
+}
+
+#define CRDTE_DTT_PAGE 0x00UL
+#define CRDTE_DTT_SEGMENT 0x10UL
+#define CRDTE_DTT_REGION3 0x14UL
+#define CRDTE_DTT_REGION2 0x18UL
+#define CRDTE_DTT_REGION1 0x1cUL
+
+static inline void crdte(unsigned long old, unsigned long new,
+ unsigned long table, unsigned long dtt,
+ unsigned long address, unsigned long asce)
+{
+ register unsigned long reg2 asm("2") = old;
+ register unsigned long reg3 asm("3") = new;
+ register unsigned long reg4 asm("4") = table | dtt;
+ register unsigned long reg5 asm("5") = address;
+
+ asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
+ : "+d" (reg2)
+ : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
+ : "memory", "cc");
+}
+
/*
* pgd/pmd/pte query functions
*/
@@ -465,7 +576,7 @@ static inline int pud_none(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
return 0;
- return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
+ return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}
static inline int pud_large(pud_t pud)
@@ -475,17 +586,35 @@ static inline int pud_large(pud_t pud)
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ unsigned long origin_mask;
+
+ origin_mask = _REGION3_ENTRY_ORIGIN;
+ if (pud_large(pud))
+ origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
+ return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ if (pmd_large(pmd))
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
+}
+
static inline int pud_bad(pud_t pud)
{
- /*
- * With dynamic page table levels the pud can be a region table
- * entry or a segment table entry. Check for the bit that are
- * invalid for either table entry.
- */
- unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
- ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
- return (pud_val(pud) & mask) != 0;
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+ return pmd_bad(__pmd(pud_val(pud)));
+ if (pud_large(pud))
+ return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
+ return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}
static inline int pmd_present(pmd_t pmd)
@@ -498,11 +627,6 @@ static inline int pmd_none(pmd_t pmd)
return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}
-static inline int pmd_large(pmd_t pmd)
-{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
-}
-
static inline unsigned long pmd_pfn(pmd_t pmd)
{
unsigned long origin_mask;
@@ -513,13 +637,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
-static inline int pmd_bad(pmd_t pmd)
-{
- if (pmd_large(pmd))
- return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
- return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
-}
-
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
@@ -963,6 +1080,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
+#define pud_page(pud) pfn_to_page(pud_pfn(pud))
/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
@@ -970,20 +1088,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
-static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
-{
- /*
- * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
- * Convert to segment table entry format.
- */
- if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
- return pgprot_val(SEGMENT_NONE);
- if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
- return pgprot_val(SEGMENT_READ);
- return pgprot_val(SEGMENT_WRITE);
-}
-
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
@@ -1020,6 +1124,56 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
return pmd;
}
+static inline pud_t pud_wrprotect(pud_t pud)
+{
+ pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
+ pud_val(pud) |= _REGION_ENTRY_PROTECT;
+ return pud;
+}
+
+static inline pud_t pud_mkwrite(pud_t pud)
+{
+ pud_val(pud) |= _REGION3_ENTRY_WRITE;
+ if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
+ return pud;
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+ return pud;
+}
+
+static inline pud_t pud_mkclean(pud_t pud)
+{
+ if (pud_large(pud)) {
+ pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
+ pud_val(pud) |= _REGION_ENTRY_PROTECT;
+ }
+ return pud;
+}
+
+static inline pud_t pud_mkdirty(pud_t pud)
+{
+ if (pud_large(pud)) {
+ pud_val(pud) |= _REGION3_ENTRY_DIRTY |
+ _REGION3_ENTRY_SOFT_DIRTY;
+ if (pud_val(pud) & _REGION3_ENTRY_WRITE)
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+ }
+ return pud;
+}
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
+{
+ /*
+ * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
+ * Convert to segment table entry format.
+ */
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+ return pgprot_val(SEGMENT_NONE);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+ return pgprot_val(SEGMENT_READ);
+ return pgprot_val(SEGMENT_WRITE);
+}
+
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
if (pmd_large(pmd)) {
@@ -1068,15 +1222,8 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
static inline void __pmdp_csp(pmd_t *pmdp)
{
- register unsigned long reg2 asm("2") = pmd_val(*pmdp);
- register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
- _SEGMENT_ENTRY_INVALID;
- register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
- asm volatile(
- " csp %1,%3"
- : "=m" (*pmdp)
- : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+ csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
+ pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
@@ -1091,6 +1238,19 @@ static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
: "cc" );
}
+static inline void __pudp_idte(unsigned long address, pud_t *pudp)
+{
+ unsigned long r3o;
+
+ r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
+ r3o |= _ASCE_TYPE_REGION3;
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,0"
+ : "=m" (*pudp)
+ : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
+ : "cc");
+}
+
static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
unsigned long sto;
@@ -1103,8 +1263,22 @@ static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
: "cc" );
}
+static inline void __pudp_idte_local(unsigned long address, pud_t *pudp)
+{
+ unsigned long r3o;
+
+ r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
+ r3o |= _ASCE_TYPE_REGION3;
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,1"
+ : "=m" (*pudp)
+ : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
+ : "cc");
+}
+
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
+pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
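The csp() helper added above wraps the compare-and-swap-and-purge instruction; the sketch below shows the pattern used by __tlb_flush_global() later in this patch, where a dummy word is swapped only for its global TLB purge side effect (the example function name is illustrative):

static inline void example_flush_all_tlbs(void)
{
	unsigned int dummy = 0;

	/* the swap itself is irrelevant; the instruction purges the TLBs */
	csp(&dummy, 0, 0);
}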
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9d4d311d7e52..09529202ea77 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -77,7 +77,10 @@ static inline void get_cpu_id(struct cpuid *ptr)
asm volatile("stidp %0" : "=Q" (*ptr));
}
-extern void s390_adjust_jiffies(void);
+void s390_adjust_jiffies(void);
+void s390_update_cpu_mhz(void);
+void cpu_detect_mhz_feature(void);
+
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
@@ -233,6 +236,18 @@ void cpu_relax(void);
#define cpu_relax_lowlatency() barrier()
+#define ECAG_CACHE_ATTRIBUTE 0
+#define ECAG_CPU_ATTRIBUTE 1
+
+static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
+{
+ unsigned long val;
+
+ asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
+ : "=d" (val) : "a" (asi << 8 | parm));
+ return val;
+}
+
static inline void psw_set_key(unsigned int key)
{
asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index c75e4471e618..597e7e96b59e 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -207,41 +207,4 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
}
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " agr %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "d" (delta)
- : "cc", "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " agr %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "d" (delta)
- : "cc", "memory");
- return new;
-}
-
#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index fbd9116eb17b..5ce29fe100ba 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,5 +4,6 @@
#include <asm-generic/sections.h>
extern char _eshared[], _ehead[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index c0f0efbb6ab5..5e8d57e1cc5e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -86,9 +86,13 @@ extern char vmpoff_cmd[];
#define CONSOLE_IS_SCLP (console_mode == 1)
#define CONSOLE_IS_3215 (console_mode == 2)
#define CONSOLE_IS_3270 (console_mode == 3)
+#define CONSOLE_IS_VT220 (console_mode == 4)
+#define CONSOLE_IS_HVC (console_mode == 5)
#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0)
#define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
#define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
+#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
+#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
#define NSS_NAME_SIZE 8
extern char kernel_nss_name[];
diff --git a/arch/s390/include/asm/sfp-machine.h b/arch/s390/include/asm/sfp-machine.h
deleted file mode 100644
index 4e16aede4b06..000000000000
--- a/arch/s390/include/asm/sfp-machine.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* Machine-dependent software floating-point definitions.
- S/390 kernel version.
- Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Richard Henderson (rth@cygnus.com),
- Jakub Jelinek (jj@ultra.linux.cz),
- David S. Miller (davem@redhat.com) and
- Peter Maydell (pmaydell@chiark.greenend.org.uk).
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
- License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with the GNU C Library; see the file COPYING.LIB. If
- not, write to the Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
-
-#ifndef _SFP_MACHINE_H
-#define _SFP_MACHINE_H
-
-
-#define _FP_W_TYPE_SIZE 32
-#define _FP_W_TYPE unsigned int
-#define _FP_WS_TYPE signed int
-#define _FP_I_TYPE int
-
-#define _FP_MUL_MEAT_S(R,X,Y) \
- _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
-#define _FP_MUL_MEAT_D(R,X,Y) \
- _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
-#define _FP_MUL_MEAT_Q(R,X,Y) \
- _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
-
-#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
-#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
-#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
-
-#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
-#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
-#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
-#define _FP_NANSIGN_S 0
-#define _FP_NANSIGN_D 0
-#define _FP_NANSIGN_Q 0
-
-#define _FP_KEEPNANFRACP 1
-
-/*
- * If one NaN is signaling and the other is not,
- * we choose that one, otherwise we choose X.
- */
-#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
- do { \
- if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
- && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
- { \
- R##_s = Y##_s; \
- _FP_FRAC_COPY_##wc(R,Y); \
- } \
- else \
- { \
- R##_s = X##_s; \
- _FP_FRAC_COPY_##wc(R,X); \
- } \
- R##_c = FP_CLS_NAN; \
- } while (0)
-
-/* Some assembly to speed things up. */
-#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) ({ \
- unsigned int __r2 = (x2) + (y2); \
- unsigned int __r1 = (x1); \
- unsigned int __r0 = (x0); \
- asm volatile( \
- " alr %2,%3\n" \
- " brc 12,0f\n" \
- " lhi 0,1\n" \
- " alr %1,0\n" \
- " brc 12,0f\n" \
- " alr %0,0\n" \
- "0:" \
- : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
- : "d" (y0), "i" (1) : "cc", "0" ); \
- asm volatile( \
- " alr %1,%2\n" \
- " brc 12,0f\n" \
- " ahi %0,1\n" \
- "0:" \
- : "+&d" (__r2), "+&d" (__r1) \
- : "d" (y1) : "cc"); \
- (r2) = __r2; \
- (r1) = __r1; \
- (r0) = __r0; \
-})
-
-#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) ({ \
- unsigned int __r2 = (x2) - (y2); \
- unsigned int __r1 = (x1); \
- unsigned int __r0 = (x0); \
- asm volatile( \
- " slr %2,%3\n" \
- " brc 3,0f\n" \
- " lhi 0,1\n" \
- " slr %1,0\n" \
- " brc 3,0f\n" \
- " slr %0,0\n" \
- "0:" \
- : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
- : "d" (y0) : "cc", "0"); \
- asm volatile( \
- " slr %1,%2\n" \
- " brc 3,0f\n" \
- " ahi %0,-1\n" \
- "0:" \
- : "+&d" (__r2), "+&d" (__r1) \
- : "d" (y1) : "cc"); \
- (r2) = __r2; \
- (r1) = __r1; \
- (r0) = __r0; \
-})
-
-#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)
-
-/* Obtain the current rounding mode. */
-#define FP_ROUNDMODE mode
-
-/* Exception flags. */
-#define FP_EX_INVALID 0x800000
-#define FP_EX_DIVZERO 0x400000
-#define FP_EX_OVERFLOW 0x200000
-#define FP_EX_UNDERFLOW 0x100000
-#define FP_EX_INEXACT 0x080000
-
-/* We write the results always */
-#define FP_INHIBIT_RESULTS 0
-
-#endif
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
deleted file mode 100644
index c8b7cf9d6279..000000000000
--- a/arch/s390/include/asm/sfp-util.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \
- unsigned int __sh = (ah); \
- unsigned int __sl = (al); \
- asm volatile( \
- " alr %1,%3\n" \
- " brc 12,0f\n" \
- " ahi %0,1\n" \
- "0: alr %0,%2" \
- : "+&d" (__sh), "+d" (__sl) \
- : "d" (bh), "d" (bl) : "cc"); \
- (sh) = __sh; \
- (sl) = __sl; \
-})
-
-#define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \
- unsigned int __sh = (ah); \
- unsigned int __sl = (al); \
- asm volatile( \
- " slr %1,%3\n" \
- " brc 3,0f\n" \
- " ahi %0,-1\n" \
- "0: slr %0,%2" \
- : "+&d" (__sh), "+d" (__sl) \
- : "d" (bh), "d" (bl) : "cc"); \
- (sh) = __sh; \
- (sl) = __sl; \
-})
-
-/* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */
-#define umul_ppmm(wh, wl, u, v) ({ \
- unsigned int __wh = u; \
- unsigned int __wl = v; \
- asm volatile( \
- " ltr 1,%0\n" \
- " mr 0,%1\n" \
- " jnm 0f\n" \
- " alr 0,%1\n" \
- "0: ltr %1,%1\n" \
- " jnm 1f\n" \
- " alr 0,%0\n" \
- "1: lr %0,0\n" \
- " lr %1,1\n" \
- : "+d" (__wh), "+d" (__wl) \
- : : "0", "1", "cc"); \
- wh = __wh; \
- wl = __wl; \
-})
-
-#define udiv_qrnnd(q, r, n1, n0, d) \
- do { unsigned long __n; \
- unsigned int __r, __d; \
- __n = ((unsigned long)(n1) << 32) + n0; \
- __d = (d); \
- (q) = __n / __d; \
- (r) = __n % __d; \
- } while (0)
-
-#define UDIV_NEEDS_NORMALIZATION 0
-
-#define abort() BUG()
-
-#define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 1c8f33fca356..72df5f2de6b0 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -37,8 +37,8 @@
#ifndef __ASSEMBLY__
-static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
- u32 *status)
+static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
+ u32 *status)
{
register unsigned long reg1 asm ("1") = parm;
int cc;
@@ -48,8 +48,19 @@ static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
+ *status = reg1;
+ return cc;
+}
+
+static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
+ u32 *status)
+{
+ u32 _status;
+ int cc;
+
+ cc = ____pcpu_sigp(addr, order, parm, &_status);
if (status && cc == 1)
- *status = reg1;
+ *status = _status;
return cc;
}
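The split above separates the raw instruction wrapper from the status handling: ____pcpu_sigp() always stores the returned status word, while __pcpu_sigp() copies it out only when the order is rejected with condition code 1. A sketch of a caller, assuming the SIGP_SENSE order code defined earlier in this header (the function name is illustrative):

static int example_sense_cpu(u16 addr)
{
	u32 status = 0;
	int cc;

	cc = __pcpu_sigp(addr, SIGP_SENSE, 0, &status);
	if (cc == 1)	/* order rejected, status word is valid */
		return -EBUSY;
	return cc;
}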
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 63ebf37d3143..7e9e09f600fa 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -10,6 +10,8 @@
#define __ASM_SPINLOCK_H
#include <linux/smp.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
@@ -97,6 +99,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (arch_spin_is_locked(lock))
arch_spin_relax(lock);
+ smp_acquire__after_ctrl_dep();
}
/*
diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h
new file mode 100644
index 000000000000..7689727585b2
--- /dev/null
+++ b/arch/s390/include/asm/stp.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+#ifndef __S390_STP_H
+#define __S390_STP_H
+
+/* notifier for syncs */
+extern struct atomic_notifier_head s390_epoch_delta_notifier;
+
+/* STP interruption parameter */
+struct stp_irq_parm {
+ unsigned int _pad0 : 14;
+ unsigned int tsc : 1; /* Timing status change */
+ unsigned int lac : 1; /* Link availability change */
+ unsigned int tcpc : 1; /* Time control parameter change */
+ unsigned int _pad2 : 15;
+} __attribute__ ((packed));
+
+#define STP_OP_SYNC 1
+#define STP_OP_CTRL 3
+
+struct stp_sstpi {
+ unsigned int rsvd0;
+ unsigned int rsvd1 : 8;
+ unsigned int stratum : 8;
+ unsigned int vbits : 16;
+ unsigned int leaps : 16;
+ unsigned int tmd : 4;
+ unsigned int ctn : 4;
+ unsigned int rsvd2 : 3;
+ unsigned int c : 1;
+ unsigned int tst : 4;
+ unsigned int tzo : 16;
+ unsigned int dsto : 16;
+ unsigned int ctrl : 16;
+ unsigned int rsvd3 : 16;
+ unsigned int tto;
+ unsigned int rsvd4;
+ unsigned int ctnid[3];
+ unsigned int rsvd5;
+ unsigned int todoff[4];
+ unsigned int rsvd6[48];
+} __attribute__ ((packed));
+
+/* Functions needed by the machine check handler */
+int stp_sync_check(void);
+int stp_island_check(void);
+void stp_queue_work(void);
+
+#endif /* __S390_STP_H */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index dcb6312a0b91..0bb08f341c09 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -52,6 +52,70 @@ static inline void store_clock_comparator(__u64 *time)
void clock_comparator_work(void);
+void __init ptff_init(void);
+
+extern unsigned char ptff_function_mask[16];
+extern unsigned long lpar_offset;
+extern unsigned long initial_leap_seconds;
+
+/* Function codes for the ptff instruction. */
+#define PTFF_QAF 0x00 /* query available functions */
+#define PTFF_QTO 0x01 /* query tod offset */
+#define PTFF_QSI 0x02 /* query steering information */
+#define PTFF_QUI 0x04 /* query UTC information */
+#define PTFF_ATO 0x40 /* adjust tod offset */
+#define PTFF_STO 0x41 /* set tod offset */
+#define PTFF_SFS 0x42 /* set fine steering rate */
+#define PTFF_SGS 0x43 /* set gross steering rate */
+
+/* Query TOD offset result */
+struct ptff_qto {
+ unsigned long long physical_clock;
+ unsigned long long tod_offset;
+ unsigned long long logical_tod_offset;
+ unsigned long long tod_epoch_difference;
+} __packed;
+
+static inline int ptff_query(unsigned int nr)
+{
+ unsigned char *ptr;
+
+ ptr = ptff_function_mask + (nr >> 3);
+ return (*ptr & (0x80 >> (nr & 7))) != 0;
+}
+
+/* Query UTC information result */
+struct ptff_qui {
+ unsigned int tm : 2;
+ unsigned int ts : 2;
+ unsigned int : 28;
+ unsigned int pad_0x04;
+ unsigned long leap_event;
+ short old_leap;
+ short new_leap;
+ unsigned int pad_0x14;
+ unsigned long prt[5];
+ unsigned long cst[3];
+ unsigned int skew;
+ unsigned int pad_0x5c[41];
+} __packed;
+
+static inline int ptff(void *ptff_block, size_t len, unsigned int func)
+{
+ typedef struct { char _[len]; } addrtype;
+ register unsigned int reg0 asm("0") = func;
+ register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
+ int rc;
+
+ asm volatile(
+ " .word 0x0104\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (rc), "+m" (*(addrtype *) ptff_block)
+ : "d" (reg0), "d" (reg1) : "cc");
+ return rc;
+}
+
static inline unsigned long long local_tick_disable(void)
{
unsigned long long old;
@@ -105,7 +169,7 @@ static inline cycles_t get_cycles(void)
return (cycles_t) get_tod_clock() >> 2;
}
-int get_sync_clock(unsigned long long *clock);
+int get_phys_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
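A sketch of how the new PTFF helpers fit together: availability is checked with ptff_query(), the query block is filled by ptff(), and struct ptff_qto carries the physical clock and TOD offsets. The function name is illustrative and not part of the patch:

static inline unsigned long long example_physical_clock(void)
{
	struct ptff_qto qto;

	if (!ptff_query(PTFF_QTO))
		return 0;	/* PTFF-QTO not installed */
	if (ptff(&qto, sizeof(qto), PTFF_QTO) != 0)
		return 0;	/* query rejected */
	return qto.physical_clock;
}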
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7a92e69c50bc..15711de10403 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -87,10 +87,10 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb,
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
*/
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
+ return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
@@ -98,6 +98,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
free_page_and_swap_cache(page);
}
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return tlb_remove_page(tlb, page);
+}
+
/*
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index a2e6ef32e054..1a691ef740cf 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
/*
* Flush all TLB entries on the local CPU.
@@ -44,17 +45,9 @@ void smp_ptlb_all(void);
*/
static inline void __tlb_flush_global(void)
{
- register unsigned long reg2 asm("2");
- register unsigned long reg3 asm("3");
- register unsigned long reg4 asm("4");
- long dummy;
-
- dummy = 0;
- reg2 = reg3 = 0;
- reg4 = ((unsigned long) &dummy) + 1;
- asm volatile(
- " csp %0,%2"
- : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
+ unsigned int dummy = 0;
+
+ csp(&dummy, 0, 0);
}
/*
@@ -64,7 +57,7 @@ static inline void __tlb_flush_global(void)
static inline void __tlb_flush_full(struct mm_struct *mm)
{
preempt_disable();
- atomic_add(0x10000, &mm->context.attach_count);
+ atomic_inc(&mm->context.flush_count);
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
/* Local TLB flush */
__tlb_flush_local();
@@ -76,21 +69,19 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
cpumask_copy(mm_cpumask(mm),
&mm->context.cpu_attach_mask);
}
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
preempt_enable();
}
/*
- * Flush TLB entries for a specific ASCE on all CPUs.
+ * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
+ * when more than one ASCE (e.g. a gmap) has been active on this mm.
*/
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
- int active, count;
-
preempt_disable();
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ atomic_inc(&mm->context.flush_count);
+ if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
__tlb_flush_idte_local(asce);
} else {
@@ -103,7 +94,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
cpumask_copy(mm_cpumask(mm),
&mm->context.cpu_attach_mask);
}
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
preempt_enable();
}
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 6b53962e807e..f15f5571ca2b 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -14,10 +14,12 @@ struct cpu_topology_s390 {
unsigned short core_id;
unsigned short socket_id;
unsigned short book_id;
+ unsigned short drawer_id;
unsigned short node_id;
cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
+ cpumask_t drawer_mask;
};
DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
@@ -30,6 +32,8 @@ DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
+#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
+#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
#define mc_capable() 1
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index e0900ddf91dd..9b49cf1daa8f 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -151,8 +151,65 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
__rc; \
})
-#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
-#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)
+static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+{
+ unsigned long spec = 0x810000UL;
+ int rc;
+
+ switch (size) {
+ case 1:
+ rc = __put_get_user_asm((unsigned char __user *)ptr,
+ (unsigned char *)x,
+ size, spec);
+ break;
+ case 2:
+ rc = __put_get_user_asm((unsigned short __user *)ptr,
+ (unsigned short *)x,
+ size, spec);
+ break;
+ case 4:
+ rc = __put_get_user_asm((unsigned int __user *)ptr,
+ (unsigned int *)x,
+ size, spec);
+ break;
+ case 8:
+ rc = __put_get_user_asm((unsigned long __user *)ptr,
+ (unsigned long *)x,
+ size, spec);
+ break;
+ };
+ return rc;
+}
+
+static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+{
+ unsigned long spec = 0x81UL;
+ int rc;
+
+ switch (size) {
+ case 1:
+ rc = __put_get_user_asm((unsigned char *)x,
+ (unsigned char __user *)ptr,
+ size, spec);
+ break;
+ case 2:
+ rc = __put_get_user_asm((unsigned short *)x,
+ (unsigned short __user *)ptr,
+ size, spec);
+ break;
+ case 4:
+ rc = __put_get_user_asm((unsigned int *)x,
+ (unsigned int __user *)ptr,
+ size, spec);
+ break;
+ case 8:
+ rc = __put_get_user_asm((unsigned long *)x,
+ (unsigned long __user *)ptr,
+ size, spec);
+ break;
+ };
+ return rc;
+}
#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
@@ -191,7 +248,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
__put_user_bad(); \
break; \
} \
- __pu_err; \
+ __builtin_expect(__pu_err, 0); \
})
#define put_user(x, ptr) \
@@ -240,7 +297,7 @@ int __put_user_bad(void) __attribute__((noreturn));
__get_user_bad(); \
break; \
} \
- __gu_err; \
+ __builtin_expect(__gu_err, 0); \
})
#define get_user(x, ptr) \
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index a150f4fabe43..77630c74f13b 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -359,9 +359,9 @@ typedef struct
per_cr_bits bits;
} control_regs;
/*
- * Use these flags instead of setting em_instruction_fetch
- * directly they are used so that single stepping can be
- * switched on & off while not affecting other tracing
+ * The single_step and instruction_fetch bits are obsolete;
+ * the kernel always sets them to zero. To enable single
+ * stepping, use ptrace(PTRACE_SINGLESTEP) instead.
*/
unsigned single_step : 1;
unsigned instruction_fetch : 1;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2f5586ab8a6a..f37be37edd3a 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,6 +2,9 @@
# Makefile for the linux kernel.
#
+KCOV_INSTRUMENT_early.o := n
+KCOV_INSTRUMENT_sclp.o := n
+
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
@@ -45,7 +48,7 @@ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += runtime_instr.o cache.o dumpstack.o
+obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
obj-y += entry.o reipl.o relocate_kernel.o
extra-y += head.o head64.o vmlinux.lds
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 77a84bd78be2..c8a83276a4dc 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -99,12 +99,7 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
static inline unsigned long ecag(int ai, int li, int ti)
{
- unsigned long cmd, val;
-
- cmd = ai << 4 | li << 1 | ti;
- asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
- : "=d" (val) : "a" (cmd));
- return val;
+ return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
}
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 8cb9bfdd3ea8..43446fa2a4e5 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -26,7 +26,6 @@
#include <asm/dis.h>
#include <asm/io.h>
#include <linux/atomic.h>
-#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 69f9908ac44c..6693383bc01b 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -78,14 +78,10 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
sp = __dump_trace(func, data, sp,
S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
S390_lowcore.async_stack + frame_size);
- if (task)
- __dump_trace(func, data, sp,
- (unsigned long)task_stack_page(task),
- (unsigned long)task_stack_page(task) + THREAD_SIZE);
- else
- __dump_trace(func, data, sp,
- S390_lowcore.thread_info,
- S390_lowcore.thread_info + THREAD_SIZE);
+ task = task ?: current;
+ __dump_trace(func, data, sp,
+ (unsigned long)task_stack_page(task),
+ (unsigned long)task_stack_page(task) + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index a0684de5a93b..717b03aa16b5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -231,6 +231,26 @@ static noinline __init void detect_machine_type(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
+static noinline __init void setup_arch_string(void)
+{
+ struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
+
+ if (stsi(mach, 1, 1, 1))
+ return;
+ EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
+ EBCASC(mach->type, sizeof(mach->type));
+ EBCASC(mach->model, sizeof(mach->model));
+ EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
+ dump_stack_set_arch_desc("%-16.16s %-4.4s %-16.16s %-16.16s (%s)",
+ mach->manufacturer,
+ mach->type,
+ mach->model,
+ mach->model_capacity,
+ MACHINE_IS_LPAR ? "LPAR" :
+ MACHINE_IS_VM ? "z/VM" :
+ MACHINE_IS_KVM ? "KVM" : "unknown");
+}
+
static __init void setup_topology(void)
{
int max_mnest;
@@ -447,11 +467,13 @@ void __init startup_init(void)
ipl_save_parameters();
rescue_initrd();
clear_bss_section();
+ ptff_init();
init_kernel_storage_key();
lockdep_off();
setup_lowcore_early();
setup_facility_list();
detect_machine_type();
+ setup_arch_string();
ipl_update_parameters();
setup_boot_command_line();
create_kernel_nss();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 2d47f9cfcb36..c51650a1ed16 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -163,6 +163,16 @@ _PIF_WORK = (_PIF_PER_TRAP)
.endm
.section .kprobes.text, "ax"
+.Ldummy:
+ /*
+ * This nop exists only to prevent __switch_to from starting at
+ * the beginning of the kprobes text section. In that case several
+ * symbols would share the same address, and e.g. objdump would
+ * pick an arbitrary symbol name when disassembling this code.
+ * With the added nop in between, the __switch_to symbol is
+ * unique again.
+ */
+ nop 0
/*
* Scheduler resume function, called by switch_to
@@ -175,7 +185,6 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lgr %r1,%r2
aghi %r1,__TASK_thread # thread_struct of prev task
- lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
lg %r5,__TASK_thread_info(%r3) # get thread_info of next
stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
lgr %r1,%r3
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
new file mode 100644
index 000000000000..81d1d1887507
--- /dev/null
+++ b/arch/s390/kernel/fpu.c
@@ -0,0 +1,249 @@
+/*
+ * In-kernel vector facility support functions
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <asm/fpu/types.h>
+#include <asm/fpu/api.h>
+
+/*
+ * Per-CPU variable to maintain FPU register ranges that are in use
+ * by the kernel.
+ */
+static DEFINE_PER_CPU(u32, kernel_fpu_state);
+
+#define KERNEL_FPU_STATE_MASK (KERNEL_FPU_MASK|KERNEL_FPC)
+
+
+void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
+{
+ if (!__this_cpu_read(kernel_fpu_state)) {
+ /*
+ * Save user space FPU state and register contents. Repeated
+ * calls caused by interruptions are harmless and return
+ * immediately. This also sets CIF_FPU to lazily restore FP/VX
+ * register contents when returning to user space.
+ */
+ save_fpu_regs();
+ }
+
+ /* Update flags to use the vector facility for KERNEL_FPR */
+ if (MACHINE_HAS_VX && (state->mask & KERNEL_FPR)) {
+ flags |= KERNEL_VXR_LOW | KERNEL_FPC;
+ flags &= ~KERNEL_FPR;
+ }
+
+ /* Save and update current kernel VX state */
+ state->mask = __this_cpu_read(kernel_fpu_state);
+ __this_cpu_or(kernel_fpu_state, flags & KERNEL_FPU_STATE_MASK);
+
+ /*
+ * If this is the first call to __kernel_fpu_begin(), no additional
+ * work is required.
+ */
+ if (!(state->mask & KERNEL_FPU_STATE_MASK))
+ return;
+
+ /*
+ * If KERNEL_FPR is still set, the vector facility is not available
+ * and, thus, save floating-point control and registers only.
+ */
+ if (state->mask & KERNEL_FPR) {
+ asm volatile("stfpc %0" : "=Q" (state->fpc));
+ asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
+ asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
+ asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
+ asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
+ asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
+ asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
+ asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
+ asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
+ asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
+ asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
+ asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
+ asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
+ asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
+ asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
+ asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
+ asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
+ return;
+ }
+
+ /*
+ * If this is a nested call to __kernel_fpu_begin(), check the saved
+ * state mask to save and later restore the vector registers that
+ * are already in use. Let's start with checking floating-point
+ * controls.
+ */
+ if (state->mask & KERNEL_FPC)
+ asm volatile("stfpc %0" : "=m" (state->fpc));
+
+ /* Test and save vector registers */
+ asm volatile (
+ /*
+ * Test if any vector register must be saved and, if so,
+ * test if all registers can be saved at once.
+ */
+ " tmll %[m],15\n" /* KERNEL_VXR_MASK */
+ " jz 20f\n" /* no work -> done */
+ " la 1,%[vxrs]\n" /* load save area */
+ " jo 18f\n" /* -> save V0..V31 */
+
+ /*
+ * Test if V8..V23 can be saved at once... this speeds up
+ * for KERNEL_VXR_MID only. Otherwise continue to split the
+ * range of vector registers into two halves and test them
+ * separately.
+ */
+ " tmll %[m],6\n" /* KERNEL_VXR_MID */
+ " jo 17f\n" /* -> save V8..V23 */
+
+ /* Test and save the first half of 16 vector registers */
+ "1: tmll %[m],3\n" /* KERNEL_VXR_LOW */
+ " jz 10f\n" /* -> KERNEL_VXR_HIGH */
+ " jo 2f\n" /* 11 -> save V0..V15 */
+ " brc 4,3f\n" /* 01 -> save V0..V7 */
+ " brc 2,4f\n" /* 10 -> save V8..V15 */
+
+ /* Test and save the second half of 16 vector registers */
+ "10: tmll %[m],12\n" /* KERNEL_VXR_HIGH */
+ " jo 19f\n" /* 11 -> save V16..V31 */
+ " brc 4,11f\n" /* 01 -> save V16..V23 */
+ " brc 2,12f\n" /* 10 -> save V24..V31 */
+ " j 20f\n" /* 00 -> done */
+
+ /*
+ * Below are the vstm combinations to save multiple vector
+ * registers at once.
+ */
+ "2: .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
+ " j 10b\n" /* -> VXR_HIGH */
+ "3: .word 0xe707,0x1000,0x003e\n" /* vstm 0,7,0(1) */
+ " j 10b\n" /* -> VXR_HIGH */
+ "4: .word 0xe78f,0x1080,0x003e\n" /* vstm 8,15,128(1) */
+ " j 10b\n" /* -> VXR_HIGH */
+ "\n"
+ "11: .word 0xe707,0x1100,0x0c3e\n" /* vstm 16,23,256(1) */
+ " j 20f\n" /* -> done */
+ "12: .word 0xe78f,0x1180,0x0c3e\n" /* vstm 24,31,384(1) */
+ " j 20f\n" /* -> done */
+ "\n"
+ "17: .word 0xe787,0x1080,0x043e\n" /* vstm 8,23,128(1) */
+ " nill %[m],249\n" /* m &= ~VXR_MID */
+ " j 1b\n" /* -> VXR_LOW */
+ "\n"
+ "18: .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
+ "19: .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
+ "20:"
+ : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
+ : [m] "d" (state->mask)
+ : "1", "cc");
+}
+EXPORT_SYMBOL(__kernel_fpu_begin);
+
+void __kernel_fpu_end(struct kernel_fpu *state)
+{
+ /* Just update the per-CPU state if there is nothing to restore */
+ if (!(state->mask & KERNEL_FPU_STATE_MASK))
+ goto update_fpu_state;
+
+ /*
+ * If KERNEL_FPR is specified, the vector facility is not available
+ * and, thus, restore floating-point control and registers only.
+ */
+ if (state->mask & KERNEL_FPR) {
+ asm volatile("lfpc %0" : : "Q" (state->fpc));
+ asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
+ asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
+ asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
+ asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
+ asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
+ asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
+ asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
+ asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
+ asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
+ asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
+ asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
+ asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
+ asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
+ asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
+ asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
+ asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
+ goto update_fpu_state;
+ }
+
+ /* Test and restore floating-point controls */
+ if (state->mask & KERNEL_FPC)
+ asm volatile("lfpc %0" : : "Q" (state->fpc));
+
+ /* Test and restore (load) vector registers */
+ asm volatile (
+ /*
+ * Test if any vector registers must be loaded and, if so,
+ * test if all registers can be loaded at once.
+ */
+ " tmll %[m],15\n" /* KERNEL_VXR_MASK */
+ " jz 20f\n" /* no work -> done */
+ " la 1,%[vxrs]\n" /* load restore area */
+ " jo 18f\n" /* -> load V0..V31 */
+
+ /*
+ * Test if V8..V23 can be restored at once... this speeds up
+ * for KERNEL_VXR_MID only. Otherwise continue to split the
+ * range of vector registers into two halves and test them
+ * separately.
+ */
+ " tmll %[m],6\n" /* KERNEL_VXR_MID */
+ " jo 17f\n" /* -> load V8..V23 */
+
+ /* Test and load the first half of 16 vector registers */
+ "1: tmll %[m],3\n" /* KERNEL_VXR_LOW */
+ " jz 10f\n" /* -> KERNEL_VXR_HIGH */
+ " jo 2f\n" /* 11 -> load V0..V15 */
+ " brc 4,3f\n" /* 01 -> load V0..V7 */
+ " brc 2,4f\n" /* 10 -> load V8..V15 */
+
+ /* Test and load the second half of 16 vector registers */
+ "10: tmll %[m],12\n" /* KERNEL_VXR_HIGH */
+ " jo 19f\n" /* 11 -> load V16..V31 */
+ " brc 4,11f\n" /* 01 -> load V16..V23 */
+ " brc 2,12f\n" /* 10 -> load V24..V31 */
+ " j 20f\n" /* 00 -> done */
+
+ /*
+ * Below are the vlm combinations to load multiple vector
+ * registers at once.
+ */
+ "2: .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
+ " j 10b\n" /* -> VXR_HIGH */
+ "3: .word 0xe707,0x1000,0x0036\n" /* vlm 0,7,0(1) */
+ " j 10b\n" /* -> VXR_HIGH */
+ "4: .word 0xe78f,0x1080,0x0036\n" /* vlm 8,15,128(1) */
+ " j 10b\n" /* -> VXR_HIGH */
+ "\n"
+ "11: .word 0xe707,0x1100,0x0c36\n" /* vlm 16,23,256(1) */
+ " j 20f\n" /* -> done */
+ "12: .word 0xe78f,0x1180,0x0c36\n" /* vlm 24,31,384(1) */
+ " j 20f\n" /* -> done */
+ "\n"
+ "17: .word 0xe787,0x1080,0x0436\n" /* vlm 8,23,128(1) */
+ " nill %[m],249\n" /* m &= ~VXR_MID */
+ " j 1b\n" /* -> VXR_LOW */
+ "\n"
+ "18: .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
+ "19: .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
+ "20:"
+ :
+ : [vxrs] "Q" (*(struct vx_array *) &state->vxrs),
+ [m] "d" (state->mask)
+ : "1", "cc");
+
+update_fpu_state:
+ /* Update current kernel VX state */
+ __this_cpu_write(kernel_fpu_state, state->mask);
+}
+EXPORT_SYMBOL(__kernel_fpu_end);
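
For readers of this hunk, a minimal usage sketch of the in-kernel FPU interface the new fpu.c implements. It relies only on what the patch itself shows: the kernel_fpu_begin()/kernel_fpu_end() wrappers and struct kernel_fpu from <asm/fpu/api.h> (as used in the sysinfo.c hunk below) and the KERNEL_VXR_LOW mask bit tested by __kernel_fpu_begin(). The function name and the work done between begin and end are illustrative placeholders, not part of the patch.

/*
 * Hedged sketch: bracketing vector-register use in kernel context.
 * kernel_fpu_begin() makes the registers named in the mask available
 * for kernel use, saving any previous contents that are still needed;
 * kernel_fpu_end() restores whatever was saved.
 */
#include <asm/fpu/api.h>

static void example_vxr_user(void)
{
	struct kernel_fpu fpu;

	kernel_fpu_begin(&fpu, KERNEL_VXR_LOW);	/* covers V0..V15 */
	/* ... use vector registers V0..V15 here ... */
	kernel_fpu_end(&fpu);
}
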
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f20abdb5630a..295bfb7124bc 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -121,9 +121,9 @@ static char *dump_type_str(enum dump_type type)
* Must be in data section since the bss section
* is not cleared when these are accessed.
*/
-static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
-static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
-u32 ipl_flags __attribute__((__section__(".data"))) = 0;
+static u8 ipl_ssid __section(.data) = 0;
+static u16 ipl_devno __section(.data) = 0;
+u32 ipl_flags __section(.data) = 0;
enum ipl_method {
REIPL_METHOD_CCW_CIO,
@@ -174,7 +174,7 @@ static inline int __diag308(unsigned long subcode, void *addr)
asm volatile(
" diag %0,%2,0x308\n"
- "0:\n"
+ "0: nopr %%r7\n"
EX_TABLE(0b,0b)
: "+d" (_addr), "+d" (_rc)
: "d" (subcode) : "cc", "memory");
@@ -563,7 +563,7 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
- diag308(DIAG308_IPL, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
else if (ipl_info.type == IPL_TYPE_CCW)
@@ -1085,21 +1085,24 @@ static void __reipl_run(void *unused)
break;
case REIPL_METHOD_CCW_DIAG:
diag308(DIAG308_SET, reipl_block_ccw);
- diag308(DIAG308_IPL, NULL);
+ if (MACHINE_IS_LPAR)
+ diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
+ else
+ diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case REIPL_METHOD_FCP_RW_DIAG:
diag308(DIAG308_SET, reipl_block_fcp);
- diag308(DIAG308_IPL, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case REIPL_METHOD_FCP_RO_DIAG:
- diag308(DIAG308_IPL, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case REIPL_METHOD_FCP_RO_VM:
__cpcmd("IPL", NULL, 0, NULL);
break;
case REIPL_METHOD_NSS_DIAG:
diag308(DIAG308_SET, reipl_block_nss);
- diag308(DIAG308_IPL, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case REIPL_METHOD_NSS:
get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
@@ -1108,7 +1111,7 @@ static void __reipl_run(void *unused)
case REIPL_METHOD_DEFAULT:
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
- diag308(DIAG308_IPL, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case REIPL_METHOD_FCP_DUMP:
break;
@@ -1423,7 +1426,7 @@ static void diag308_dump(void *dump_block)
{
diag308(DIAG308_SET, dump_block);
while (1) {
- if (diag308(DIAG308_DUMP, NULL) != 0x302)
+ if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
break;
udelay_simple(USEC_PER_SEC);
}
@@ -2064,12 +2067,5 @@ void s390_reset_system(void)
S390_lowcore.program_new_psw.addr =
(unsigned long) s390_base_pgm_handler;
- /*
- * Clear subchannel ID and number to signal new kernel that no CCW or
- * SCSI IPL has been done (for kexec and kdump)
- */
- S390_lowcore.subchannel_id = 0;
- S390_lowcore.subchannel_nr = 0;
-
do_reset_calls();
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index c373a1d41d10..285d6561076d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -127,9 +127,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
}
- if (index < NR_IRQS) {
- if (index >= NR_IRQS_BASE)
- goto out;
+ if (index < NR_IRQS_BASE) {
seq_printf(p, "%s: ", irqclass_main_desc[index].name);
irq = irqclass_main_desc[index].irq;
for_each_online_cpu(cpu)
@@ -137,6 +135,9 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
goto out;
}
+ if (index > NR_IRQS_BASE)
+ goto out;
+
for (index = 0; index < NR_ARCH_IRQS; index++) {
seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
irq = irqclass_sub_desc[index].irq;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 0e64f08d3d69..3074c1d83829 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -24,6 +24,7 @@
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
@@ -60,8 +61,6 @@ static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
static int __init machine_kdump_pm_init(void)
{
pm_notifier(machine_kdump_pm_cb, 0);
- /* Create initial mapping for crashkernel memory */
- arch_kexec_unprotect_crashkres();
return 0;
}
arch_initcall(machine_kdump_pm_init);
@@ -150,42 +149,40 @@ static int kdump_csum_valid(struct kimage *image)
#ifdef CONFIG_CRASH_DUMP
-/*
- * Map or unmap crashkernel memory
- */
-static void crash_map_pages(int enable)
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
- unsigned long size = resource_size(&crashk_res);
-
- BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
- size % KEXEC_CRASH_MEM_ALIGN);
- if (enable)
- vmem_add_mapping(crashk_res.start, size);
- else {
- vmem_remove_mapping(crashk_res.start, size);
- if (size)
- os_info_crashkernel_add(crashk_res.start, size);
- else
- os_info_crashkernel_add(0, 0);
- }
+ unsigned long addr, size;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE)
+ free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
+ size = begin - crashk_res.start;
+ if (size)
+ os_info_crashkernel_add(crashk_res.start, size);
+ else
+ os_info_crashkernel_add(0, 0);
+}
+
+static void crash_protect_pages(int protect)
+{
+ unsigned long size;
+
+ if (!crashk_res.end)
+ return;
+ size = resource_size(&crashk_res);
+ if (protect)
+ set_memory_ro(crashk_res.start, size >> PAGE_SHIFT);
+ else
+ set_memory_rw(crashk_res.start, size >> PAGE_SHIFT);
}
-/*
- * Unmap crashkernel memory
- */
void arch_kexec_protect_crashkres(void)
{
- if (crashk_res.end)
- crash_map_pages(0);
+ crash_protect_pages(1);
}
-/*
- * Map crashkernel memory
- */
void arch_kexec_unprotect_crashkres(void)
{
- if (crashk_res.end)
- crash_map_pages(1);
+ crash_protect_pages(0);
}
#endif
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 07302ce37648..29376f0e725c 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
-#include <asm/etr.h>
+#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
@@ -27,7 +27,6 @@ struct mcck_struct {
unsigned int kill_task : 1;
unsigned int channel_report : 1;
unsigned int warning : 1;
- unsigned int etr_queue : 1;
unsigned int stp_queue : 1;
unsigned long mcck_code;
};
@@ -82,8 +81,6 @@ void s390_handle_mcck(void)
if (xchg(&mchchk_wng_posted, 1) == 0)
kill_cad_pid(SIGPWR, 1);
}
- if (mcck.etr_queue)
- etr_queue_work();
if (mcck.stp_queue)
stp_queue_work();
if (mcck.kill_task) {
@@ -241,8 +238,6 @@ static int notrace s390_validate_registers(union mci mci)
#define ED_STP_ISLAND 6 /* External damage STP island check */
#define ED_STP_SYNC 7 /* External damage STP sync check */
-#define ED_ETR_SYNC 12 /* External damage ETR sync check */
-#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
/*
* machine check handler.
@@ -325,15 +320,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
}
if (mci.ed && mci.ec) {
/* External damage */
- if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
- mcck->etr_queue |= etr_sync_check();
- if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
- mcck->etr_queue |= etr_switch_to_local();
if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
mcck->stp_queue |= stp_sync_check();
if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
mcck->stp_queue |= stp_island_check();
- if (mcck->etr_queue || mcck->stp_queue)
+ if (mcck->stp_queue)
set_cpu_flag(CIF_MCCK_PENDING);
}
if (mci.se)
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 59215c518f37..037c2a253ae4 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
+ .task_ctx_nr = perf_sw_context,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
.pmu_enable = cpumf_pmu_enable,
.pmu_disable = cpumf_pmu_disable,
.event_init = cpumf_pmu_event_init,
@@ -662,30 +664,22 @@ static struct pmu cpumf_pmu = {
.cancel_txn = cpumf_pmu_cancel_txn,
};
-static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
- void *hcpu)
+static int cpumf_pmf_setup(unsigned int cpu, int flags)
{
- int flags;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- flags = PMC_INIT;
- local_irq_disable();
- setup_pmc_cpu(&flags);
- local_irq_enable();
- break;
- case CPU_DOWN_PREPARE:
- flags = PMC_RELEASE;
- local_irq_disable();
- setup_pmc_cpu(&flags);
- local_irq_enable();
- break;
- default:
- break;
- }
+ local_irq_disable();
+ setup_pmc_cpu(&flags);
+ local_irq_enable();
+ return 0;
+}
- return NOTIFY_OK;
+static int s390_pmu_online_cpu(unsigned int cpu)
+{
+ return cpumf_pmf_setup(cpu, PMC_INIT);
+}
+
+static int s390_pmu_offline_cpu(unsigned int cpu)
+{
+ return cpumf_pmf_setup(cpu, PMC_RELEASE);
}
static int __init cpumf_pmu_init(void)
@@ -705,25 +699,19 @@ static int __init cpumf_pmu_init(void)
if (rc) {
pr_err("Registering for CPU-measurement alerts "
"failed with rc=%i\n", rc);
- goto out;
+ return rc;
}
- /* The CPU measurement counter facility does not have overflow
- * interrupts to do sampling. Sampling must be provided by
- * external means, for example, by timers.
- */
- cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
- goto out;
+ return rc;
}
- perf_cpu_notifier(cpumf_pmu_notifier);
-out:
- return rc;
+ return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
+ "AP_PERF_S390_CF_ONLINE",
+ s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);
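
The conversion above replaces the old CPU notifier with the hotplug state machine. A minimal sketch of that pattern follows, assuming only the cpuhp_setup_state() call used in the patch; the CPUHP_AP_ONLINE_DYN state, the state name, and the callback bodies are illustrative rather than taken from the patch.

/*
 * Hedged sketch of the cpuhp pattern: register an online and an offline
 * callback once; the core invokes them for every CPU that comes up or
 * goes down, including CPUs that are already online at setup time.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int example_online_cpu(unsigned int cpu)
{
	/* per-CPU initialization, runs on the CPU that came online */
	return 0;
}

static int example_offline_cpu(unsigned int cpu)
{
	/* per-CPU teardown, runs while the CPU is going offline */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_online_cpu, example_offline_cpu);
	/* a dynamic state returns the allocated state number on success */
	return ret < 0 ? ret : 0;
}
early_initcall(example_init);
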
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index a8e832166417..fcc634c1479a 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -601,17 +601,12 @@ static void release_pmc_hardware(void)
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
on_each_cpu(setup_pmc_cpu, &flags, 1);
- perf_release_sampling();
}
static int reserve_pmc_hardware(void)
{
int flags = PMC_INIT;
- int err;
- err = perf_reserve_sampling();
- if (err)
- return err;
on_each_cpu(setup_pmc_cpu, &flags, 1);
if (flags & PMC_FAILURE) {
release_pmc_hardware();
@@ -979,12 +974,15 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
struct pt_regs regs;
struct perf_sf_sde_regs *sde_regs;
struct perf_sample_data data;
- struct perf_raw_record raw;
+ struct perf_raw_record raw = {
+ .frag = {
+ .size = sfr->size,
+ .data = sfr,
+ },
+ };
/* Setup perf sample */
perf_sample_data_init(&data, 0, event->hw.last_period);
- raw.size = sfr->size;
- raw.data = sfr;
data.raw = &raw;
/* Setup pt_regs to look like an CPU-measurement external interrupt
@@ -1506,37 +1504,28 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
sf_disable();
}
}
-
-static int cpumf_pmu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int cpusf_pmu_setup(unsigned int cpu, int flags)
{
- int flags;
-
/* Ignore the notification if no events are scheduled on the PMU.
* This might be racy...
*/
if (!atomic_read(&num_events))
- return NOTIFY_OK;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- flags = PMC_INIT;
- local_irq_disable();
- setup_pmc_cpu(&flags);
- local_irq_enable();
- break;
- case CPU_DOWN_PREPARE:
- flags = PMC_RELEASE;
- local_irq_disable();
- setup_pmc_cpu(&flags);
- local_irq_enable();
- break;
- default:
- break;
- }
+ return 0;
- return NOTIFY_OK;
+ local_irq_disable();
+ setup_pmc_cpu(&flags);
+ local_irq_enable();
+ return 0;
+}
+
+static int s390_pmu_sf_online_cpu(unsigned int cpu)
+{
+ return cpusf_pmu_setup(cpu, PMC_INIT);
+}
+
+static int s390_pmu_sf_offline_cpu(unsigned int cpu)
+{
+ return cpusf_pmu_setup(cpu, PMC_RELEASE);
}
static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
@@ -1636,7 +1625,9 @@ static int __init init_cpum_sampling_pmu(void)
cpumf_measurement_alert);
goto out;
}
- perf_cpu_notifier(cpumf_pmu_notifier);
+
+ cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
+ s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
return err;
}
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 87035fa58bbe..17431f63de00 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -248,33 +248,3 @@ ssize_t cpumf_events_sysfs_show(struct device *dev,
return sprintf(page, "event=0x%04llx,name=%s\n",
pmu_attr->id, attr->attr.name);
}
-
-/* Reserve/release functions for sharing perf hardware */
-static DEFINE_SPINLOCK(perf_hw_owner_lock);
-static void *perf_sampling_owner;
-
-int perf_reserve_sampling(void)
-{
- int err;
-
- err = 0;
- spin_lock(&perf_hw_owner_lock);
- if (perf_sampling_owner) {
- pr_warn("The sampling facility is already reserved by %p\n",
- perf_sampling_owner);
- err = -EBUSY;
- } else
- perf_sampling_owner = __builtin_return_address(0);
- spin_unlock(&perf_hw_owner_lock);
- return err;
-}
-EXPORT_SYMBOL(perf_reserve_sampling);
-
-void perf_release_sampling(void)
-{
- spin_lock(&perf_hw_owner_lock);
- WARN_ON(!perf_sampling_owner);
- perf_sampling_owner = NULL;
- spin_unlock(&perf_hw_owner_lock);
-}
-EXPORT_SYMBOL(perf_release_sampling);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index de7451065c34..81d0808085e6 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,12 +13,45 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/diag.h>
+#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/smp.h>
-static DEFINE_PER_CPU(struct cpuid, cpu_id);
+struct cpu_info {
+ unsigned int cpu_mhz_dynamic;
+ unsigned int cpu_mhz_static;
+ struct cpuid cpu_id;
+};
+
+static DEFINE_PER_CPU(struct cpu_info, cpu_info);
+
+static bool machine_has_cpu_mhz;
+
+void __init cpu_detect_mhz_feature(void)
+{
+ if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
+ machine_has_cpu_mhz = 1;
+}
+
+static void update_cpu_mhz(void *arg)
+{
+ unsigned long mhz;
+ struct cpu_info *c;
+
+ mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
+ c = this_cpu_ptr(&cpu_info);
+ c->cpu_mhz_dynamic = mhz >> 32;
+ c->cpu_mhz_static = mhz & 0xffffffff;
+}
+
+void s390_update_cpu_mhz(void)
+{
+ s390_adjust_jiffies();
+ if (machine_has_cpu_mhz)
+ on_each_cpu(update_cpu_mhz, NULL, 0);
+}
void notrace cpu_relax(void)
{
@@ -35,9 +68,11 @@ EXPORT_SYMBOL(cpu_relax);
*/
void cpu_init(void)
{
- struct cpuid *id = this_cpu_ptr(&cpu_id);
+ struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);
get_cpu_id(id);
+ if (machine_has_cpu_mhz)
+ update_cpu_mhz(NULL);
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
@@ -53,10 +88,7 @@ int cpu_have_feature(unsigned int num)
}
EXPORT_SYMBOL(cpu_have_feature);
-/*
- * show_cpuinfo - Get information on one CPU for use by procfs.
- */
-static int show_cpuinfo(struct seq_file *m, void *v)
+static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
@@ -65,34 +97,55 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static const char * const int_hwcap_str[] = {
"sie"
};
- unsigned long n = (unsigned long) v - 1;
- int i;
-
- if (!n) {
- s390_adjust_jiffies();
- seq_printf(m, "vendor_id : IBM/S390\n"
- "# processors : %i\n"
- "bogomips per cpu: %lu.%02lu\n",
- num_online_cpus(), loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ))%100);
- seq_puts(m, "features\t: ");
- for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
- if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
- seq_printf(m, "%s ", hwcap_str[i]);
- for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
- if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
- seq_printf(m, "%s ", int_hwcap_str[i]);
- seq_puts(m, "\n");
- show_cacheinfo(m);
- }
- if (cpu_online(n)) {
- struct cpuid *id = &per_cpu(cpu_id, n);
- seq_printf(m, "processor %li: "
+ int i, cpu;
+
+ seq_printf(m, "vendor_id : IBM/S390\n"
+ "# processors : %i\n"
+ "bogomips per cpu: %lu.%02lu\n",
+ num_online_cpus(), loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ))%100);
+ seq_printf(m, "max thread id : %d\n", smp_cpu_mtid);
+ seq_puts(m, "features\t: ");
+ for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
+ if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
+ seq_printf(m, "%s ", hwcap_str[i]);
+ for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
+ if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
+ seq_printf(m, "%s ", int_hwcap_str[i]);
+ seq_puts(m, "\n");
+ show_cacheinfo(m);
+ for_each_online_cpu(cpu) {
+ struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);
+
+ seq_printf(m, "processor %d: "
"version = %02X, "
"identification = %06X, "
"machine = %04X\n",
- n, id->version, id->ident, id->machine);
+ cpu, id->version, id->ident, id->machine);
}
+}
+
+static void show_cpu_mhz(struct seq_file *m, unsigned long n)
+{
+ struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
+
+ seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
+ seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static);
+}
+
+/*
+ * show_cpuinfo - Get information on one CPU for use by procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long n = (unsigned long) v - 1;
+
+ if (!n)
+ show_cpu_summary(m, v);
+ if (!machine_has_cpu_mhz)
+ return 0;
+ seq_printf(m, "\ncpu number : %ld\n", n);
+ show_cpu_mhz(m, n);
return 0;
}
@@ -126,4 +179,3 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
-
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 49b1c13bf6c9..9336e824e2db 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -821,14 +821,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
-
- /* Do the secure computing check first. */
- if (secure_computing()) {
- /* seccomp failures shouldn't expose any additional code. */
- ret = -1;
- goto out;
- }
+ unsigned long mask = -1UL;
/*
* The sysc_tracesys code in entry.S stored the system
@@ -843,17 +836,26 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
* the system call and the system call restart handling.
*/
clear_pt_regs_flag(regs, PIF_SYSCALL);
- ret = -1;
+ return -1;
+ }
+
+ /* Do the secure computing check after ptrace. */
+ if (secure_computing(NULL)) {
+ /* seccomp failures shouldn't expose any additional code. */
+ return -1;
}
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gprs[2]);
- audit_syscall_entry(regs->gprs[2], regs->orig_gpr2,
- regs->gprs[3], regs->gprs[4],
- regs->gprs[5]);
-out:
- return ret ?: regs->gprs[2];
+ if (is_compat_task())
+ mask = 0xffffffff;
+
+ audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
+ regs->gprs[3] & mask, regs->gprs[4] & mask,
+ regs->gprs[5] & mask);
+
+ return regs->gprs[2];
}
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f31939147ccd..ba5f456edaa9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -130,17 +130,14 @@ __setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
- if (MACHINE_IS_KVM) {
- if (sclp.has_vt220)
- add_preferred_console("ttyS", 1, NULL);
- else if (sclp.has_linemode)
- add_preferred_console("ttyS", 0, NULL);
- else
- add_preferred_console("hvc", 0, NULL);
- } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
+ if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
add_preferred_console("ttyS", 0, NULL);
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
+ else if (CONSOLE_IS_VT220)
+ add_preferred_console("ttyS", 1, NULL);
+ else if (CONSOLE_IS_HVC)
+ add_preferred_console("hvc", 0, NULL);
}
static int __init conmode_setup(char *str)
@@ -206,6 +203,15 @@ static void __init conmode_default(void)
SET_CONSOLE_SCLP;
#endif
}
+ } else if (MACHINE_IS_KVM) {
+ if (sclp.has_vt220 &&
+ config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+ SET_CONSOLE_VT220;
+ else if (sclp.has_linemode &&
+ config_enabled(CONFIG_SCLP_CONSOLE))
+ SET_CONSOLE_SCLP;
+ else
+ SET_CONSOLE_HVC;
} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
@@ -289,7 +295,7 @@ static int __init parse_vmalloc(char *arg)
}
early_param("vmalloc", parse_vmalloc);
-void *restart_stack __attribute__((__section__(".data")));
+void *restart_stack __section(.data);
static void __init setup_lowcore(void)
{
@@ -432,6 +438,20 @@ static void __init setup_resources(void)
}
}
}
+#ifdef CONFIG_CRASH_DUMP
+ /*
+ * Re-add removed crash kernel memory as reserved memory. This makes
+ * sure it will be mapped with the identity mapping and struct pages
+ * will be created, so it can be resized later on.
+ * However add it later since the crash kernel resource should not be
+ * part of the System RAM resource.
+ */
+ if (crashk_res.end) {
+ memblock_add(crashk_res.start, resource_size(&crashk_res));
+ memblock_reserve(crashk_res.start, resource_size(&crashk_res));
+ insert_resource(&iomem_resource, &crashk_res);
+ }
+#endif
}
static void __init setup_memory_end(void)
@@ -602,7 +622,6 @@ static void __init reserve_crashkernel(void)
diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
- insert_resource(&iomem_resource, &crashk_res);
memblock_remove(crash_base, crash_size);
pr_info("Reserving %lluMB of memory at %lluMB "
"for crashkernel (System RAM: %luMB)\n",
@@ -901,6 +920,7 @@ void __init setup_arch(char **cmdline_p)
setup_vmcoreinfo();
setup_lowcore();
smp_fill_possible_mask();
+ cpu_detect_mhz_feature();
cpu_init();
numa_setup();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7b89a7572100..35531fe1c5ea 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -242,10 +242,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
struct lowcore *lc = pcpu->lowcore;
- if (MACHINE_HAS_TLB_LC)
- cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
+ cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
- atomic_inc(&init_mm.context.attach_count);
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->percpu_offset = __per_cpu_offset[cpu];
@@ -320,17 +318,11 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
*/
static int pcpu_set_smt(unsigned int mtid)
{
- register unsigned long reg1 asm ("1") = (unsigned long) mtid;
int cc;
if (smp_cpu_mtid == mtid)
return 0;
- asm volatile(
- " sigp %1,0,%2 # sigp set multi-threading\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
- : "cc");
+ cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
if (cc == 0) {
smp_cpu_mtid = mtid;
smp_cpu_mt_shift = 0;
@@ -876,10 +868,8 @@ void __cpu_die(unsigned int cpu)
while (!pcpu_stopped(pcpu))
cpu_relax();
pcpu_free_lowcore(pcpu);
- atomic_dec(&init_mm.context.attach_count);
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
- if (MACHINE_HAS_TLB_LC)
- cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
+ cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}
void __noreturn cpu_die(void)
@@ -897,7 +887,7 @@ void __init smp_fill_possible_mask(void)
sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
sclp_max = min(smp_max_threads, sclp_max);
- sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
+ sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
possible = setup_possible_cpus ?: nr_cpu_ids;
possible = min(possible, sclp_max);
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index f7dba3887a54..050b8d067d3b 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -16,21 +16,11 @@
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/topology.h>
-
-/* Sigh, math-emu. Don't ask. */
-#include <asm/sfp-util.h>
-#include <math-emu/soft-fp.h>
-#include <math-emu/single.h>
+#include <asm/fpu/api.h>
int topology_max_mnest;
-/*
- * stsi - store system information
- *
- * Returns the current configuration level if function code 0 was specified.
- * Otherwise returns 0 on success or a negative value on error.
- */
-int stsi(void *sysinfo, int fc, int sel1, int sel2)
+static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl)
{
register int r0 asm("0") = (fc << 28) | sel1;
register int r1 asm("1") = sel2;
@@ -45,9 +35,24 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
: "+d" (r0), "+d" (rc)
: "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
: "cc", "memory");
+ *lvl = ((unsigned int) r0) >> 28;
+ return rc;
+}
+
+/*
+ * stsi - store system information
+ *
+ * Returns the current configuration level if function code 0 was specified.
+ * Otherwise returns 0 on success or a negative value on error.
+ */
+int stsi(void *sysinfo, int fc, int sel1, int sel2)
+{
+ int lvl, rc;
+
+ rc = __stsi(sysinfo, fc, sel1, sel2, &lvl);
if (rc)
return rc;
- return fc ? 0 : ((unsigned int) r0) >> 28;
+ return fc ? 0 : lvl;
}
EXPORT_SYMBOL(stsi);
@@ -414,10 +419,8 @@ subsys_initcall(create_proc_service_level);
void s390_adjust_jiffies(void)
{
struct sysinfo_1_2_2 *info;
- const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- unsigned int capability;
+ unsigned long capability;
+ struct kernel_fpu fpu;
info = (void *) get_zeroed_page(GFP_KERNEL);
if (!info)
@@ -433,15 +436,25 @@ void s390_adjust_jiffies(void)
* higher cpu capacity. Bogomips are the other way round.
* To get to a halfway suitable number we divide 1e7
* by the cpu capability number. Yes, that means a floating
- * point division .. math-emu here we come :-)
+ * point division ..
*/
- FP_UNPACK_SP(SA, &fmil);
- if ((info->capability >> 23) == 0)
- FP_FROM_INT_S(SB, (long) info->capability, 64, long);
- else
- FP_UNPACK_SP(SB, &info->capability);
- FP_DIV_S(SR, SA, SB);
- FP_TO_INT_S(capability, SR, 32, 0);
+ kernel_fpu_begin(&fpu, KERNEL_FPR);
+ asm volatile(
+ " sfpc %3\n"
+ " l %0,%1\n"
+ " tmlh %0,0xff80\n"
+ " jnz 0f\n"
+ " cefbr %%f2,%0\n"
+ " j 1f\n"
+ "0: le %%f2,%1\n"
+ "1: cefbr %%f0,%2\n"
+ " debr %%f0,%%f2\n"
+ " cgebr %0,5,%%f0\n"
+ : "=&d" (capability)
+ : "Q" (info->capability), "d" (10000000), "d" (0)
+ : "cc"
+ );
+ kernel_fpu_end(&fpu);
} else
/*
* Really old machine without stsi block for basic
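
The s390_adjust_jiffies() hunk above trades the math-emu based division for an in-kernel FPU sequence. As a plain C sketch (userspace, for illustration only) of what that assembly computes, using the condition taken from the removed math-emu code: info->capability is treated as an integer when its top bits are clear, otherwise as a 32-bit float, and 1e7 is divided by it. Function and variable names are illustrative, and the rounding-mode details of the cgebr conversion are glossed over.

#include <stdint.h>
#include <string.h>

/* Hedged sketch of the capability -> bogomips-base computation. */
static unsigned long capability_to_base(uint32_t raw_capability)
{
	float cap;

	if ((raw_capability >> 23) == 0)
		cap = (float) raw_capability;	/* plain integer capability */
	else
		memcpy(&cap, &raw_capability, sizeof(cap));	/* already a float */
	return (unsigned long) (10000000.0f / cap + 0.5f);	/* ~= round(1e7 / cap) */
}
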
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9409d32f285e..4e9949800562 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -39,13 +39,14 @@
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <asm/uaccess.h>
+#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
-#include <asm/etr.h>
+#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"
@@ -61,6 +62,32 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);
+unsigned char ptff_function_mask[16];
+unsigned long lpar_offset;
+unsigned long initial_leap_seconds;
+
+/*
+ * Get time offsets with PTFF
+ */
+void __init ptff_init(void)
+{
+ struct ptff_qto qto;
+ struct ptff_qui qui;
+
+ if (!test_facility(28))
+ return;
+ ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
+
+ /* get LPAR offset */
+ if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+ lpar_offset = qto.tod_epoch_difference;
+
+ /* get initial leap seconds */
+ if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
+ initial_leap_seconds = (unsigned long)
+ ((long) qui.old_leap * 4096000000L);
+}
+
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -162,30 +189,32 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
set_clock_comparator(S390_lowcore.clock_comparator);
}
-static void etr_timing_alert(struct etr_irq_parm *);
static void stp_timing_alert(struct stp_irq_parm *);
static void timing_alert_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
inc_irq_stat(IRQEXT_TLA);
- if (param32 & 0x00c40000)
- etr_timing_alert((struct etr_irq_parm *) &param32);
if (param32 & 0x00038000)
stp_timing_alert((struct stp_irq_parm *) &param32);
}
-static void etr_reset(void);
static void stp_reset(void);
void read_persistent_clock64(struct timespec64 *ts)
{
- tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
+ __u64 clock;
+
+ clock = get_tod_clock() - initial_leap_seconds;
+ tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
}
void read_boot_clock64(struct timespec64 *ts)
{
- tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
+ __u64 clock;
+
+ clock = sched_clock_base_cc - initial_leap_seconds;
+ tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
}
static cycle_t read_tod_clock(struct clocksource *cs)
@@ -269,7 +298,6 @@ void update_vsyscall_tz(void)
void __init time_init(void)
{
/* Reset time synchronization interfaces. */
- etr_reset();
stp_reset();
/* request the clock comparator external interrupt */
@@ -337,20 +365,20 @@ static unsigned long clock_sync_flags;
#define CLOCK_SYNC_STP 3
/*
- * The synchronous get_clock function. It will write the current clock
- * value to the clock pointer and return 0 if the clock is in sync with
- * the external time source. If the clock mode is local it will return
- * -EOPNOTSUPP and -EAGAIN if the clock is not in sync with the external
- * reference.
+ * The get_clock function for the physical clock. It will get the current
+ * TOD clock, subtract the LPAR offset and write the result to *clock.
+ * The function returns 0 if the clock is in sync with the external time
+ * source. If the clock mode is local it will return -EOPNOTSUPP and
+ * -EAGAIN if the clock is not in sync with the external reference.
*/
-int get_sync_clock(unsigned long long *clock)
+int get_phys_clock(unsigned long long *clock)
{
atomic_t *sw_ptr;
unsigned int sw0, sw1;
sw_ptr = &get_cpu_var(clock_sync_word);
sw0 = atomic_read(sw_ptr);
- *clock = get_tod_clock();
+ *clock = get_tod_clock() - lpar_offset;
sw1 = atomic_read(sw_ptr);
put_cpu_var(clock_sync_word);
if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -364,7 +392,7 @@ int get_sync_clock(unsigned long long *clock)
return -EACCES;
return -EAGAIN;
}
-EXPORT_SYMBOL(get_sync_clock);
+EXPORT_SYMBOL(get_phys_clock);
/*
* Make get_sync_clock return -EAGAIN.
@@ -416,301 +444,6 @@ static void __init time_init_wq(void)
time_sync_wq = create_singlethread_workqueue("timesync");
}
-/*
- * External Time Reference (ETR) code.
- */
-static int etr_port0_online;
-static int etr_port1_online;
-static int etr_steai_available;
-
-static int __init early_parse_etr(char *p)
-{
- if (strncmp(p, "off", 3) == 0)
- etr_port0_online = etr_port1_online = 0;
- else if (strncmp(p, "port0", 5) == 0)
- etr_port0_online = 1;
- else if (strncmp(p, "port1", 5) == 0)
- etr_port1_online = 1;
- else if (strncmp(p, "on", 2) == 0)
- etr_port0_online = etr_port1_online = 1;
- return 0;
-}
-early_param("etr", early_parse_etr);
-
-enum etr_event {
- ETR_EVENT_PORT0_CHANGE,
- ETR_EVENT_PORT1_CHANGE,
- ETR_EVENT_PORT_ALERT,
- ETR_EVENT_SYNC_CHECK,
- ETR_EVENT_SWITCH_LOCAL,
- ETR_EVENT_UPDATE,
-};
-
-/*
- * Valid bit combinations of the eacr register are (x = don't care):
- * e0 e1 dp p0 p1 ea es sl
- * 0 0 x 0 0 0 0 0 initial, disabled state
- * 0 0 x 0 1 1 0 0 port 1 online
- * 0 0 x 1 0 1 0 0 port 0 online
- * 0 0 x 1 1 1 0 0 both ports online
- * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode
- * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode
- * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync
- * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync
- * 0 1 x 1 1 1 0 0 both ports online, port 1 usable
- * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync
- * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync
- * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode
- * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode
- * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync
- * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync
- * 1 0 x 1 1 1 0 0 both ports online, port 0 usable
- * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync
- * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync
- * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync
- * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync
- */
-static struct etr_eacr etr_eacr;
-static u64 etr_tolec; /* time of last eacr update */
-static struct etr_aib etr_port0;
-static int etr_port0_uptodate;
-static struct etr_aib etr_port1;
-static int etr_port1_uptodate;
-static unsigned long etr_events;
-static struct timer_list etr_timer;
-
-static void etr_timeout(unsigned long dummy);
-static void etr_work_fn(struct work_struct *work);
-static DEFINE_MUTEX(etr_work_mutex);
-static DECLARE_WORK(etr_work, etr_work_fn);
-
-/*
- * Reset ETR attachment.
- */
-static void etr_reset(void)
-{
- etr_eacr = (struct etr_eacr) {
- .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
- .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
- .es = 0, .sl = 0 };
- if (etr_setr(&etr_eacr) == 0) {
- etr_tolec = get_tod_clock();
- set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
- if (etr_port0_online && etr_port1_online)
- set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
- } else if (etr_port0_online || etr_port1_online) {
- pr_warn("The real or virtual hardware system does not provide an ETR interface\n");
- etr_port0_online = etr_port1_online = 0;
- }
-}
-
-static int __init etr_init(void)
-{
- struct etr_aib aib;
-
- if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
- return 0;
- time_init_wq();
- /* Check if this machine has the steai instruction. */
- if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
- etr_steai_available = 1;
- setup_timer(&etr_timer, etr_timeout, 0UL);
- if (etr_port0_online) {
- set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
- queue_work(time_sync_wq, &etr_work);
- }
- if (etr_port1_online) {
- set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
- queue_work(time_sync_wq, &etr_work);
- }
- return 0;
-}
-
-arch_initcall(etr_init);
-
-/*
- * Two sorts of ETR machine checks. The architecture reads:
- * "When a machine-check niterruption occurs and if a switch-to-local or
- * ETR-sync-check interrupt request is pending but disabled, this pending
- * disabled interruption request is indicated and is cleared".
- * Which means that we can get etr_switch_to_local events from the machine
- * check handler although the interruption condition is disabled. Lovely..
- */
-
-/*
- * Switch to local machine check. This is called when the last usable
- * ETR port goes inactive. After switch to local the clock is not in sync.
- */
-int etr_switch_to_local(void)
-{
- if (!etr_eacr.sl)
- return 0;
- disable_sync_clock(NULL);
- if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
- etr_eacr.es = etr_eacr.sl = 0;
- etr_setr(&etr_eacr);
- return 1;
- }
- return 0;
-}
-
-/*
- * ETR sync check machine check. This is called when the ETR OTE and the
- * local clock OTE are farther apart than the ETR sync check tolerance.
- * After a ETR sync check the clock is not in sync. The machine check
- * is broadcasted to all cpus at the same time.
- */
-int etr_sync_check(void)
-{
- if (!etr_eacr.es)
- return 0;
- disable_sync_clock(NULL);
- if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
- etr_eacr.es = 0;
- etr_setr(&etr_eacr);
- return 1;
- }
- return 0;
-}
-
-void etr_queue_work(void)
-{
- queue_work(time_sync_wq, &etr_work);
-}
-
-/*
- * ETR timing alert. There are two causes:
- * 1) port state change, check the usability of the port
- * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
- * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
- * or ETR-data word 4 (edf4) has changed.
- */
-static void etr_timing_alert(struct etr_irq_parm *intparm)
-{
- if (intparm->pc0)
- /* ETR port 0 state change. */
- set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
- if (intparm->pc1)
- /* ETR port 1 state change. */
- set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
- if (intparm->eai)
- /*
- * ETR port alert on either port 0, 1 or both.
- * Both ports are not up-to-date now.
- */
- set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
- queue_work(time_sync_wq, &etr_work);
-}
-
-static void etr_timeout(unsigned long dummy)
-{
- set_bit(ETR_EVENT_UPDATE, &etr_events);
- queue_work(time_sync_wq, &etr_work);
-}
-
-/*
- * Check if the etr mode is pss.
- */
-static inline int etr_mode_is_pps(struct etr_eacr eacr)
-{
- return eacr.es && !eacr.sl;
-}
-
-/*
- * Check if the etr mode is etr.
- */
-static inline int etr_mode_is_etr(struct etr_eacr eacr)
-{
- return eacr.es && eacr.sl;
-}
-
-/*
- * Check if the port can be used for TOD synchronization.
- * For PPS mode the port has to receive OTEs. For ETR mode
- * the port has to receive OTEs, the ETR stepping bit has to
- * be zero and the validity bits for data frame 1, 2, and 3
- * have to be 1.
- */
-static int etr_port_valid(struct etr_aib *aib, int port)
-{
- unsigned int psc;
-
- /* Check that this port is receiving OTEs. */
- if (aib->tsp == 0)
- return 0;
-
- psc = port ? aib->esw.psc1 : aib->esw.psc0;
- if (psc == etr_lpsc_pps_mode)
- return 1;
- if (psc == etr_lpsc_operational_step)
- return !aib->esw.y && aib->slsw.v1 &&
- aib->slsw.v2 && aib->slsw.v3;
- return 0;
-}
-
-/*
- * Check if two ports are on the same network.
- */
-static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
-{
- // FIXME: any other fields we have to compare?
- return aib1->edf1.net_id == aib2->edf1.net_id;
-}
-
-/*
- * Wrapper for etr_stei that converts physical port states
- * to logical port states to be consistent with the output
- * of stetr (see etr_psc vs. etr_lpsc).
- */
-static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
-{
- BUG_ON(etr_steai(aib, func) != 0);
- /* Convert port state to logical port state. */
- if (aib->esw.psc0 == 1)
- aib->esw.psc0 = 2;
- else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
- aib->esw.psc0 = 1;
- if (aib->esw.psc1 == 1)
- aib->esw.psc1 = 2;
- else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
- aib->esw.psc1 = 1;
-}
-
-/*
- * Check if the aib a2 is still connected to the same attachment as
- * aib a1, the etv values differ by one and a2 is valid.
- */
-static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
-{
- int state_a1, state_a2;
-
- /* Paranoia check: e0/e1 should better be the same. */
- if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
- a1->esw.eacr.e1 != a2->esw.eacr.e1)
- return 0;
-
- /* Still connected to the same etr ? */
- state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
- state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
- if (state_a1 == etr_lpsc_operational_step) {
- if (state_a2 != etr_lpsc_operational_step ||
- a1->edf1.net_id != a2->edf1.net_id ||
- a1->edf1.etr_id != a2->edf1.etr_id ||
- a1->edf1.etr_pn != a2->edf1.etr_pn)
- return 0;
- } else if (state_a2 != etr_lpsc_pps_mode)
- return 0;
-
- /* The ETV value of a2 needs to be ETV of a1 + 1. */
- if (a1->edf2.etv + 1 != a2->edf2.etv)
- return 0;
-
- if (!etr_port_valid(a2, p))
- return 0;
-
- return 1;
-}
-
struct clock_sync_data {
atomic_t cpus;
int in_sync;
@@ -748,688 +481,6 @@ static void clock_sync_cpu(struct clock_sync_data *sync)
}
/*
- * Sync the TOD clock using the port referred to by aibp. This port
- * has to be enabled and the other port has to be disabled. The
- * last eacr update has to be more than 1.6 seconds in the past.
- */
-static int etr_sync_clock(void *data)
-{
- static int first;
- unsigned long long clock, old_clock, clock_delta, delay, delta;
- struct clock_sync_data *etr_sync;
- struct etr_aib *sync_port, *aib;
- int port;
- int rc;
-
- etr_sync = data;
-
- if (xchg(&first, 1) == 1) {
- /* Slave */
- clock_sync_cpu(etr_sync);
- return 0;
- }
-
- /* Wait until all other cpus entered the sync function. */
- while (atomic_read(&etr_sync->cpus) != 0)
- cpu_relax();
-
- port = etr_sync->etr_port;
- aib = etr_sync->etr_aib;
- sync_port = (port == 0) ? &etr_port0 : &etr_port1;
- enable_sync_clock();
-
- /* Set clock to next OTE. */
- __ctl_set_bit(14, 21);
- __ctl_set_bit(0, 29);
- clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
- old_clock = get_tod_clock();
- if (set_tod_clock(clock) == 0) {
- __udelay(1); /* Wait for the clock to start. */
- __ctl_clear_bit(0, 29);
- __ctl_clear_bit(14, 21);
- etr_stetr(aib);
- /* Adjust Linux timing variables. */
- delay = (unsigned long long)
- (aib->edf2.etv - sync_port->edf2.etv) << 32;
- delta = adjust_time(old_clock, clock, delay);
- clock_delta = clock - old_clock;
- atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0,
- &clock_delta);
- etr_sync->fixup_cc = delta;
- fixup_clock_comparator(delta);
- /* Verify that the clock is properly set. */
- if (!etr_aib_follows(sync_port, aib, port)) {
- /* Didn't work. */
- disable_sync_clock(NULL);
- etr_sync->in_sync = -EAGAIN;
- rc = -EAGAIN;
- } else {
- etr_sync->in_sync = 1;
- rc = 0;
- }
- } else {
- /* Could not set the clock ?!? */
- __ctl_clear_bit(0, 29);
- __ctl_clear_bit(14, 21);
- disable_sync_clock(NULL);
- etr_sync->in_sync = -EAGAIN;
- rc = -EAGAIN;
- }
- xchg(&first, 0);
- return rc;
-}
-
-static int etr_sync_clock_stop(struct etr_aib *aib, int port)
-{
- struct clock_sync_data etr_sync;
- struct etr_aib *sync_port;
- int follows;
- int rc;
-
- /* Check if the current aib is adjacent to the sync port aib. */
- sync_port = (port == 0) ? &etr_port0 : &etr_port1;
- follows = etr_aib_follows(sync_port, aib, port);
- memcpy(sync_port, aib, sizeof(*aib));
- if (!follows)
- return -EAGAIN;
- memset(&etr_sync, 0, sizeof(etr_sync));
- etr_sync.etr_aib = aib;
- etr_sync.etr_port = port;
- get_online_cpus();
- atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
- rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask);
- put_online_cpus();
- return rc;
-}
-
-/*
- * Handle the immediate effects of the different events.
- * The port change event is used for online/offline changes.
- */
-static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
-{
- if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
- eacr.es = 0;
- if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
- eacr.es = eacr.sl = 0;
- if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
- etr_port0_uptodate = etr_port1_uptodate = 0;
-
- if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
- if (eacr.e0)
- /*
- * Port change of an enabled port. We have to
- * assume that this can have caused an stepping
- * port switch.
- */
- etr_tolec = get_tod_clock();
- eacr.p0 = etr_port0_online;
- if (!eacr.p0)
- eacr.e0 = 0;
- etr_port0_uptodate = 0;
- }
- if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
- if (eacr.e1)
- /*
- * Port change of an enabled port. We have to
- * assume that this can have caused an stepping
- * port switch.
- */
- etr_tolec = get_tod_clock();
- eacr.p1 = etr_port1_online;
- if (!eacr.p1)
- eacr.e1 = 0;
- etr_port1_uptodate = 0;
- }
- clear_bit(ETR_EVENT_UPDATE, &etr_events);
- return eacr;
-}
-
-/*
- * Set up a timer that expires after the etr_tolec + 1.6 seconds if
- * one of the ports needs an update.
- */
-static void etr_set_tolec_timeout(unsigned long long now)
-{
- unsigned long micros;
-
- if ((!etr_eacr.p0 || etr_port0_uptodate) &&
- (!etr_eacr.p1 || etr_port1_uptodate))
- return;
- micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
- micros = (micros > 1600000) ? 0 : 1600000 - micros;
- mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
-}
-
-/*
- * Set up a time that expires after 1/2 second.
- */
-static void etr_set_sync_timeout(void)
-{
- mod_timer(&etr_timer, jiffies + HZ/2);
-}
-
-/*
- * Update the aib information for one or both ports.
- */
-static struct etr_eacr etr_handle_update(struct etr_aib *aib,
- struct etr_eacr eacr)
-{
- /* With both ports disabled the aib information is useless. */
- if (!eacr.e0 && !eacr.e1)
- return eacr;
-
- /* Update port0 or port1 with aib stored in etr_work_fn. */
- if (aib->esw.q == 0) {
- /* Information for port 0 stored. */
- if (eacr.p0 && !etr_port0_uptodate) {
- etr_port0 = *aib;
- if (etr_port0_online)
- etr_port0_uptodate = 1;
- }
- } else {
- /* Information for port 1 stored. */
- if (eacr.p1 && !etr_port1_uptodate) {
- etr_port1 = *aib;
- if (etr_port0_online)
- etr_port1_uptodate = 1;
- }
- }
-
- /*
- * Do not try to get the alternate port aib if the clock
- * is not in sync yet.
- */
- if (!eacr.es || !check_sync_clock())
- return eacr;
-
- /*
- * If steai is available we can get the information about
- * the other port immediately. If only stetr is available the
- * data-port bit toggle has to be used.
- */
- if (etr_steai_available) {
- if (eacr.p0 && !etr_port0_uptodate) {
- etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
- etr_port0_uptodate = 1;
- }
- if (eacr.p1 && !etr_port1_uptodate) {
- etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
- etr_port1_uptodate = 1;
- }
- } else {
- /*
- * One port was updated above, if the other
- * port is not uptodate toggle dp bit.
- */
- if ((eacr.p0 && !etr_port0_uptodate) ||
- (eacr.p1 && !etr_port1_uptodate))
- eacr.dp ^= 1;
- else
- eacr.dp = 0;
- }
- return eacr;
-}
-
-/*
- * Write new etr control register if it differs from the current one.
- * Return 1 if etr_tolec has been updated as well.
- */
-static void etr_update_eacr(struct etr_eacr eacr)
-{
- int dp_changed;
-
- if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
- /* No change, return. */
- return;
- /*
- * The disable of an active port of the change of the data port
- * bit can/will cause a change in the data port.
- */
- dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
- (etr_eacr.dp ^ eacr.dp) != 0;
- etr_eacr = eacr;
- etr_setr(&etr_eacr);
- if (dp_changed)
- etr_tolec = get_tod_clock();
-}
-
-/*
- * ETR work. In this function you'll find the main logic. In
- * particular this is the only function that calls etr_update_eacr(),
- * it "controls" the etr control register.
- */
-static void etr_work_fn(struct work_struct *work)
-{
- unsigned long long now;
- struct etr_eacr eacr;
- struct etr_aib aib;
- int sync_port;
-
- /* prevent multiple execution. */
- mutex_lock(&etr_work_mutex);
-
- /* Create working copy of etr_eacr. */
- eacr = etr_eacr;
-
- /* Check for the different events and their immediate effects. */
- eacr = etr_handle_events(eacr);
-
- /* Check if ETR is supposed to be active. */
- eacr.ea = eacr.p0 || eacr.p1;
- if (!eacr.ea) {
- /* Both ports offline. Reset everything. */
- eacr.dp = eacr.es = eacr.sl = 0;
- on_each_cpu(disable_sync_clock, NULL, 1);
- del_timer_sync(&etr_timer);
- etr_update_eacr(eacr);
- goto out_unlock;
- }
-
- /* Store aib to get the current ETR status word. */
- BUG_ON(etr_stetr(&aib) != 0);
- etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
- now = get_tod_clock();
-
- /*
- * Update the port information if the last stepping port change
- * or data port change is older than 1.6 seconds.
- */
- if (now >= etr_tolec + (1600000 << 12))
- eacr = etr_handle_update(&aib, eacr);
-
- /*
- * Select ports to enable. The preferred synchronization mode is PPS.
- * If a port can be enabled depends on a number of things:
- * 1) The port needs to be online and uptodate. A port is not
- * disabled just because it is not uptodate, but it is only
- * enabled if it is uptodate.
- * 2) The port needs to have the same mode (pps / etr).
- * 3) The port needs to be usable -> etr_port_valid() == 1
- * 4) To enable the second port the clock needs to be in sync.
- * 5) If both ports are useable and are ETR ports, the network id
- * has to be the same.
- * The eacr.sl bit is used to indicate etr mode vs. pps mode.
- */
- if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
- eacr.sl = 0;
- eacr.e0 = 1;
- if (!etr_mode_is_pps(etr_eacr))
- eacr.es = 0;
- if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
- eacr.e1 = 0;
- // FIXME: uptodate checks ?
- else if (etr_port0_uptodate && etr_port1_uptodate)
- eacr.e1 = 1;
- sync_port = (etr_port0_uptodate &&
- etr_port_valid(&etr_port0, 0)) ? 0 : -1;
- } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
- eacr.sl = 0;
- eacr.e0 = 0;
- eacr.e1 = 1;
- if (!etr_mode_is_pps(etr_eacr))
- eacr.es = 0;
- sync_port = (etr_port1_uptodate &&
- etr_port_valid(&etr_port1, 1)) ? 1 : -1;
- } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
- eacr.sl = 1;
- eacr.e0 = 1;
- if (!etr_mode_is_etr(etr_eacr))
- eacr.es = 0;
- if (!eacr.es || !eacr.p1 ||
- aib.esw.psc1 != etr_lpsc_operational_alt)
- eacr.e1 = 0;
- else if (etr_port0_uptodate && etr_port1_uptodate &&
- etr_compare_network(&etr_port0, &etr_port1))
- eacr.e1 = 1;
- sync_port = (etr_port0_uptodate &&
- etr_port_valid(&etr_port0, 0)) ? 0 : -1;
- } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
- eacr.sl = 1;
- eacr.e0 = 0;
- eacr.e1 = 1;
- if (!etr_mode_is_etr(etr_eacr))
- eacr.es = 0;
- sync_port = (etr_port1_uptodate &&
- etr_port_valid(&etr_port1, 1)) ? 1 : -1;
- } else {
- /* Both ports not usable. */
- eacr.es = eacr.sl = 0;
- sync_port = -1;
- }
-
- /*
- * If the clock is in sync just update the eacr and return.
- * If there is no valid sync port wait for a port update.
- */
- if ((eacr.es && check_sync_clock()) || sync_port < 0) {
- etr_update_eacr(eacr);
- etr_set_tolec_timeout(now);
- goto out_unlock;
- }
-
- /*
- * Prepare control register for clock syncing
- * (reset data port bit, set sync check control.
- */
- eacr.dp = 0;
- eacr.es = 1;
-
- /*
- * Update eacr and try to synchronize the clock. If the update
- * of eacr caused a stepping port switch (or if we have to
- * assume that a stepping port switch has occurred) or the
- * clock syncing failed, reset the sync check control bit
- * and set up a timer to try again after 0.5 seconds
- */
- etr_update_eacr(eacr);
- if (now < etr_tolec + (1600000 << 12) ||
- etr_sync_clock_stop(&aib, sync_port) != 0) {
- /* Sync failed. Try again in 1/2 second. */
- eacr.es = 0;
- etr_update_eacr(eacr);
- etr_set_sync_timeout();
- } else
- etr_set_tolec_timeout(now);
-out_unlock:
- mutex_unlock(&etr_work_mutex);
-}
-
-/*
- * Sysfs interface functions
- */
-static struct bus_type etr_subsys = {
- .name = "etr",
- .dev_name = "etr",
-};
-
-static struct device etr_port0_dev = {
- .id = 0,
- .bus = &etr_subsys,
-};
-
-static struct device etr_port1_dev = {
- .id = 1,
- .bus = &etr_subsys,
-};
-
-/*
- * ETR subsys attributes
- */
-static ssize_t etr_stepping_port_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", etr_port0.esw.p);
-}
-
-static DEVICE_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
-
-static ssize_t etr_stepping_mode_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- char *mode_str;
-
- if (etr_mode_is_pps(etr_eacr))
- mode_str = "pps";
- else if (etr_mode_is_etr(etr_eacr))
- mode_str = "etr";
- else
- mode_str = "local";
- return sprintf(buf, "%s\n", mode_str);
-}
-
-static DEVICE_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
-
-/*
- * ETR port attributes
- */
-static inline struct etr_aib *etr_aib_from_dev(struct device *dev)
-{
- if (dev == &etr_port0_dev)
- return etr_port0_online ? &etr_port0 : NULL;
- else
- return etr_port1_online ? &etr_port1 : NULL;
-}
-
-static ssize_t etr_online_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned int online;
-
- online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
- return sprintf(buf, "%i\n", online);
-}
-
-static ssize_t etr_online_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int value;
-
- value = simple_strtoul(buf, NULL, 0);
- if (value != 0 && value != 1)
- return -EINVAL;
- if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
- return -EOPNOTSUPP;
- mutex_lock(&clock_sync_mutex);
- if (dev == &etr_port0_dev) {
- if (etr_port0_online == value)
- goto out; /* Nothing to do. */
- etr_port0_online = value;
- if (etr_port0_online && etr_port1_online)
- set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
- else
- clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
- set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
- queue_work(time_sync_wq, &etr_work);
- } else {
- if (etr_port1_online == value)
- goto out; /* Nothing to do. */
- etr_port1_online = value;
- if (etr_port0_online && etr_port1_online)
- set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
- else
- clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
- set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
- queue_work(time_sync_wq, &etr_work);
- }
-out:
- mutex_unlock(&clock_sync_mutex);
- return count;
-}
-
-static DEVICE_ATTR(online, 0600, etr_online_show, etr_online_store);
-
-static ssize_t etr_stepping_control_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
- etr_eacr.e0 : etr_eacr.e1);
-}
-
-static DEVICE_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
-
-static ssize_t etr_mode_code_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (!etr_port0_online && !etr_port1_online)
- /* Status word is not uptodate if both ports are offline. */
- return -ENODATA;
- return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
- etr_port0.esw.psc0 : etr_port0.esw.psc1);
-}
-
-static DEVICE_ATTR(state_code, 0400, etr_mode_code_show, NULL);
-
-static ssize_t etr_untuned_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct etr_aib *aib = etr_aib_from_dev(dev);
-
- if (!aib || !aib->slsw.v1)
- return -ENODATA;
- return sprintf(buf, "%i\n", aib->edf1.u);
-}
-
-static DEVICE_ATTR(untuned, 0400, etr_untuned_show, NULL);
-
-static ssize_t etr_network_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct etr_aib *aib = etr_aib_from_dev(dev);
-
- if (!aib || !aib->slsw.v1)
- return -ENODATA;
- return sprintf(buf, "%i\n", aib->edf1.net_id);
-}
-
-static DEVICE_ATTR(network, 0400, etr_network_id_show, NULL);
-
-static ssize_t etr_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct etr_aib *aib = etr_aib_from_dev(dev);
-
- if (!aib || !aib->slsw.v1)
- return -ENODATA;
- return sprintf(buf, "%i\n", aib->edf1.etr_id);
-}
-
-static DEVICE_ATTR(id, 0400, etr_id_show, NULL);
-
-static ssize_t etr_port_number_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct etr_aib *aib = etr_aib_from_dev(dev);
-
- if (!aib || !aib->slsw.v1)
- return -ENODATA;
- return sprintf(buf, "%i\n", aib->edf1.etr_pn);
-}
-
-static DEVICE_ATTR(port, 0400, etr_port_number_show, NULL);
-
-static ssize_t etr_coupled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct etr_aib *aib = etr_aib_from_dev(dev);
-
- if (!aib || !aib->slsw.v3)
- return -ENODATA;
- return sprintf(buf, "%i\n", aib->edf3.c);
-}
-
-static DEVICE_ATTR(coupled, 0400, etr_coupled_show, NULL);
-
-static ssize_t etr_local_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct etr_aib *aib = etr_aib_from_dev(dev);
-
- if (!aib || !aib->slsw.v3)
- return -ENODATA;
- return sprintf(buf, "%i\n", aib->edf3.blto);
-}
-
-static DEVICE_ATTR(local_time, 0400, etr_local_time_show, NULL);
-
-static ssize_t etr_utc_offset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct etr_aib *aib = etr_aib_from_dev(dev);
-
- if (!aib || !aib->slsw.v3)
- return -ENODATA;
- return sprintf(buf, "%i\n", aib->edf3.buo);
-}
-
-static DEVICE_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
-
-static struct device_attribute *etr_port_attributes[] = {
- &dev_attr_online,
- &dev_attr_stepping_control,
- &dev_attr_state_code,
- &dev_attr_untuned,
- &dev_attr_network,
- &dev_attr_id,
- &dev_attr_port,
- &dev_attr_coupled,
- &dev_attr_local_time,
- &dev_attr_utc_offset,
- NULL
-};
-
-static int __init etr_register_port(struct device *dev)
-{
- struct device_attribute **attr;
- int rc;
-
- rc = device_register(dev);
- if (rc)
- goto out;
- for (attr = etr_port_attributes; *attr; attr++) {
- rc = device_create_file(dev, *attr);
- if (rc)
- goto out_unreg;
- }
- return 0;
-out_unreg:
- for (; attr >= etr_port_attributes; attr--)
- device_remove_file(dev, *attr);
- device_unregister(dev);
-out:
- return rc;
-}
-
-static void __init etr_unregister_port(struct device *dev)
-{
- struct device_attribute **attr;
-
- for (attr = etr_port_attributes; *attr; attr++)
- device_remove_file(dev, *attr);
- device_unregister(dev);
-}
-
-static int __init etr_init_sysfs(void)
-{
- int rc;
-
- rc = subsys_system_register(&etr_subsys, NULL);
- if (rc)
- goto out;
- rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_port);
- if (rc)
- goto out_unreg_subsys;
- rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_mode);
- if (rc)
- goto out_remove_stepping_port;
- rc = etr_register_port(&etr_port0_dev);
- if (rc)
- goto out_remove_stepping_mode;
- rc = etr_register_port(&etr_port1_dev);
- if (rc)
- goto out_remove_port0;
- return 0;
-
-out_remove_port0:
- etr_unregister_port(&etr_port0_dev);
-out_remove_stepping_mode:
- device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_mode);
-out_remove_stepping_port:
- device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_port);
-out_unreg_subsys:
- bus_unregister(&etr_subsys);
-out:
- return rc;
-}
-
-device_initcall(etr_init_sysfs);
-
-/*
* Server Time Protocol (STP) code.
*/
static bool stp_online;
@@ -1455,7 +506,7 @@ static void __init stp_reset(void)
int rc;
stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
- rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+ rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
@@ -1533,6 +584,7 @@ static int stp_sync_clock(void *data)
static int first;
unsigned long long old_clock, delta, new_clock, clock_delta;
struct clock_sync_data *stp_sync;
+ struct ptff_qto qto;
int rc;
stp_sync = data;
@@ -1554,11 +606,14 @@ static int stp_sync_clock(void *data)
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
old_clock = get_tod_clock();
- rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
+ rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
if (rc == 0) {
- new_clock = get_tod_clock();
+ new_clock = old_clock + clock_delta;
delta = adjust_time(old_clock, new_clock, 0);
- clock_delta = new_clock - old_clock;
+ if (ptff_query(PTFF_QTO) &&
+ ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+ /* Update LPAR offset */
+ lpar_offset = qto.tod_epoch_difference;
atomic_notifier_call_chain(&s390_epoch_delta_notifier,
0, &clock_delta);
fixup_clock_comparator(delta);
@@ -1590,12 +645,12 @@ static void stp_work_fn(struct work_struct *work)
mutex_lock(&stp_work_mutex);
if (!stp_online) {
- chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+ chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
del_timer_sync(&stp_timer);
goto out_unlock;
}
- rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
+ rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0, NULL);
if (rc)
goto out_unlock;
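
Editorial note, not part of the patch: the ETR sysfs code removed above (etr_register_port()/etr_init_sysfs()) is a textbook example of the kernel's goto-based unwind idiom, where each successful setup step gains a matching undo label that later failures jump back through in reverse order. A minimal, runnable userspace sketch of that idiom follows; all step_*()/undo_*() functions are hypothetical stand-ins, not kernel APIs.

    #include <stdio.h>

    static int step_a(void) { puts("a: ok"); return 0; }
    static int step_b(void) { puts("b: ok"); return 0; }
    static int step_c(void) { puts("c: fails"); return -1; }

    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int init_all(void)
    {
            int rc;

            rc = step_a();
            if (rc)
                    goto out;
            rc = step_b();
            if (rc)
                    goto out_undo_a;
            rc = step_c();
            if (rc)
                    goto out_undo_b;
            return 0;

    out_undo_b:                     /* undo in reverse order of setup */
            undo_b();
    out_undo_a:
            undo_a();
    out:
            return rc;
    }

    int main(void)
    {
            return init_all() ? 1 : 0;
    }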
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 64298a867589..e959c02e0cac 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -46,6 +46,7 @@ static DECLARE_WORK(topology_work, topology_work_fn);
*/
static struct mask_info socket_info;
static struct mask_info book_info;
+static struct mask_info drawer_info;
DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
@@ -79,10 +80,10 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
return mask;
}
-static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
- struct mask_info *book,
- struct mask_info *socket,
- int one_socket_per_cpu)
+static void add_cpus_to_mask(struct topology_core *tl_core,
+ struct mask_info *drawer,
+ struct mask_info *book,
+ struct mask_info *socket)
{
struct cpu_topology_s390 *topo;
unsigned int core;
@@ -97,21 +98,17 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
topo = &per_cpu(cpu_topology, lcpu + i);
+ topo->drawer_id = drawer->id;
topo->book_id = book->id;
+ topo->socket_id = socket->id;
topo->core_id = rcore;
topo->thread_id = lcpu + i;
+ cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
- if (one_socket_per_cpu)
- topo->socket_id = rcore;
- else
- topo->socket_id = socket->id;
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
- if (one_socket_per_cpu)
- socket = socket->next;
}
- return socket;
}
static void clear_masks(void)
@@ -128,6 +125,11 @@ static void clear_masks(void)
cpumask_clear(&info->mask);
info = info->next;
}
+ info = &drawer_info;
+ while (info) {
+ cpumask_clear(&info->mask);
+ info = info->next;
+ }
}
static union topology_entry *next_tle(union topology_entry *tle)
@@ -137,16 +139,22 @@ static union topology_entry *next_tle(union topology_entry *tle)
return (union topology_entry *)((struct topology_container *)tle + 1);
}
-static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
+static void tl_to_masks(struct sysinfo_15_1_x *info)
{
struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
+ struct mask_info *drawer = &drawer_info;
union topology_entry *tle, *end;
+ clear_masks();
tle = info->tle;
end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
+ case 3:
+ drawer = drawer->next;
+ drawer->id = tle->container.id;
+ break;
case 2:
book = book->next;
book->id = tle->container.id;
@@ -156,32 +164,7 @@ static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
socket->id = tle->container.id;
break;
case 0:
- add_cpus_to_mask(&tle->cpu, book, socket, 0);
- break;
- default:
- clear_masks();
- return;
- }
- tle = next_tle(tle);
- }
-}
-
-static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
-{
- struct mask_info *socket = &socket_info;
- struct mask_info *book = &book_info;
- union topology_entry *tle, *end;
-
- tle = info->tle;
- end = (union topology_entry *)((unsigned long)info + info->length);
- while (tle < end) {
- switch (tle->nl) {
- case 1:
- book = book->next;
- book->id = tle->container.id;
- break;
- case 0:
- socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
+ add_cpus_to_mask(&tle->cpu, drawer, book, socket);
break;
default:
clear_masks();
@@ -191,22 +174,6 @@ static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
}
}
-static void tl_to_masks(struct sysinfo_15_1_x *info)
-{
- struct cpuid cpu_id;
-
- get_cpu_id(&cpu_id);
- clear_masks();
- switch (cpu_id.machine) {
- case 0x2097:
- case 0x2098:
- __tl_to_masks_z10(info);
- break;
- default:
- __tl_to_masks_generic(info);
- }
-}
-
static void topology_update_polarization_simple(void)
{
int cpu;
@@ -257,11 +224,13 @@ static void update_cpu_masks(void)
topo->thread_mask = cpu_thread_map(cpu);
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
+ topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
topo->thread_id = cpu;
topo->core_id = cpu;
topo->socket_id = cpu;
topo->book_id = cpu;
+ topo->drawer_id = cpu;
}
}
numa_update_cpu_topology();
@@ -269,10 +238,7 @@ static void update_cpu_masks(void)
void store_topology(struct sysinfo_15_1_x *info)
{
- if (topology_max_mnest >= 3)
- stsi(info, 15, 1, 3);
- else
- stsi(info, 15, 1, 2);
+ stsi(info, 15, 1, min(topology_max_mnest, 4));
}
int arch_update_cpu_topology(void)
@@ -442,6 +408,11 @@ static const struct cpumask *cpu_book_mask(int cpu)
return &per_cpu(cpu_topology, cpu).book_mask;
}
+static const struct cpumask *cpu_drawer_mask(int cpu)
+{
+ return &per_cpu(cpu_topology, cpu).drawer_mask;
+}
+
static int __init early_parse_topology(char *p)
{
return kstrtobool(p, &topology_enabled);
@@ -452,6 +423,7 @@ static struct sched_domain_topology_level s390_topology[] = {
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
+ { cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
};
@@ -487,6 +459,7 @@ static int __init s390_topology_init(void)
printk(KERN_CONT " / %d\n", info->mnest);
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
+ alloc_masks(info, &drawer_info, 3);
set_sched_topology(s390_topology);
return 0;
}
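
Editorial note, not part of the patch: the topology changes above add a "drawer" container level on top of books, and the NUMA emulation code later in this patch (dist_core_to_core() in mode_emu.c) turns that containment into a distance: two CPUs are closer the deeper the level they first share. A small runnable sketch of that idea is below; the struct, the DIST_SELF name, and the example IDs are illustrative, not kernel structures.

    #include <stdio.h>

    struct cpu_topo {
            int drawer_id;
            int book_id;
            int socket_id;
            int core_id;
    };

    /* Mirrors the spirit of DIST_CORE/DIST_MC/DIST_BOOK/DIST_DRAWER. */
    enum { DIST_SELF, DIST_CORE, DIST_MC, DIST_BOOK, DIST_DRAWER, DIST_MAX };

    static int cpu_distance(const struct cpu_topo *a, const struct cpu_topo *b)
    {
            if (a->drawer_id != b->drawer_id)
                    return DIST_DRAWER;     /* only the machine is shared */
            if (a->book_id != b->book_id)
                    return DIST_BOOK;       /* same drawer, different books */
            if (a->socket_id != b->socket_id)
                    return DIST_MC;
            if (a->core_id != b->core_id)
                    return DIST_CORE;
            return DIST_SELF;
    }

    int main(void)
    {
            struct cpu_topo c0 = { 0, 0, 0, 0 };
            struct cpu_topo c1 = { 0, 1, 0, 0 };

            printf("distance: %d\n", cpu_distance(&c0, &c1));
            return 0;
    }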
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index f9c459586649..68145456fee2 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -1,5 +1,7 @@
# List of files in the vdso, has to be asm only for now
+KCOV_INSTRUMENT := n
+
obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
# Build rules
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 058659c1b8cf..0b0fd22c869a 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -1,5 +1,7 @@
# List of files in the vdso, has to be asm only for now
+KCOV_INSTRUMENT := n
+
obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
# Build rules
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 0f41a8286378..429bfd111961 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -4,6 +4,16 @@
#include <asm/thread_info.h>
#include <asm/page.h>
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * make sure it has 16k alignment.
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
+/* Handle ro_after_init data on our own. */
+#define RO_AFTER_INIT_DATA
+
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
@@ -49,7 +59,14 @@ SECTIONS
_eshared = .; /* End of shareable data */
_sdata = .; /* Start of data section */
- EXCEPTION_TABLE(16) :data
+ . = ALIGN(PAGE_SIZE);
+ __start_ro_after_init = .;
+ .data..ro_after_init : {
+ *(.data..ro_after_init)
+ }
+ EXCEPTION_TABLE(16)
+ . = ALIGN(PAGE_SIZE);
+ __end_ro_after_init = .;
RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
@@ -81,7 +98,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
- BSS_SECTION(0, 2, 0)
+ BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)
_end = . ;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 2e6b54e4d3f9..252157181302 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -341,6 +341,8 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
+ vcpu->stat.exit_pei++;
+
if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
return handle_mvpg_pei(vcpu);
if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6d8ec3ac9dd8..6f5c344cd785 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,7 +28,7 @@
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
-#include <asm/etr.h>
+#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
@@ -61,6 +61,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exit_external_request", VCPU_STAT(exit_external_request) },
{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
{ "exit_instruction", VCPU_STAT(exit_instruction) },
+ { "exit_pei", VCPU_STAT(exit_pei) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
@@ -657,7 +658,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
kvm->arch.model.cpuid = proc->cpuid;
lowest_ibc = sclp.ibc >> 16 & 0xfff;
unblocked_ibc = sclp.ibc & 0xfff;
- if (lowest_ibc) {
+ if (lowest_ibc && proc->ibc) {
if (proc->ibc > unblocked_ibc)
kvm->arch.model.ibc = unblocked_ibc;
else if (proc->ibc < lowest_ibc)
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index b647d5ff0ad9..e390bbb16443 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -236,6 +236,26 @@ char * strrchr(const char * s, int c)
}
EXPORT_SYMBOL(strrchr);
+static inline int clcle(const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2,
+ int *diff)
+{
+ register unsigned long r2 asm("2") = (unsigned long) s1;
+ register unsigned long r3 asm("3") = (unsigned long) l2;
+ register unsigned long r4 asm("4") = (unsigned long) s2;
+ register unsigned long r5 asm("5") = (unsigned long) l2;
+ int cc;
+
+ asm volatile ("0: clcle %1,%3,0\n"
+ " jo 0b\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=&d" (cc), "+a" (r2), "+a" (r3),
+ "+a" (r4), "+a" (r5) : : "cc");
+ *diff = *(char *)r2 - *(char *)r4;
+ return cc;
+}
+
/**
* strstr - Find the first substring in a %NUL terminated string
* @s1: The string to be searched
@@ -250,18 +270,9 @@ char * strstr(const char * s1,const char * s2)
return (char *) s1;
l1 = __strend(s1) - s1;
while (l1-- >= l2) {
- register unsigned long r2 asm("2") = (unsigned long) s1;
- register unsigned long r3 asm("3") = (unsigned long) l2;
- register unsigned long r4 asm("4") = (unsigned long) s2;
- register unsigned long r5 asm("5") = (unsigned long) l2;
- int cc;
-
- asm volatile ("0: clcle %1,%3,0\n"
- " jo 0b\n"
- " ipm %0\n"
- " srl %0,28"
- : "=&d" (cc), "+a" (r2), "+a" (r3),
- "+a" (r4), "+a" (r5) : : "cc" );
+ int cc, dummy;
+
+ cc = clcle(s1, l1, s2, l2, &dummy);
if (!cc)
return (char *) s1;
s1++;
@@ -302,20 +313,11 @@ EXPORT_SYMBOL(memchr);
*/
int memcmp(const void *cs, const void *ct, size_t n)
{
- register unsigned long r2 asm("2") = (unsigned long) cs;
- register unsigned long r3 asm("3") = (unsigned long) n;
- register unsigned long r4 asm("4") = (unsigned long) ct;
- register unsigned long r5 asm("5") = (unsigned long) n;
- int ret;
+ int ret, diff;
- asm volatile ("0: clcle %1,%3,0\n"
- " jo 0b\n"
- " ipm %0\n"
- " srl %0,28"
- : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5)
- : : "cc" );
+ ret = clcle(cs, n, ct, n, &diff);
if (ret)
- ret = *(char *) r2 - *(char *) r4;
+ ret = diff;
return ret;
}
EXPORT_SYMBOL(memcmp);
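
Editorial note, not part of the patch: the string.c change above factors the duplicated CLCLE (compare logical long extended) inline assembly into one clcle() helper that reports both a condition code and the first differing byte, so strstr() and memcmp() can share it. A portable sketch of the same factoring follows, with a plain byte loop standing in for the s390 instruction; cmp_range(), my_memcmp() and my_strstr() are illustrative names only.

    #include <stdio.h>
    #include <string.h>

    /* Returns 0 if the first n bytes match; otherwise non-zero, with *diff
     * set to the signed difference of the first mismatching byte pair. */
    static int cmp_range(const char *s1, const char *s2, size_t n, int *diff)
    {
            size_t i;

            *diff = 0;
            for (i = 0; i < n; i++) {
                    if (s1[i] != s2[i]) {
                            *diff = (unsigned char)s1[i] - (unsigned char)s2[i];
                            return 1;
                    }
            }
            return 0;
    }

    static int my_memcmp(const void *cs, const void *ct, size_t n)
    {
            int diff;

            return cmp_range(cs, ct, n, &diff) ? diff : 0;
    }

    static const char *my_strstr(const char *s1, const char *s2)
    {
            size_t l1 = strlen(s1), l2 = strlen(s2);
            int diff;

            if (!l2)
                    return s1;
            while (l1-- >= l2) {
                    if (!cmp_range(s1, s2, l2, &diff))
                            return s1;
                    s1++;
            }
            return NULL;
    }

    int main(void)
    {
            printf("%d %s\n", my_memcmp("abc", "abd", 3),
                   my_strstr("haystack", "stack"));
            return 0;
    }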
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index ae4de559e3a0..d96596128e9f 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -49,7 +49,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
" jnm 5b\n"
" ex %4,0(%3)\n"
" j 8f\n"
- "7:slgr %0,%0\n"
+ "7: slgr %0,%0\n"
"8:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -93,7 +93,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
" jnm 6b\n"
" ex %4,0(%3)\n"
" j 9f\n"
- "8:slgr %0,%0\n"
+ "8: slgr %0,%0\n"
"9: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
@@ -266,7 +266,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
" slgr %0,%3\n"
" j 5f\n"
- "4:slgr %0,%0\n"
+ "4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 8556d6be9b54..861880df12c7 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -157,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
- prot = pud_val(*pud) & _REGION3_ENTRY_RO;
+ prot = pud_val(*pud) & _REGION_ENTRY_PROTECT;
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7a3144017301..25783dc3c813 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
report_user_fault(regs, SIGSEGV, 1);
si.si_signo = SIGSEGV;
+ si.si_errno = 0;
si.si_code = si_code;
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGSEGV, &si, current);
@@ -455,7 +456,7 @@ retry:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
@@ -623,7 +624,7 @@ void pfault_fini(void)
diag_stat_inc(DIAG_STAT_X258);
asm volatile(
" diag %0,0,0x258\n"
- "0:\n"
+ "0: nopr %%r7\n"
EX_TABLE(0b,0b)
: : "a" (&refbk), "m" (refbk) : "cc");
}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index cace818d86eb..063c721ec0dc 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
static void gmap_flush_tlb(struct gmap *gmap)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_asce(gmap->mm, gmap->asce);
+ __tlb_flush_idte(gmap->asce);
else
__tlb_flush_global();
}
@@ -124,7 +124,7 @@ void gmap_free(struct gmap *gmap)
/* Flush tlb. */
if (MACHINE_HAS_IDTE)
- __tlb_flush_asce(gmap->mm, gmap->asce);
+ __tlb_flush_idte(gmap->asce);
else
__tlb_flush_global();
@@ -430,6 +430,9 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
VM_BUG_ON(pgd_none(*pgd));
pud = pud_offset(pgd, vmaddr);
VM_BUG_ON(pud_none(*pud));
+ /* large puds cannot yet be handled */
+ if (pud_large(*pud))
+ return -EFAULT;
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
/* large pmds cannot yet be handled */
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index a8a6765f1a51..adb0c34bf431 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -128,6 +128,44 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
return 1;
}
+static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ struct page *head, *page;
+ unsigned long mask;
+ int refs;
+
+ mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
+ if ((pud_val(pud) & mask) != 0)
+ return 0;
+ VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
+
+ refs = 0;
+ head = pud_page(pud);
+ page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pud_val(pud) != pud_val(*pudp))) {
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ return 1;
+}
+
static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
@@ -144,7 +182,12 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
- if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
+ if (unlikely(pud_large(pud))) {
+ if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
+ nr))
+ return 0;
+ } else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
+ nr))
return 0;
} while (pudp++, addr = next, addr != end);
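
Editorial note, not part of the patch: gup_huge_pud() added above follows the lockless get_user_pages() pattern of taking references optimistically and then re-reading the pud to confirm nothing changed underneath, undoing the references otherwise. A minimal userspace sketch of that "grab, revalidate, roll back" sequence with C11 atomics is below; the entry/refcount objects are stand-ins and deliberately much simpler than the kernel's page refcounting.

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long entry = 0x1000;    /* plays the role of the pud */
    static _Atomic int refcount = 1;                /* plays the role of the head page */

    static int speculative_get(unsigned long snapshot, int refs)
    {
            atomic_fetch_add(&refcount, refs);              /* optimistic grab */

            if (atomic_load(&entry) != snapshot) {          /* revalidate */
                    atomic_fetch_sub(&refcount, refs);      /* roll back */
                    return 0;
            }
            return 1;
    }

    int main(void)
    {
            unsigned long snap = atomic_load(&entry);

            printf("first try:  %s (refcount=%d)\n",
                   speculative_get(snap, 3) ? "ok" : "raced",
                   atomic_load(&refcount));

            atomic_store(&entry, 0x2000);   /* simulate a concurrent change */
            printf("second try: %s (refcount=%d)\n",
                   speculative_get(snap, 3) ? "ok" : "raced",
                   atomic_load(&refcount));
            return 0;
    }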
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 1b5e8983f4f3..e19d853883be 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -1,19 +1,22 @@
/*
* IBM System z Huge TLB Page Support for Kernel.
*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2016
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
+#define KMSG_COMPONENT "hugetlb"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/mm.h>
#include <linux/hugetlb.h>
-static inline pmd_t __pte_to_pmd(pte_t pte)
+static inline unsigned long __pte_to_rste(pte_t pte)
{
- pmd_t pmd;
+ unsigned long rste;
/*
- * Convert encoding pte bits pmd bits
+ * Convert encoding pte bits pmd / pud bits
* lIR.uswrdy.p dy..R...I...wr
* empty 010.000000.0 -> 00..0...1...00
* prot-none, clean, old 111.000000.1 -> 00..1...1...00
@@ -33,25 +36,31 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
* u unused, l large
*/
if (pte_present(pte)) {
- pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
+ rste = pte_val(pte) & PAGE_MASK;
+ rste |= (pte_val(pte) & _PAGE_READ) >> 4;
+ rste |= (pte_val(pte) & _PAGE_WRITE) >> 4;
+ rste |= (pte_val(pte) & _PAGE_INVALID) >> 5;
+ rste |= (pte_val(pte) & _PAGE_PROTECT);
+ rste |= (pte_val(pte) & _PAGE_DIRTY) << 10;
+ rste |= (pte_val(pte) & _PAGE_YOUNG) << 10;
+ rste |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
} else
- pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
- return pmd;
+ rste = _SEGMENT_ENTRY_INVALID;
+ return rste;
}
-static inline pte_t __pmd_to_pte(pmd_t pmd)
+static inline pte_t __rste_to_pte(unsigned long rste)
{
+ int present;
pte_t pte;
+ if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ present = pud_present(__pud(rste));
+ else
+ present = pmd_present(__pmd(rste));
+
/*
- * Convert encoding pmd bits pte bits
+ * Convert encoding pmd / pud bits pte bits
* dy..R...I...wr lIR.uswrdy.p
* empty 00..0...1...00 -> 010.000000.0
* prot-none, clean, old 00..1...1...00 -> 111.000000.1
@@ -70,16 +79,16 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
* SW-bits: p present, y young, d dirty, r read, w write, s special,
* u unused, l large
*/
- if (pmd_present(pmd)) {
- pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
+ if (present) {
+ pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
- pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
- pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
- pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
- pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
- pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
- pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
- pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
+ pte_val(pte) |= (rste & _SEGMENT_ENTRY_READ) << 4;
+ pte_val(pte) |= (rste & _SEGMENT_ENTRY_WRITE) << 4;
+ pte_val(pte) |= (rste & _SEGMENT_ENTRY_INVALID) << 5;
+ pte_val(pte) |= (rste & _SEGMENT_ENTRY_PROTECT);
+ pte_val(pte) |= (rste & _SEGMENT_ENTRY_DIRTY) >> 10;
+ pte_val(pte) |= (rste & _SEGMENT_ENTRY_YOUNG) >> 10;
+ pte_val(pte) |= (rste & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
@@ -88,27 +97,33 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- pmd_t pmd = __pte_to_pmd(pte);
-
- pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
- *(pmd_t *) ptep = pmd;
+ unsigned long rste = __pte_to_rste(pte);
+
+ /* Set correct table type for 2G hugepages */
+ if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
+ else
+ rste |= _SEGMENT_ENTRY_LARGE;
+ pte_val(*ptep) = rste;
}
pte_t huge_ptep_get(pte_t *ptep)
{
- pmd_t pmd = *(pmd_t *) ptep;
-
- return __pmd_to_pte(pmd);
+ return __rste_to_pte(pte_val(*ptep));
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
+ pte_t pte = huge_ptep_get(ptep);
pmd_t *pmdp = (pmd_t *) ptep;
- pmd_t old;
+ pud_t *pudp = (pud_t *) ptep;
- old = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
- return __pmd_to_pte(old);
+ if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
+ else
+ pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ return pte;
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
@@ -120,8 +135,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pgdp = pgd_offset(mm, addr);
pudp = pud_alloc(mm, pgdp, addr);
- if (pudp)
- pmdp = pmd_alloc(mm, pudp, addr);
+ if (pudp) {
+ if (sz == PUD_SIZE)
+ return (pte_t *) pudp;
+ else if (sz == PMD_SIZE)
+ pmdp = pmd_alloc(mm, pudp, addr);
+ }
return (pte_t *) pmdp;
}
@@ -134,8 +153,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
pgdp = pgd_offset(mm, addr);
if (pgd_present(*pgdp)) {
pudp = pud_offset(pgdp, addr);
- if (pud_present(*pudp))
+ if (pud_present(*pudp)) {
+ if (pud_large(*pudp))
+ return (pte_t *) pudp;
pmdp = pmd_offset(pudp, addr);
+ }
}
return (pte_t *) pmdp;
}
@@ -147,5 +169,34 @@ int pmd_huge(pmd_t pmd)
int pud_huge(pud_t pud)
{
- return 0;
+ return pud_large(pud);
+}
+
+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int flags)
+{
+ if (flags & FOLL_GET)
+ return NULL;
+
+ return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
+}
+
+static __init int setup_hugepagesz(char *opt)
+{
+ unsigned long size;
+ char *string = opt;
+
+ size = memparse(opt, &opt);
+ if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) {
+ hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+ } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ } else {
+ pr_err("hugepagesz= specifies an unsupported page size %s\n",
+ string);
+ return 0;
+ }
+ return 1;
}
+__setup("hugepagesz=", setup_hugepagesz);
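
Editorial note, not part of the patch: setup_hugepagesz() above relies on memparse() to turn "hugepagesz=1M" or "hugepagesz=2G" into a byte count, then matches it against PMD_SIZE (1 MB segments, EDAT1) or PUD_SIZE (2 GB region-third entries, EDAT2). A simplified userspace approximation of that parsing and dispatch follows; the kernel's memparse() also understands T/P/E suffixes, and parse_size() here is an illustrative name.

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long parse_size(const char *s)
    {
            char *end;
            unsigned long long val = strtoull(s, &end, 0);

            switch (*end) {
            case 'G': case 'g': val <<= 30; break;
            case 'M': case 'm': val <<= 20; break;
            case 'K': case 'k': val <<= 10; break;
            }
            return val;
    }

    int main(void)
    {
            const char *opts[] = { "1M", "2G", "4096" };

            for (unsigned i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
                    unsigned long long sz = parse_size(opts[i]);

                    if (sz == 1ULL << 20)
                            printf("%s -> 1M huge pages (EDAT1 case)\n", opts[i]);
                    else if (sz == 2ULL << 30)
                            printf("%s -> 2G huge pages (EDAT2 case)\n", opts[i]);
                    else
                            printf("%s -> unsupported size %llu\n", opts[i], sz);
            }
            return 0;
    }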
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 2489b2e917c8..f56a39bd8ba6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -40,7 +40,7 @@
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
@@ -111,17 +111,16 @@ void __init paging_init(void)
void mark_rodata_ro(void)
{
- /* Text and rodata are already protected. Nothing to do here. */
- pr_info("Write protecting the kernel read-only data: %luk\n",
- ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
+ unsigned long size = __end_ro_after_init - __start_ro_after_init;
+
+ set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
+ pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}
void __init mem_init(void)
{
- if (MACHINE_HAS_TLB_LC)
- cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+ cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(0, mm_cpumask(&init_mm));
- atomic_set(&init_mm.context.attach_count, 1);
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..3330ea124eec 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -34,20 +34,25 @@ static int __init cmma(char *str)
}
__setup("cmma=", cmma);
-void __init cmma_init(void)
+static inline int cmma_test_essa(void)
{
register unsigned long tmp asm("0") = 0;
register int rc asm("1") = -EOPNOTSUPP;
- if (!cmma_flag)
- return;
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+&d" (rc), "+&d" (tmp));
- if (rc)
+ return rc;
+}
+
+void __init cmma_init(void)
+{
+ if (!cmma_flag)
+ return;
+ if (cmma_test_essa())
cmma_flag = 0;
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index f2a5c29a97e9..7104ffb5a67f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -10,7 +10,6 @@
#include <asm/pgtable.h>
#include <asm/page.h>
-#if PAGE_DEFAULT_KEY
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -22,6 +21,8 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, size;
+ if (!PAGE_DEFAULT_KEY)
+ return;
while (start < end) {
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
@@ -38,56 +39,254 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
start += PAGE_SIZE;
}
}
-#endif
-static pte_t *walk_page_table(unsigned long addr)
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+void arch_report_meminfo(struct seq_file *m)
{
- pgd_t *pgdp;
- pud_t *pudp;
+ seq_printf(m, "DirectMap4k: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
+ seq_printf(m, "DirectMap1M: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
+ seq_printf(m, "DirectMap2G: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
+}
+#endif /* CONFIG_PROC_FS */
+
+static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
+ unsigned long dtt)
+{
+ unsigned long table, mask;
+
+ mask = 0;
+ if (MACHINE_HAS_EDAT2) {
+ switch (dtt) {
+ case CRDTE_DTT_REGION3:
+ mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
+ break;
+ case CRDTE_DTT_SEGMENT:
+ mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ break;
+ case CRDTE_DTT_PAGE:
+ mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
+ break;
+ }
+ table = (unsigned long)old & mask;
+ crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
+ } else if (MACHINE_HAS_IDTE) {
+ cspg(old, *old, new);
+ } else {
+ csp((unsigned int *)old + 1, *old, new);
+ }
+}
+
+struct cpa {
+ unsigned int set_ro : 1;
+ unsigned int clear_ro : 1;
+};
+
+static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ struct cpa cpa)
+{
+ pte_t *ptep, new;
+
+ ptep = pte_offset(pmdp, addr);
+ do {
+ if (pte_none(*ptep))
+ return -EINVAL;
+ if (cpa.set_ro)
+ new = pte_wrprotect(*ptep);
+ else if (cpa.clear_ro)
+ new = pte_mkwrite(pte_mkdirty(*ptep));
+ pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
+ ptep++;
+ addr += PAGE_SIZE;
+ cond_resched();
+ } while (addr < end);
+ return 0;
+}
+
+static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
+{
+ unsigned long pte_addr, prot;
+ pte_t *pt_dir, *ptep;
+ pmd_t new;
+ int i, ro;
+
+ pt_dir = vmem_pte_alloc();
+ if (!pt_dir)
+ return -ENOMEM;
+ pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
+ ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
+ prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+ ptep = pt_dir;
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ pte_val(*ptep) = pte_addr | prot;
+ pte_addr += PAGE_SIZE;
+ ptep++;
+ }
+ pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
+ pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+ update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
+ update_page_count(PG_DIRECT_MAP_1M, -1);
+ return 0;
+}
+
+static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
+{
+ pmd_t new;
+
+ if (cpa.set_ro)
+ new = pmd_wrprotect(*pmdp);
+ else if (cpa.clear_ro)
+ new = pmd_mkwrite(pmd_mkdirty(*pmdp));
+ pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+}
+
+static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
+ struct cpa cpa)
+{
+ unsigned long next;
pmd_t *pmdp;
- pte_t *ptep;
+ int rc = 0;
- pgdp = pgd_offset_k(addr);
- if (pgd_none(*pgdp))
- return NULL;
- pudp = pud_offset(pgdp, addr);
- if (pud_none(*pudp) || pud_large(*pudp))
- return NULL;
pmdp = pmd_offset(pudp, addr);
- if (pmd_none(*pmdp) || pmd_large(*pmdp))
- return NULL;
- ptep = pte_offset_kernel(pmdp, addr);
- if (pte_none(*ptep))
- return NULL;
- return ptep;
+ do {
+ if (pmd_none(*pmdp))
+ return -EINVAL;
+ next = pmd_addr_end(addr, end);
+ if (pmd_large(*pmdp)) {
+ if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
+ rc = split_pmd_page(pmdp, addr);
+ if (rc)
+ return rc;
+ continue;
+ }
+ modify_pmd_page(pmdp, addr, cpa);
+ } else {
+ rc = walk_pte_level(pmdp, addr, next, cpa);
+ if (rc)
+ return rc;
+ }
+ pmdp++;
+ addr = next;
+ cond_resched();
+ } while (addr < end);
+ return rc;
}
-static void change_page_attr(unsigned long addr, int numpages,
- pte_t (*set) (pte_t))
+static int split_pud_page(pud_t *pudp, unsigned long addr)
{
- pte_t *ptep;
- int i;
+ unsigned long pmd_addr, prot;
+ pmd_t *pm_dir, *pmdp;
+ pud_t new;
+ int i, ro;
- for (i = 0; i < numpages; i++) {
- ptep = walk_page_table(addr);
- if (WARN_ON_ONCE(!ptep))
- break;
- *ptep = set(*ptep);
- addr += PAGE_SIZE;
+ pm_dir = vmem_pmd_alloc();
+ if (!pm_dir)
+ return -ENOMEM;
+ pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
+ ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
+ prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
+ pmdp = pm_dir;
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd_val(*pmdp) = pmd_addr | prot;
+ pmd_addr += PMD_SIZE;
+ pmdp++;
}
- __tlb_flush_kernel();
+ pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
+ pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+ update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
+ update_page_count(PG_DIRECT_MAP_2G, -1);
+ return 0;
+}
+
+static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
+{
+ pud_t new;
+
+ if (cpa.set_ro)
+ new = pud_wrprotect(*pudp);
+ else if (cpa.clear_ro)
+ new = pud_mkwrite(pud_mkdirty(*pudp));
+ pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+}
+
+static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
+ struct cpa cpa)
+{
+ unsigned long next;
+ pud_t *pudp;
+ int rc = 0;
+
+ pudp = pud_offset(pgd, addr);
+ do {
+ if (pud_none(*pudp))
+ return -EINVAL;
+ next = pud_addr_end(addr, end);
+ if (pud_large(*pudp)) {
+ if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
+ rc = split_pud_page(pudp, addr);
+ if (rc)
+ break;
+ continue;
+ }
+ modify_pud_page(pudp, addr, cpa);
+ } else {
+ rc = walk_pmd_level(pudp, addr, next, cpa);
+ }
+ pudp++;
+ addr = next;
+ cond_resched();
+ } while (addr < end && !rc);
+ return rc;
+}
+
+static DEFINE_MUTEX(cpa_mutex);
+
+static int change_page_attr(unsigned long addr, unsigned long end,
+ struct cpa cpa)
+{
+ unsigned long next;
+ int rc = -EINVAL;
+ pgd_t *pgdp;
+
+ if (end >= MODULES_END)
+ return -EINVAL;
+ mutex_lock(&cpa_mutex);
+ pgdp = pgd_offset_k(addr);
+ do {
+ if (pgd_none(*pgdp))
+ break;
+ next = pgd_addr_end(addr, end);
+ rc = walk_pud_level(pgdp, addr, next, cpa);
+ if (rc)
+ break;
+ cond_resched();
+ } while (pgdp++, addr = next, addr < end && !rc);
+ mutex_unlock(&cpa_mutex);
+ return rc;
}
int set_memory_ro(unsigned long addr, int numpages)
{
- change_page_attr(addr, numpages, pte_wrprotect);
- return 0;
+ struct cpa cpa = {
+ .set_ro = 1,
+ };
+
+ addr &= PAGE_MASK;
+ return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}
int set_memory_rw(unsigned long addr, int numpages)
{
- change_page_attr(addr, numpages, pte_mkwrite);
- return 0;
+ struct cpa cpa = {
+ .clear_ro = 1,
+ };
+
+ addr &= PAGE_MASK;
+ return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}
/* not possible */
@@ -138,7 +337,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
nr = min(numpages - i, nr);
if (enable) {
for (j = 0; j < nr; j++) {
- pte_val(*pte) = __pa(address);
+ pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
address += PAGE_SIZE;
pte++;
}
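
Editorial note, not part of the patch: the new change_page_attr()/walk_pud_level()/walk_pmd_level() loops above step through an address range with the pgd/pud/pmd_addr_end() helpers, which advance to the next table boundary of the current level but never past the caller's end. A small runnable sketch of that clamping is below; the single SHIFT value (1 MB, the s390 segment size) is illustrative, whereas the kernel has one such helper per page-table level.

    #include <stdio.h>

    #define SHIFT   20UL                    /* 1 MB "segment" for the example */
    #define SIZE    (1UL << SHIFT)
    #define MASK    (~(SIZE - 1))

    static unsigned long addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + SIZE) & MASK;

            return boundary < end ? boundary : end;
    }

    int main(void)
    {
            unsigned long addr = 0x12345678UL, end = 0x12600000UL;

            while (addr < end) {
                    unsigned long next = addr_end(addr, end);

                    /* each chunk ends on a 1 MB boundary or at 'end' */
                    printf("chunk: %#lx - %#lx\n", addr, next);
                    addr = next;
            }
            return 0;
    }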
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e8b5962ac12a..e2565d2d0c32 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
return table;
}
/* Allocate a fresh page */
- page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ page = alloc_page(GFP_KERNEL);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4324b87f9398..b98d1a152d46 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,40 +27,37 @@
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- int active, count;
pte_t old;
old = *ptep;
if (unlikely(pte_val(old) & _PAGE_INVALID))
return old;
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ atomic_inc(&mm->context.flush_count);
+ if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__ptep_ipte_local(addr, ptep);
else
__ptep_ipte(addr, ptep);
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
return old;
}
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- int active, count;
pte_t old;
old = *ptep;
if (unlikely(pte_val(old) & _PAGE_INVALID))
return old;
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if ((count & 0xffff) <= active) {
+ atomic_inc(&mm->context.flush_count);
+ if (cpumask_equal(&mm->context.cpu_attach_mask,
+ cpumask_of(smp_processor_id()))) {
pte_val(*ptep) |= _PAGE_INVALID;
mm->context.flush_mm = 1;
} else
__ptep_ipte(addr, ptep);
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
return old;
}
@@ -70,7 +67,6 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
#ifdef CONFIG_PGSTE
unsigned long old;
- preempt_disable();
asm(
" lg %0,%2\n"
"0: lgr %1,%0\n"
@@ -93,7 +89,6 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
: "=Q" (ptep[PTRS_PER_PTE])
: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
: "cc", "memory");
- preempt_enable();
#endif
}
@@ -230,9 +225,11 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
pgste_t pgste;
pte_t old;
+ preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_direct(mm, addr, ptep);
ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ preempt_enable();
return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);
@@ -243,9 +240,11 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
pgste_t pgste;
pte_t old;
+ preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ preempt_enable();
return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
@@ -256,6 +255,7 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
pgste_t pgste;
pte_t old;
+ preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
if (mm_has_pgste(mm)) {
@@ -279,13 +279,13 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
} else {
*ptep = pte;
}
+ preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- int active, count;
pmd_t old;
old = *pmdp;
@@ -295,36 +295,34 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
__pmdp_csp(pmdp);
return old;
}
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ atomic_inc(&mm->context.flush_count);
+ if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__pmdp_idte_local(addr, pmdp);
else
__pmdp_idte(addr, pmdp);
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
return old;
}
static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- int active, count;
pmd_t old;
old = *pmdp;
if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
return old;
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if ((count & 0xffff) <= active) {
+ atomic_inc(&mm->context.flush_count);
+ if (cpumask_equal(&mm->context.cpu_attach_mask,
+ cpumask_of(smp_processor_id()))) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
} else if (MACHINE_HAS_IDTE)
__pmdp_idte(addr, pmdp);
else
__pmdp_csp(pmdp);
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
return old;
}
@@ -333,8 +331,10 @@ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
{
pmd_t old;
+ preempt_disable();
old = pmdp_flush_direct(mm, addr, pmdp);
*pmdp = new;
+ preempt_enable();
return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);
@@ -344,12 +344,53 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
{
pmd_t old;
+ preempt_disable();
old = pmdp_flush_lazy(mm, addr, pmdp);
*pmdp = new;
+ preempt_enable();
return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
+static inline pud_t pudp_flush_direct(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp)
+{
+ pud_t old;
+
+ old = *pudp;
+ if (pud_val(old) & _REGION_ENTRY_INVALID)
+ return old;
+ if (!MACHINE_HAS_IDTE) {
+ /*
+ * Invalid bit position is the same for pmd and pud, so we can
+ * re-use _pmd_csp() here
+ */
+ __pmdp_csp((pmd_t *) pudp);
+ return old;
+ }
+ atomic_inc(&mm->context.flush_count);
+ if (MACHINE_HAS_TLB_LC &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+ __pudp_idte_local(addr, pudp);
+ else
+ __pudp_idte(addr, pudp);
+ atomic_dec(&mm->context.flush_count);
+ return old;
+}
+
+pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t new)
+{
+ pud_t old;
+
+ preempt_disable();
+ old = pudp_flush_direct(mm, addr, pudp);
+ *pudp = new;
+ preempt_enable();
+ return old;
+}
+EXPORT_SYMBOL(pudp_xchg_direct);
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable)
@@ -398,20 +439,24 @@ void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
pgste_t pgste;
/* the mm_has_pgste() check is done in set_pte_at() */
+ preempt_disable();
pgste = pgste_get_lock(ptep);
pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
pgste_set_key(ptep, pgste, entry, mm);
pgste = pgste_set_pte(ptep, pgste, entry);
pgste_set_unlock(ptep, pgste);
+ preempt_enable();
}
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pgste_t pgste;
+ preempt_disable();
pgste = pgste_get_lock(ptep);
pgste_val(pgste) |= PGSTE_IN_BIT;
pgste_set_unlock(ptep, pgste);
+ preempt_enable();
}
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
@@ -434,10 +479,11 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
pte_t pte;
/* Zap unused and logically-zero pages */
+ preempt_disable();
pgste = pgste_get_lock(ptep);
pgstev = pgste_val(pgste);
pte = *ptep;
- if (pte_swap(pte) &&
+ if (!reset && pte_swap(pte) &&
((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
(pgstev & _PGSTE_GPS_ZERO))) {
ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
@@ -446,6 +492,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
if (reset)
pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
pgste_set_unlock(ptep, pgste);
+ preempt_enable();
}
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -454,6 +501,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
pgste_t pgste;
/* Clear storage key */
+ preempt_disable();
pgste = pgste_get_lock(ptep);
pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
PGSTE_GR_BIT | PGSTE_GC_BIT);
@@ -461,6 +509,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
pgste_set_unlock(ptep, pgste);
+ preempt_enable();
}
/*
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index d48cf25cfe99..1848292766ef 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -11,6 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
@@ -29,9 +30,11 @@ static LIST_HEAD(mem_segs);
static void __ref *vmem_alloc_pages(unsigned int order)
{
+ unsigned long size = PAGE_SIZE << order;
+
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
- return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+ return alloc_bootmem_align(size, size);
}
static inline pud_t *vmem_pud_alloc(void)
@@ -45,7 +48,7 @@ static inline pud_t *vmem_pud_alloc(void)
return pud;
}
-static inline pmd_t *vmem_pmd_alloc(void)
+pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
@@ -56,7 +59,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
-static pte_t __ref *vmem_pte_alloc(void)
+pte_t __ref *vmem_pte_alloc(void)
{
pte_t *pte;
@@ -75,8 +78,9 @@ static pte_t __ref *vmem_pte_alloc(void)
/*
* Add a physical memory range to the 1:1 mapping.
*/
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size)
{
+ unsigned long pages4k, pages1m, pages2g;
unsigned long end = start + size;
unsigned long address = start;
pgd_t *pg_dir;
@@ -85,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pte_t *pt_dir;
int ret = -ENOMEM;
+ pages4k = pages1m = pages2g = 0;
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -97,10 +102,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
- pud_val(*pu_dir) = __pa(address) |
- _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
- (ro ? _REGION_ENTRY_PROTECT : 0);
+ pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
address += PUD_SIZE;
+ pages2g++;
continue;
}
if (pud_none(*pu_dir)) {
@@ -113,11 +117,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
- pmd_val(*pm_dir) = __pa(address) |
- _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
- _SEGMENT_ENTRY_YOUNG |
- (ro ? _SEGMENT_ENTRY_PROTECT : 0);
+ pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
address += PMD_SIZE;
+ pages1m++;
continue;
}
if (pmd_none(*pm_dir)) {
@@ -128,12 +130,15 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
}
pt_dir = pte_offset_kernel(pm_dir, address);
- pte_val(*pt_dir) = __pa(address) |
- pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+ pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
address += PAGE_SIZE;
+ pages4k++;
}
ret = 0;
out:
+ update_page_count(PG_DIRECT_MAP_4K, pages4k);
+ update_page_count(PG_DIRECT_MAP_1M, pages1m);
+ update_page_count(PG_DIRECT_MAP_2G, pages2g);
return ret;
}
@@ -143,15 +148,15 @@ out:
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
+ unsigned long pages4k, pages1m, pages2g;
unsigned long end = start + size;
unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
- pte_t pte;
- pte_val(pte) = _PAGE_INVALID;
+ pages4k = pages1m = pages2g = 0;
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -166,6 +171,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
if (pud_large(*pu_dir)) {
pud_clear(pu_dir);
address += PUD_SIZE;
+ pages2g++;
continue;
}
pm_dir = pmd_offset(pu_dir, address);
@@ -176,13 +182,18 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
if (pmd_large(*pm_dir)) {
pmd_clear(pm_dir);
address += PMD_SIZE;
+ pages1m++;
continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
- *pt_dir = pte;
+ pte_clear(&init_mm, address, pt_dir);
address += PAGE_SIZE;
+ pages4k++;
}
flush_tlb_kernel_range(start, end);
+ update_page_count(PG_DIRECT_MAP_4K, -pages4k);
+ update_page_count(PG_DIRECT_MAP_1M, -pages1m);
+ update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}
/*
@@ -341,7 +352,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
if (ret)
goto out_free;
- ret = vmem_add_mem(start, size, 0);
+ ret = vmem_add_mem(start, size);
if (ret)
goto out_remove;
goto out;
@@ -362,31 +373,13 @@ out:
*/
void __init vmem_map_init(void)
{
- unsigned long ro_start, ro_end;
+ unsigned long size = _eshared - _stext;
struct memblock_region *reg;
- phys_addr_t start, end;
- ro_start = PFN_ALIGN((unsigned long)&_stext);
- ro_end = (unsigned long)&_eshared & PAGE_MASK;
- for_each_memblock(memory, reg) {
- start = reg->base;
- end = reg->base + reg->size;
- if (start >= ro_end || end <= ro_start)
- vmem_add_mem(start, end - start, 0);
- else if (start >= ro_start && end <= ro_end)
- vmem_add_mem(start, end - start, 1);
- else if (start >= ro_start) {
- vmem_add_mem(start, ro_end - start, 1);
- vmem_add_mem(ro_end, end - ro_end, 0);
- } else if (end < ro_end) {
- vmem_add_mem(start, ro_start - start, 0);
- vmem_add_mem(ro_start, end - ro_start, 1);
- } else {
- vmem_add_mem(start, ro_start - start, 0);
- vmem_add_mem(ro_start, ro_end - ro_start, 1);
- vmem_add_mem(ro_end, end - ro_end, 0);
- }
- }
+ for_each_memblock(memory, reg)
+ vmem_add_mem(reg->base, reg->size);
+ set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
+ pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
}
/*
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f010c93a88b1..fda605dbc1b4 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* | | |
* +---------------+ |
* | 8 byte skbp | |
- * R15+170 -> +---------------+ |
+ * R15+176 -> +---------------+ |
* | 8 byte hlen | |
* R15+168 -> +---------------+ |
* | 4 byte align | |
@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
#define STK_OFF (STK_SPACE - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
-#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */
+#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9133b0ec000b..bee281f3163d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@ struct bpf_jit {
int labels[1]; /* Labels for local jumps */
};
-#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */
+#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
#define SEEN_SKB 1 /* skb access */
#define SEEN_MEM 2 /* use mem[] for temporary storage */
@@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
emit_load_skb_data_hlen(jit);
if (jit->seen & SEEN_SKB_CHANGE)
/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
STK_OFF_SKBP);
}
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 828d0695d0d4..fbc394e16b2c 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -34,7 +34,8 @@
#define DIST_CORE 1
#define DIST_MC 2
#define DIST_BOOK 3
-#define DIST_MAX 4
+#define DIST_DRAWER 4
+#define DIST_MAX 5
/* Node distance reported to common code */
#define EMU_NODE_DIST 10
@@ -43,7 +44,7 @@
#define NODE_ID_FREE -1
/* Different levels of toptree */
-enum toptree_level {CORE, MC, BOOK, NODE, TOPOLOGY};
+enum toptree_level {CORE, MC, BOOK, DRAWER, NODE, TOPOLOGY};
/* The two toptree IDs */
enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};
@@ -114,6 +115,14 @@ static int cores_free(struct toptree *tree)
*/
static struct toptree *core_node(struct toptree *core)
{
+ return core->parent->parent->parent->parent;
+}
+
+/*
+ * Return drawer of core
+ */
+static struct toptree *core_drawer(struct toptree *core)
+{
return core->parent->parent->parent;
}
@@ -138,6 +147,8 @@ static struct toptree *core_mc(struct toptree *core)
*/
static int dist_core_to_core(struct toptree *core1, struct toptree *core2)
{
+ if (core_drawer(core1)->id != core_drawer(core2)->id)
+ return DIST_DRAWER;
if (core_book(core1)->id != core_book(core2)->id)
return DIST_BOOK;
if (core_mc(core1)->id != core_mc(core2)->id)
@@ -262,6 +273,8 @@ static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys)
struct toptree *core;
/* Always try to move perfectly fitting structures first */
+ move_level_to_numa(numa, phys, DRAWER, true);
+ move_level_to_numa(numa, phys, DRAWER, false);
move_level_to_numa(numa, phys, BOOK, true);
move_level_to_numa(numa, phys, BOOK, false);
move_level_to_numa(numa, phys, MC, true);
@@ -335,7 +348,7 @@ static struct toptree *toptree_to_numa(struct toptree *phys)
*/
static struct toptree *toptree_from_topology(void)
{
- struct toptree *phys, *node, *book, *mc, *core;
+ struct toptree *phys, *node, *drawer, *book, *mc, *core;
struct cpu_topology_s390 *top;
int cpu;
@@ -344,10 +357,11 @@ static struct toptree *toptree_from_topology(void)
for_each_online_cpu(cpu) {
top = &per_cpu(cpu_topology, cpu);
node = toptree_get_child(phys, 0);
- book = toptree_get_child(node, top->book_id);
+ drawer = toptree_get_child(node, top->drawer_id);
+ book = toptree_get_child(drawer, top->book_id);
mc = toptree_get_child(book, top->socket_id);
core = toptree_get_child(mc, top->core_id);
- if (!book || !mc || !core)
+ if (!drawer || !book || !mc || !core)
panic("NUMA emulation could not allocate memory");
cpumask_set_cpu(cpu, &core->mask);
toptree_update_mask(mc);
@@ -368,6 +382,7 @@ static void topology_add_core(struct toptree *core)
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
+ cpumask_copy(&top->drawer_mask, &core_drawer(core)->mask);
cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
top->node_id = core_node(core)->id;
}
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 496e4a7ee00e..e9dd41b0b8d3 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -7,4 +7,3 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o
-oprofile-y += hwsampler.o
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
deleted file mode 100644
index ff9b4eb34589..000000000000
--- a/arch/s390/oprofile/hwsampler.c
+++ /dev/null
@@ -1,1178 +0,0 @@
-/*
- * Copyright IBM Corp. 2010
- * Author: Heinz Graalfs <graalfs@de.ibm.com>
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/semaphore.h>
-#include <linux/oom.h>
-#include <linux/oprofile.h>
-
-#include <asm/facility.h>
-#include <asm/cpu_mf.h>
-#include <asm/irq.h>
-
-#include "hwsampler.h"
-#include "op_counter.h"
-
-#define MAX_NUM_SDB 511
-#define MIN_NUM_SDB 1
-
-DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
-
-struct hws_execute_parms {
- void *buffer;
- signed int rc;
-};
-
-DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
-EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);
-
-static DEFINE_MUTEX(hws_sem);
-static DEFINE_MUTEX(hws_sem_oom);
-
-static unsigned char hws_flush_all;
-static unsigned int hws_oom;
-static unsigned int hws_alert;
-static struct workqueue_struct *hws_wq;
-
-static unsigned int hws_state;
-enum {
- HWS_INIT = 1,
- HWS_DEALLOCATED,
- HWS_STOPPED,
- HWS_STARTED,
- HWS_STOPPING };
-
-/* set to 1 if called by kernel during memory allocation */
-static unsigned char oom_killer_was_active;
-/* size of SDBT and SDB as of allocate API */
-static unsigned long num_sdbt = 100;
-static unsigned long num_sdb = 511;
-/* sampling interval (machine cycles) */
-static unsigned long interval;
-
-static unsigned long min_sampler_rate;
-static unsigned long max_sampler_rate;
-
-static void execute_qsi(void *parms)
-{
- struct hws_execute_parms *ep = parms;
-
- ep->rc = qsi(ep->buffer);
-}
-
-static void execute_ssctl(void *parms)
-{
- struct hws_execute_parms *ep = parms;
-
- ep->rc = lsctl(ep->buffer);
-}
-
-static int smp_ctl_ssctl_stop(int cpu)
-{
- int rc;
- struct hws_execute_parms ep;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- cb->ssctl.es = 0;
- cb->ssctl.cs = 0;
-
- ep.buffer = &cb->ssctl;
- smp_call_function_single(cpu, execute_ssctl, &ep, 1);
- rc = ep.rc;
- if (rc) {
- printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
- dump_stack();
- }
-
- ep.buffer = &cb->qsi;
- smp_call_function_single(cpu, execute_qsi, &ep, 1);
-
- if (cb->qsi.es || cb->qsi.cs) {
- printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
- dump_stack();
- }
-
- return rc;
-}
-
-static int smp_ctl_ssctl_deactivate(int cpu)
-{
- int rc;
- struct hws_execute_parms ep;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- cb->ssctl.es = 1;
- cb->ssctl.cs = 0;
-
- ep.buffer = &cb->ssctl;
- smp_call_function_single(cpu, execute_ssctl, &ep, 1);
- rc = ep.rc;
- if (rc)
- printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
-
- ep.buffer = &cb->qsi;
- smp_call_function_single(cpu, execute_qsi, &ep, 1);
-
- if (cb->qsi.cs)
- printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");
-
- return rc;
-}
-
-static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
-{
- int rc;
- struct hws_execute_parms ep;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- cb->ssctl.h = 1;
- cb->ssctl.tear = cb->first_sdbt;
- cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
- cb->ssctl.interval = interval;
- cb->ssctl.es = 1;
- cb->ssctl.cs = 1;
-
- ep.buffer = &cb->ssctl;
- smp_call_function_single(cpu, execute_ssctl, &ep, 1);
- rc = ep.rc;
- if (rc)
- printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
-
- ep.buffer = &cb->qsi;
- smp_call_function_single(cpu, execute_qsi, &ep, 1);
- if (ep.rc)
- printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
-
- return rc;
-}
-
-static int smp_ctl_qsi(int cpu)
-{
- struct hws_execute_parms ep;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- ep.buffer = &cb->qsi;
- smp_call_function_single(cpu, execute_qsi, &ep, 1);
-
- return ep.rc;
-}
-
-static void hws_ext_handler(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
-{
- struct hws_cpu_buffer *cb = this_cpu_ptr(&sampler_cpu_buffer);
-
- if (!(param32 & CPU_MF_INT_SF_MASK))
- return;
-
- if (!hws_alert)
- return;
-
- inc_irq_stat(IRQEXT_CMS);
- atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
-
- if (hws_wq)
- queue_work(hws_wq, &cb->worker);
-}
-
-static void worker(struct work_struct *work);
-
-static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
- unsigned long *dear);
-
-static void init_all_cpu_buffers(void)
-{
- int cpu;
- struct hws_cpu_buffer *cb;
-
- for_each_online_cpu(cpu) {
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- memset(cb, 0, sizeof(struct hws_cpu_buffer));
- }
-}
-
-static void prepare_cpu_buffers(void)
-{
- struct hws_cpu_buffer *cb;
- int cpu;
-
- for_each_online_cpu(cpu) {
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- atomic_set(&cb->ext_params, 0);
- cb->worker_entry = 0;
- cb->sample_overflow = 0;
- cb->req_alert = 0;
- cb->incorrect_sdbt_entry = 0;
- cb->invalid_entry_address = 0;
- cb->loss_of_sample_data = 0;
- cb->sample_auth_change_alert = 0;
- cb->finish = 0;
- cb->oom = 0;
- cb->stop_mode = 0;
- }
-}
-
-/*
- * allocate_sdbt() - allocate sampler memory
- * @cpu: the cpu for which sampler memory is allocated
- *
- * A 4K page is allocated for each requested SDBT.
- * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
- * Set ALERT_REQ mask in each SDBs trailer.
- * Returns zero if successful, <0 otherwise.
- */
-static int allocate_sdbt(int cpu)
-{
- int j, k, rc;
- unsigned long *sdbt;
- unsigned long sdb;
- unsigned long *tail;
- unsigned long *trailer;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- if (cb->first_sdbt)
- return -EINVAL;
-
- sdbt = NULL;
- tail = sdbt;
-
- for (j = 0; j < num_sdbt; j++) {
- sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
-
- mutex_lock(&hws_sem_oom);
- /* OOM killer might have been activated */
- barrier();
- if (oom_killer_was_active || !sdbt) {
- if (sdbt)
- free_page((unsigned long)sdbt);
-
- goto allocate_sdbt_error;
- }
- if (cb->first_sdbt == 0)
- cb->first_sdbt = (unsigned long)sdbt;
-
- /* link current page to tail of chain */
- if (tail)
- *tail = (unsigned long)(void *)sdbt + 1;
-
- mutex_unlock(&hws_sem_oom);
-
- for (k = 0; k < num_sdb; k++) {
- /* get and set SDB page */
- sdb = get_zeroed_page(GFP_KERNEL);
-
- mutex_lock(&hws_sem_oom);
- /* OOM killer might have been activated */
- barrier();
- if (oom_killer_was_active || !sdb) {
- if (sdb)
- free_page(sdb);
-
- goto allocate_sdbt_error;
- }
- *sdbt = sdb;
- trailer = trailer_entry_ptr(*sdbt);
- *trailer = SDB_TE_ALERT_REQ_MASK;
- sdbt++;
- mutex_unlock(&hws_sem_oom);
- }
- tail = sdbt;
- }
- mutex_lock(&hws_sem_oom);
- if (oom_killer_was_active)
- goto allocate_sdbt_error;
-
- rc = 0;
- if (tail)
- *tail = (unsigned long)
- ((void *)cb->first_sdbt) + 1;
-
-allocate_sdbt_exit:
- mutex_unlock(&hws_sem_oom);
- return rc;
-
-allocate_sdbt_error:
- rc = -ENOMEM;
- goto allocate_sdbt_exit;
-}
-
-/*
- * deallocate_sdbt() - deallocate all sampler memory
- *
- * For each online CPU all SDBT trees are deallocated.
- * Returns the number of freed pages.
- */
-static int deallocate_sdbt(void)
-{
- int cpu;
- int counter;
-
- counter = 0;
-
- for_each_online_cpu(cpu) {
- unsigned long start;
- unsigned long sdbt;
- unsigned long *curr;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- if (!cb->first_sdbt)
- continue;
-
- sdbt = cb->first_sdbt;
- curr = (unsigned long *) sdbt;
- start = sdbt;
-
- /* we'll free the SDBT after all SDBs are processed... */
- while (1) {
- if (!*curr || !sdbt)
- break;
-
- /* watch for link entry reset if found */
- if (is_link_entry(curr)) {
- curr = get_next_sdbt(curr);
- if (sdbt)
- free_page(sdbt);
-
- /* we are done if we reach the start */
- if ((unsigned long) curr == start)
- break;
- else
- sdbt = (unsigned long) curr;
- } else {
- /* process SDB pointer */
- if (*curr) {
- free_page(*curr);
- curr++;
- }
- }
- counter++;
- }
- cb->first_sdbt = 0;
- }
- return counter;
-}
-
-static int start_sampling(int cpu)
-{
- int rc;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- rc = smp_ctl_ssctl_enable_activate(cpu, interval);
- if (rc) {
- printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
- goto start_exit;
- }
-
- rc = -EINVAL;
- if (!cb->qsi.es) {
- printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
- goto start_exit;
- }
-
- if (!cb->qsi.cs) {
- printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
- goto start_exit;
- }
-
- printk(KERN_INFO
- "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
- cpu, interval);
-
- rc = 0;
-
-start_exit:
- return rc;
-}
-
-static int stop_sampling(int cpu)
-{
- unsigned long v;
- int rc;
- struct hws_cpu_buffer *cb;
-
- rc = smp_ctl_qsi(cpu);
- WARN_ON(rc);
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- if (!rc && !cb->qsi.es)
- printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);
-
- rc = smp_ctl_ssctl_stop(cpu);
- if (rc) {
- printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
- cpu, rc);
- goto stop_exit;
- }
-
- printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);
-
-stop_exit:
- v = cb->req_alert;
- if (v)
- printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
- " count=%lu.\n", cpu, v);
-
- v = cb->loss_of_sample_data;
- if (v)
- printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
- " count=%lu.\n", cpu, v);
-
- v = cb->invalid_entry_address;
- if (v)
- printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
- " count=%lu.\n", cpu, v);
-
- v = cb->incorrect_sdbt_entry;
- if (v)
- printk(KERN_ERR
- "hwsampler: CPU %d CPUMF Incorrect SDBT address,"
- " count=%lu.\n", cpu, v);
-
- v = cb->sample_auth_change_alert;
- if (v)
- printk(KERN_ERR
- "hwsampler: CPU %d CPUMF Sample authorization change,"
- " count=%lu.\n", cpu, v);
-
- return rc;
-}
-
-static int check_hardware_prerequisites(void)
-{
- if (!test_facility(68))
- return -EOPNOTSUPP;
- return 0;
-}
-/*
- * hws_oom_callback() - the OOM callback function
- *
- * In case the callback is invoked during memory allocation for the
- * hw sampler, all obtained memory is deallocated and a flag is set
- * so main sampler memory allocation can exit with a failure code.
- * In case the callback is invoked during sampling the hw sampler
- * is deactivated for all CPUs.
- */
-static int hws_oom_callback(struct notifier_block *nfb,
- unsigned long dummy, void *parm)
-{
- unsigned long *freed;
- int cpu;
- struct hws_cpu_buffer *cb;
-
- freed = parm;
-
- mutex_lock(&hws_sem_oom);
-
- if (hws_state == HWS_DEALLOCATED) {
- /* during memory allocation */
- if (oom_killer_was_active == 0) {
- oom_killer_was_active = 1;
- *freed += deallocate_sdbt();
- }
- } else {
- int i;
- cpu = get_cpu();
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- if (!cb->oom) {
- for_each_online_cpu(i) {
- smp_ctl_ssctl_deactivate(i);
- cb->oom = 1;
- }
- cb->finish = 1;
-
- printk(KERN_INFO
- "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
- cpu);
- }
- }
-
- mutex_unlock(&hws_sem_oom);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block hws_oom_notifier = {
- .notifier_call = hws_oom_callback
-};
-
-static int hws_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- /* We do not have sampler space available for all possible CPUs.
- All CPUs should be online when hw sampling is activated. */
- return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
-}
-
-static struct notifier_block hws_cpu_notifier = {
- .notifier_call = hws_cpu_callback
-};
-
-/**
- * hwsampler_deactivate() - set hardware sampling temporarily inactive
- * @cpu: specifies the CPU to be set inactive.
- *
- * Returns 0 on success, !0 on failure.
- */
-int hwsampler_deactivate(unsigned int cpu)
-{
- /*
- * Deactivate hw sampling temporarily and flush the buffer
- * by pushing all the pending samples to oprofile buffer.
- *
- * This function can be called under one of the following conditions:
- * Memory unmap, task is exiting.
- */
- int rc;
- struct hws_cpu_buffer *cb;
-
- rc = 0;
- mutex_lock(&hws_sem);
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- if (hws_state == HWS_STARTED) {
- rc = smp_ctl_qsi(cpu);
- WARN_ON(rc);
- if (cb->qsi.cs) {
- rc = smp_ctl_ssctl_deactivate(cpu);
- if (rc) {
- printk(KERN_INFO
- "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
- cb->finish = 1;
- hws_state = HWS_STOPPING;
- } else {
- hws_flush_all = 1;
- /* Add work to queue to read pending samples.*/
- queue_work_on(cpu, hws_wq, &cb->worker);
- }
- }
- }
- mutex_unlock(&hws_sem);
-
- if (hws_wq)
- flush_workqueue(hws_wq);
-
- return rc;
-}
-
-/**
- * hwsampler_activate() - activate/resume hardware sampling which was deactivated
- * @cpu: specifies the CPU to be set active.
- *
- * Returns 0 on success, !0 on failure.
- */
-int hwsampler_activate(unsigned int cpu)
-{
- /*
- * Re-activate hw sampling. This should be called in pair with
- * hwsampler_deactivate().
- */
- int rc;
- struct hws_cpu_buffer *cb;
-
- rc = 0;
- mutex_lock(&hws_sem);
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- if (hws_state == HWS_STARTED) {
- rc = smp_ctl_qsi(cpu);
- WARN_ON(rc);
- if (!cb->qsi.cs) {
- hws_flush_all = 0;
- rc = smp_ctl_ssctl_enable_activate(cpu, interval);
- if (rc) {
- printk(KERN_ERR
- "CPU %d, CPUMF activate sampling failed.\n",
- cpu);
- }
- }
- }
-
- mutex_unlock(&hws_sem);
-
- return rc;
-}
-
-static int check_qsi_on_setup(void)
-{
- int rc;
- unsigned int cpu;
- struct hws_cpu_buffer *cb;
-
- for_each_online_cpu(cpu) {
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- rc = smp_ctl_qsi(cpu);
- WARN_ON(rc);
- if (rc)
- return -EOPNOTSUPP;
-
- if (!cb->qsi.as) {
- printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
- return -EINVAL;
- }
-
- if (cb->qsi.es) {
- printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
- rc = smp_ctl_ssctl_stop(cpu);
- if (rc)
- return -EINVAL;
-
- printk(KERN_INFO
- "CPU %d, CPUMF Sampling stopped now.\n", cpu);
- }
- }
- return 0;
-}
-
-static int check_qsi_on_start(void)
-{
- unsigned int cpu;
- int rc;
- struct hws_cpu_buffer *cb;
-
- for_each_online_cpu(cpu) {
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- rc = smp_ctl_qsi(cpu);
- WARN_ON(rc);
-
- if (!cb->qsi.as)
- return -EINVAL;
-
- if (cb->qsi.es)
- return -EINVAL;
-
- if (cb->qsi.cs)
- return -EINVAL;
- }
- return 0;
-}
-
-static void worker_on_start(unsigned int cpu)
-{
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- cb->worker_entry = cb->first_sdbt;
-}
-
-static int worker_check_error(unsigned int cpu, int ext_params)
-{
- int rc;
- unsigned long *sdbt;
- struct hws_cpu_buffer *cb;
-
- rc = 0;
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- sdbt = (unsigned long *) cb->worker_entry;
-
- if (!sdbt || !*sdbt)
- return -EINVAL;
-
- if (ext_params & CPU_MF_INT_SF_PRA)
- cb->req_alert++;
-
- if (ext_params & CPU_MF_INT_SF_LSDA)
- cb->loss_of_sample_data++;
-
- if (ext_params & CPU_MF_INT_SF_IAE) {
- cb->invalid_entry_address++;
- rc = -EINVAL;
- }
-
- if (ext_params & CPU_MF_INT_SF_ISE) {
- cb->incorrect_sdbt_entry++;
- rc = -EINVAL;
- }
-
- if (ext_params & CPU_MF_INT_SF_SACA) {
- cb->sample_auth_change_alert++;
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-static void worker_on_finish(unsigned int cpu)
-{
- int rc, i;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- if (cb->finish) {
- rc = smp_ctl_qsi(cpu);
- WARN_ON(rc);
- if (cb->qsi.es) {
- printk(KERN_INFO
- "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
- cpu);
- rc = smp_ctl_ssctl_stop(cpu);
- if (rc)
- printk(KERN_INFO
- "hwsampler: CPU %d, CPUMF Deactivation failed.\n",
- cpu);
-
- for_each_online_cpu(i) {
- if (i == cpu)
- continue;
- if (!cb->finish) {
- cb->finish = 1;
- queue_work_on(i, hws_wq,
- &cb->worker);
- }
- }
- }
- }
-}
-
-static void worker_on_interrupt(unsigned int cpu)
-{
- unsigned long *sdbt;
- unsigned char done;
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- sdbt = (unsigned long *) cb->worker_entry;
-
- done = 0;
- /* do not proceed if stop was entered,
- * forget the buffers not yet processed */
- while (!done && !cb->stop_mode) {
- unsigned long *trailer;
- struct hws_trailer_entry *te;
- unsigned long *dear = 0;
-
- trailer = trailer_entry_ptr(*sdbt);
- /* leave loop if no more work to do */
- if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) {
- done = 1;
- if (!hws_flush_all)
- continue;
- }
-
- te = (struct hws_trailer_entry *)trailer;
- cb->sample_overflow += te->overflow;
-
- add_samples_to_oprofile(cpu, sdbt, dear);
-
- /* reset trailer */
- xchg((unsigned char *) te, 0x40);
-
- /* advance to next sdb slot in current sdbt */
- sdbt++;
- /* in case link bit is set use address w/o link bit */
- if (is_link_entry(sdbt))
- sdbt = get_next_sdbt(sdbt);
-
- cb->worker_entry = (unsigned long)sdbt;
- }
-}
-
-static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
- unsigned long *dear)
-{
- struct hws_basic_entry *sample_data_ptr;
- unsigned long *trailer;
-
- trailer = trailer_entry_ptr(*sdbt);
- if (dear) {
- if (dear > trailer)
- return;
- trailer = dear;
- }
-
- sample_data_ptr = (struct hws_basic_entry *)(*sdbt);
-
- while ((unsigned long *)sample_data_ptr < trailer) {
- struct pt_regs *regs = NULL;
- struct task_struct *tsk = NULL;
-
- /*
- * Check sampling mode, 1 indicates basic (=customer) sampling
- * mode.
- */
- if (sample_data_ptr->def != 1) {
- /* sample slot is not yet written */
- break;
- } else {
- /* make sure we don't use it twice,
- * the next time the sampler will set it again */
- sample_data_ptr->def = 0;
- }
-
- /* Get pt_regs. */
- if (sample_data_ptr->P == 1) {
- /* userspace sample */
- unsigned int pid = sample_data_ptr->prim_asn;
- if (!counter_config.user)
- goto skip_sample;
- rcu_read_lock();
- tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
- if (tsk)
- regs = task_pt_regs(tsk);
- rcu_read_unlock();
- } else {
- /* kernelspace sample */
- if (!counter_config.kernel)
- goto skip_sample;
- regs = task_pt_regs(current);
- }
-
- mutex_lock(&hws_sem);
- oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
- !sample_data_ptr->P, tsk);
- mutex_unlock(&hws_sem);
- skip_sample:
- sample_data_ptr++;
- }
-}
-
-static void worker(struct work_struct *work)
-{
- unsigned int cpu;
- int ext_params;
- struct hws_cpu_buffer *cb;
-
- cb = container_of(work, struct hws_cpu_buffer, worker);
- cpu = smp_processor_id();
- ext_params = atomic_xchg(&cb->ext_params, 0);
-
- if (!cb->worker_entry)
- worker_on_start(cpu);
-
- if (worker_check_error(cpu, ext_params))
- return;
-
- if (!cb->finish)
- worker_on_interrupt(cpu);
-
- if (cb->finish)
- worker_on_finish(cpu);
-}
-
-/**
- * hwsampler_allocate() - allocate memory for the hardware sampler
- * @sdbt: number of SDBTs per online CPU (must be > 0)
- * @sdb: number of SDBs per SDBT (minimum 1, maximum 511)
- *
- * Returns 0 on success, !0 on failure.
- */
-int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
-{
- int cpu, rc;
- mutex_lock(&hws_sem);
-
- rc = -EINVAL;
- if (hws_state != HWS_DEALLOCATED)
- goto allocate_exit;
-
- if (sdbt < 1)
- goto allocate_exit;
-
- if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
- goto allocate_exit;
-
- num_sdbt = sdbt;
- num_sdb = sdb;
-
- oom_killer_was_active = 0;
- register_oom_notifier(&hws_oom_notifier);
-
- for_each_online_cpu(cpu) {
- if (allocate_sdbt(cpu)) {
- unregister_oom_notifier(&hws_oom_notifier);
- goto allocate_error;
- }
- }
- unregister_oom_notifier(&hws_oom_notifier);
- if (oom_killer_was_active)
- goto allocate_error;
-
- hws_state = HWS_STOPPED;
- rc = 0;
-
-allocate_exit:
- mutex_unlock(&hws_sem);
- return rc;
-
-allocate_error:
- rc = -ENOMEM;
- printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
- goto allocate_exit;
-}
-
-/**
- * hwsampler_deallocate() - deallocate hardware sampler memory
- *
- * Returns 0 on success, !0 on failure.
- */
-int hwsampler_deallocate(void)
-{
- int rc;
-
- mutex_lock(&hws_sem);
-
- rc = -EINVAL;
- if (hws_state != HWS_STOPPED)
- goto deallocate_exit;
-
- irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
- hws_alert = 0;
- deallocate_sdbt();
-
- hws_state = HWS_DEALLOCATED;
- rc = 0;
-
-deallocate_exit:
- mutex_unlock(&hws_sem);
-
- return rc;
-}
-
-unsigned long hwsampler_query_min_interval(void)
-{
- return min_sampler_rate;
-}
-
-unsigned long hwsampler_query_max_interval(void)
-{
- return max_sampler_rate;
-}
-
-unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
-{
- struct hws_cpu_buffer *cb;
-
- cb = &per_cpu(sampler_cpu_buffer, cpu);
-
- return cb->sample_overflow;
-}
-
-int hwsampler_setup(void)
-{
- int rc;
- int cpu;
- struct hws_cpu_buffer *cb;
-
- mutex_lock(&hws_sem);
-
- rc = -EINVAL;
- if (hws_state)
- goto setup_exit;
-
- hws_state = HWS_INIT;
-
- init_all_cpu_buffers();
-
- rc = check_hardware_prerequisites();
- if (rc)
- goto setup_exit;
-
- rc = check_qsi_on_setup();
- if (rc)
- goto setup_exit;
-
- rc = -EINVAL;
- hws_wq = create_workqueue("hwsampler");
- if (!hws_wq)
- goto setup_exit;
-
- register_cpu_notifier(&hws_cpu_notifier);
-
- for_each_online_cpu(cpu) {
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- INIT_WORK(&cb->worker, worker);
- rc = smp_ctl_qsi(cpu);
- WARN_ON(rc);
- if (min_sampler_rate != cb->qsi.min_sampl_rate) {
- if (min_sampler_rate) {
- printk(KERN_WARNING
- "hwsampler: different min sampler rate values.\n");
- if (min_sampler_rate < cb->qsi.min_sampl_rate)
- min_sampler_rate =
- cb->qsi.min_sampl_rate;
- } else
- min_sampler_rate = cb->qsi.min_sampl_rate;
- }
- if (max_sampler_rate != cb->qsi.max_sampl_rate) {
- if (max_sampler_rate) {
- printk(KERN_WARNING
- "hwsampler: different max sampler rate values.\n");
- if (max_sampler_rate > cb->qsi.max_sampl_rate)
- max_sampler_rate =
- cb->qsi.max_sampl_rate;
- } else
- max_sampler_rate = cb->qsi.max_sampl_rate;
- }
- }
- register_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
-
- hws_state = HWS_DEALLOCATED;
- rc = 0;
-
-setup_exit:
- mutex_unlock(&hws_sem);
- return rc;
-}
-
-int hwsampler_shutdown(void)
-{
- int rc;
-
- mutex_lock(&hws_sem);
-
- rc = -EINVAL;
- if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
- mutex_unlock(&hws_sem);
-
- if (hws_wq)
- flush_workqueue(hws_wq);
-
- mutex_lock(&hws_sem);
-
- if (hws_state == HWS_STOPPED) {
- irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
- hws_alert = 0;
- deallocate_sdbt();
- }
- if (hws_wq) {
- destroy_workqueue(hws_wq);
- hws_wq = NULL;
- }
-
- unregister_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
- hws_state = HWS_INIT;
- rc = 0;
- }
- mutex_unlock(&hws_sem);
-
- unregister_cpu_notifier(&hws_cpu_notifier);
-
- return rc;
-}
-
-/**
- * hwsampler_start_all() - start hardware sampling on all online CPUs
- * @rate: specifies the used interval when samples are taken
- *
- * Returns 0 on success, !0 on failure.
- */
-int hwsampler_start_all(unsigned long rate)
-{
- int rc, cpu;
-
- mutex_lock(&hws_sem);
-
- hws_oom = 0;
-
- rc = -EINVAL;
- if (hws_state != HWS_STOPPED)
- goto start_all_exit;
-
- interval = rate;
-
- /* fail if rate is not valid */
- if (interval < min_sampler_rate || interval > max_sampler_rate)
- goto start_all_exit;
-
- rc = check_qsi_on_start();
- if (rc)
- goto start_all_exit;
-
- prepare_cpu_buffers();
-
- for_each_online_cpu(cpu) {
- rc = start_sampling(cpu);
- if (rc)
- break;
- }
- if (rc) {
- for_each_online_cpu(cpu) {
- stop_sampling(cpu);
- }
- goto start_all_exit;
- }
- hws_state = HWS_STARTED;
- rc = 0;
-
-start_all_exit:
- mutex_unlock(&hws_sem);
-
- if (rc)
- return rc;
-
- register_oom_notifier(&hws_oom_notifier);
- hws_oom = 1;
- hws_flush_all = 0;
- /* now let them in, 1407 CPUMF external interrupts */
- hws_alert = 1;
- irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
- return 0;
-}
-
-/**
- * hwsampler_stop_all() - stop hardware sampling on all online CPUs
- *
- * Returns 0 on success, !0 on failure.
- */
-int hwsampler_stop_all(void)
-{
- int tmp_rc, rc, cpu;
- struct hws_cpu_buffer *cb;
-
- mutex_lock(&hws_sem);
-
- rc = 0;
- if (hws_state == HWS_INIT) {
- mutex_unlock(&hws_sem);
- return 0;
- }
- hws_state = HWS_STOPPING;
- mutex_unlock(&hws_sem);
-
- for_each_online_cpu(cpu) {
- cb = &per_cpu(sampler_cpu_buffer, cpu);
- cb->stop_mode = 1;
- tmp_rc = stop_sampling(cpu);
- if (tmp_rc)
- rc = tmp_rc;
- }
-
- if (hws_wq)
- flush_workqueue(hws_wq);
-
- mutex_lock(&hws_sem);
- if (hws_oom) {
- unregister_oom_notifier(&hws_oom_notifier);
- hws_oom = 0;
- }
- hws_state = HWS_STOPPED;
- mutex_unlock(&hws_sem);
-
- return rc;
-}
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
deleted file mode 100644
index a483d06f2fa7..000000000000
--- a/arch/s390/oprofile/hwsampler.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * CPUMF HW sampler functions and internal structures
- *
- * Copyright IBM Corp. 2010
- * Author(s): Heinz Graalfs <graalfs@de.ibm.com>
- */
-
-#ifndef HWSAMPLER_H_
-#define HWSAMPLER_H_
-
-#include <linux/workqueue.h>
-#include <asm/cpu_mf.h>
-
-struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */
-{ /* bytes 0 - 7 Bit(s) */
- unsigned int s:1; /* 0: maximum buffer indicator */
- unsigned int h:1; /* 1: part. level reserved for VM use*/
- unsigned long b2_53:52; /* 2-53: zeros */
- unsigned int es:1; /* 54: sampling enable control */
- unsigned int b55_61:7; /* 55-61: - zeros */
- unsigned int cs:1; /* 62: sampling activation control */
- unsigned int b63:1; /* 63: zero */
- unsigned long interval; /* 8-15: sampling interval */
- unsigned long tear; /* 16-23: TEAR contents */
- unsigned long dear; /* 24-31: DEAR contents */
- /* 32-63: */
- unsigned long rsvrd1; /* reserved */
- unsigned long rsvrd2; /* reserved */
- unsigned long rsvrd3; /* reserved */
- unsigned long rsvrd4; /* reserved */
-};
-
-struct hws_cpu_buffer {
- unsigned long first_sdbt; /* @ of 1st SDB-Table for this CP*/
- unsigned long worker_entry;
- unsigned long sample_overflow; /* taken from SDB ... */
- struct hws_qsi_info_block qsi;
- struct hws_ssctl_request_block ssctl;
- struct work_struct worker;
- atomic_t ext_params;
- unsigned long req_alert;
- unsigned long loss_of_sample_data;
- unsigned long invalid_entry_address;
- unsigned long incorrect_sdbt_entry;
- unsigned long sample_auth_change_alert;
- unsigned int finish:1;
- unsigned int oom:1;
- unsigned int stop_mode:1;
-};
-
-int hwsampler_setup(void);
-int hwsampler_shutdown(void);
-int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
-int hwsampler_deallocate(void);
-unsigned long hwsampler_query_min_interval(void);
-unsigned long hwsampler_query_max_interval(void);
-int hwsampler_start_all(unsigned long interval);
-int hwsampler_stop_all(void);
-int hwsampler_deactivate(unsigned int cpu);
-int hwsampler_activate(unsigned int cpu);
-unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu);
-
-#endif /*HWSAMPLER_H_*/
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 791935a65800..16f4c3960b87 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -10,488 +10,8 @@
*/
#include <linux/oprofile.h>
-#include <linux/perf_event.h>
#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/module.h>
#include <asm/processor.h>
-#include <asm/perf_event.h>
-
-#include "../../../drivers/oprofile/oprof.h"
-
-#include "hwsampler.h"
-#include "op_counter.h"
-
-#define DEFAULT_INTERVAL 4127518
-
-#define DEFAULT_SDBT_BLOCKS 1
-#define DEFAULT_SDB_BLOCKS 511
-
-static unsigned long oprofile_hw_interval = DEFAULT_INTERVAL;
-static unsigned long oprofile_min_interval;
-static unsigned long oprofile_max_interval;
-
-static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS;
-static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS;
-
-static int hwsampler_enabled;
-static int hwsampler_running; /* start_mutex must be held to change */
-static int hwsampler_available;
-
-static struct oprofile_operations timer_ops;
-
-struct op_counter_config counter_config;
-
-enum __force_cpu_type {
- reserved = 0, /* do not force */
- timer,
-};
-static int force_cpu_type;
-
-static int set_cpu_type(const char *str, struct kernel_param *kp)
-{
- if (!strcmp(str, "timer")) {
- force_cpu_type = timer;
- printk(KERN_INFO "oprofile: forcing timer to be returned "
- "as cpu type\n");
- } else {
- force_cpu_type = 0;
- }
-
- return 0;
-}
-module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
-MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling"
- "(report cpu_type \"timer\"");
-
-static int __oprofile_hwsampler_start(void)
-{
- int retval;
-
- retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
- if (retval)
- return retval;
-
- retval = hwsampler_start_all(oprofile_hw_interval);
- if (retval)
- hwsampler_deallocate();
-
- return retval;
-}
-
-static int oprofile_hwsampler_start(void)
-{
- int retval;
-
- hwsampler_running = hwsampler_enabled;
-
- if (!hwsampler_running)
- return timer_ops.start();
-
- retval = perf_reserve_sampling();
- if (retval)
- return retval;
-
- retval = __oprofile_hwsampler_start();
- if (retval)
- perf_release_sampling();
-
- return retval;
-}
-
-static void oprofile_hwsampler_stop(void)
-{
- if (!hwsampler_running) {
- timer_ops.stop();
- return;
- }
-
- hwsampler_stop_all();
- hwsampler_deallocate();
- perf_release_sampling();
- return;
-}
-
-/*
- * File ops used for:
- * /dev/oprofile/0/enabled
- * /dev/oprofile/hwsampling/hwsampler (cpu_type = timer)
- */
-
-static ssize_t hwsampler_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset);
-}
-
-static ssize_t hwsampler_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- if (val != 0 && val != 1)
- return -EINVAL;
-
- if (oprofile_started)
- /*
- * save to do without locking as we set
- * hwsampler_running in start() when start_mutex is
- * held
- */
- return -EBUSY;
-
- hwsampler_enabled = val;
-
- return count;
-}
-
-static const struct file_operations hwsampler_fops = {
- .read = hwsampler_read,
- .write = hwsampler_write,
-};
-
-/*
- * File ops used for:
- * /dev/oprofile/0/count
- * /dev/oprofile/hwsampling/hw_interval (cpu_type = timer)
- *
- * Make sure that the value is within the hardware range.
- */
-
-static ssize_t hw_interval_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(oprofile_hw_interval, buf,
- count, offset);
-}
-
-static ssize_t hw_interval_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
- if (val < oprofile_min_interval)
- oprofile_hw_interval = oprofile_min_interval;
- else if (val > oprofile_max_interval)
- oprofile_hw_interval = oprofile_max_interval;
- else
- oprofile_hw_interval = val;
-
- return count;
-}
-
-static const struct file_operations hw_interval_fops = {
- .read = hw_interval_read,
- .write = hw_interval_write,
-};
-
-/*
- * File ops used for:
- * /dev/oprofile/0/event
- * Only a single event with number 0 is supported with this counter.
- *
- * /dev/oprofile/0/unit_mask
- * This is a dummy file needed by the user space tools.
- * No value other than 0 is accepted or returned.
- */
-
-static ssize_t hwsampler_zero_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(0, buf, count, offset);
-}
-
-static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
- if (val != 0)
- return -EINVAL;
- return count;
-}
-
-static const struct file_operations zero_fops = {
- .read = hwsampler_zero_read,
- .write = hwsampler_zero_write,
-};
-
-/* /dev/oprofile/0/kernel file ops. */
-
-static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(counter_config.kernel,
- buf, count, offset);
-}
-
-static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- if (val != 0 && val != 1)
- return -EINVAL;
-
- counter_config.kernel = val;
-
- return count;
-}
-
-static const struct file_operations kernel_fops = {
- .read = hwsampler_kernel_read,
- .write = hwsampler_kernel_write,
-};
-
-/* /dev/oprofile/0/user file ops. */
-
-static ssize_t hwsampler_user_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(counter_config.user,
- buf, count, offset);
-}
-
-static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- if (val != 0 && val != 1)
- return -EINVAL;
-
- counter_config.user = val;
-
- return count;
-}
-
-static const struct file_operations user_fops = {
- .read = hwsampler_user_read,
- .write = hwsampler_user_write,
-};
-
-
-/*
- * File ops used for: /dev/oprofile/timer/enabled
- * The value always has to be the inverted value of hwsampler_enabled. So
- * no separate variable is created. That way we do not need locking.
- */
-
-static ssize_t timer_enabled_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset);
-}
-
-static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- if (val != 0 && val != 1)
- return -EINVAL;
-
- /* Timer cannot be disabled without having hardware sampling. */
- if (val == 0 && !hwsampler_available)
- return -EINVAL;
-
- if (oprofile_started)
- /*
- * save to do without locking as we set
- * hwsampler_running in start() when start_mutex is
- * held
- */
- return -EBUSY;
-
- hwsampler_enabled = !val;
-
- return count;
-}
-
-static const struct file_operations timer_enabled_fops = {
- .read = timer_enabled_read,
- .write = timer_enabled_write,
-};
-
-
-static int oprofile_create_hwsampling_files(struct dentry *root)
-{
- struct dentry *dir;
-
- dir = oprofilefs_mkdir(root, "timer");
- if (!dir)
- return -EINVAL;
-
- oprofilefs_create_file(dir, "enabled", &timer_enabled_fops);
-
- if (!hwsampler_available)
- return 0;
-
- /* reinitialize default values */
- hwsampler_enabled = 1;
- counter_config.kernel = 1;
- counter_config.user = 1;
-
- if (!force_cpu_type) {
- /*
- * Create the counter file system. A single virtual
- * counter is created which can be used to
- * enable/disable hardware sampling dynamically from
- * user space. The user space will configure a single
- * counter with a single event. The value of 'event'
- * and 'unit_mask' are not evaluated by the kernel code
- * and can only be set to 0.
- */
-
- dir = oprofilefs_mkdir(root, "0");
- if (!dir)
- return -EINVAL;
-
- oprofilefs_create_file(dir, "enabled", &hwsampler_fops);
- oprofilefs_create_file(dir, "event", &zero_fops);
- oprofilefs_create_file(dir, "count", &hw_interval_fops);
- oprofilefs_create_file(dir, "unit_mask", &zero_fops);
- oprofilefs_create_file(dir, "kernel", &kernel_fops);
- oprofilefs_create_file(dir, "user", &user_fops);
- oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
- &oprofile_sdbt_blocks);
-
- } else {
- /*
- * Hardware sampling can be used but the cpu_type is
- * forced to timer in order to deal with legacy user
- * space tools. The /dev/oprofile/hwsampling fs is
- * provided in that case.
- */
- dir = oprofilefs_mkdir(root, "hwsampling");
- if (!dir)
- return -EINVAL;
-
- oprofilefs_create_file(dir, "hwsampler",
- &hwsampler_fops);
- oprofilefs_create_file(dir, "hw_interval",
- &hw_interval_fops);
- oprofilefs_create_ro_ulong(dir, "hw_min_interval",
- &oprofile_min_interval);
- oprofilefs_create_ro_ulong(dir, "hw_max_interval",
- &oprofile_max_interval);
- oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
- &oprofile_sdbt_blocks);
- }
- return 0;
-}
-
-static int oprofile_hwsampler_init(struct oprofile_operations *ops)
-{
- /*
- * Initialize the timer mode infrastructure as well in order
- * to be able to switch back dynamically. oprofile_timer_init
- * is not supposed to fail.
- */
- if (oprofile_timer_init(ops))
- BUG();
-
- memcpy(&timer_ops, ops, sizeof(timer_ops));
- ops->create_files = oprofile_create_hwsampling_files;
-
- /*
- * If the user space tools do not support newer cpu types,
- * the force_cpu_type module parameter
- * can be used to always return \"timer\" as cpu type.
- */
- if (force_cpu_type != timer) {
- struct cpuid id;
-
- get_cpu_id (&id);
-
- switch (id.machine) {
- case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
- case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
- case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
- case 0x2964: case 0x2965: ops->cpu_type = "s390/z13"; break;
- default: return -ENODEV;
- }
- }
-
- if (hwsampler_setup())
- return -ENODEV;
-
- /*
- * Query the range for the sampling interval from the
- * hardware.
- */
- oprofile_min_interval = hwsampler_query_min_interval();
- if (oprofile_min_interval == 0)
- return -ENODEV;
- oprofile_max_interval = hwsampler_query_max_interval();
- if (oprofile_max_interval == 0)
- return -ENODEV;
-
- /* The initial value should be sane */
- if (oprofile_hw_interval < oprofile_min_interval)
- oprofile_hw_interval = oprofile_min_interval;
- if (oprofile_hw_interval > oprofile_max_interval)
- oprofile_hw_interval = oprofile_max_interval;
-
- printk(KERN_INFO "oprofile: System z hardware sampling "
- "facility found.\n");
-
- ops->start = oprofile_hwsampler_start;
- ops->stop = oprofile_hwsampler_stop;
-
- return 0;
-}
-
-static void oprofile_hwsampler_exit(void)
-{
- hwsampler_shutdown();
-}
static int __s390_backtrace(void *data, unsigned long address)
{
@@ -514,18 +34,9 @@ static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = s390_backtrace;
-
- /*
- * -ENODEV is not reported to the caller. The module itself
- * will use the timer mode sampling as fallback and this is
- * always available.
- */
- hwsampler_available = oprofile_hwsampler_init(ops) == 0;
-
return 0;
}
void oprofile_arch_exit(void)
{
- oprofile_hwsampler_exit();
}
diff --git a/arch/s390/oprofile/op_counter.h b/arch/s390/oprofile/op_counter.h
deleted file mode 100644
index 61b2531eef17..000000000000
--- a/arch/s390/oprofile/op_counter.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright IBM Corp. 2011
- * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
- *
- * @remark Copyright 2011 OProfile authors
- */
-
-#ifndef OP_COUNTER_H
-#define OP_COUNTER_H
-
-struct op_counter_config {
- /* `enabled' maps to the hwsampler_file variable. */
- /* `count' maps to the oprofile_hw_interval variable. */
- /* `event' and `unit_mask' are unused. */
- unsigned long kernel;
- unsigned long user;
-};
-
-extern struct op_counter_config counter_config;
-
-#endif /* OP_COUNTER_H */
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 1ea8c07eab84..070f1ae5cfad 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -226,7 +226,8 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
PAGE_SIZE) >> PAGE_SHIFT;
return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
- start, size, 0, boundary_size, 0);
+ start, size, zdev->start_dma >> PAGE_SHIFT,
+ boundary_size, 0);
}
static unsigned long dma_alloc_iommu(struct device *dev, int size)
@@ -469,6 +470,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
* Also set zdev->end_dma to the actual end address of the usable
* range, instead of the theoretical maximum as reported by hardware.
*/
+ zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
zdev->iommu_size = min3((u64) high_memory,
ZPCI_TABLE_SIZE_RT - zdev->start_dma,
zdev->end_dma - zdev->start_dma + 1);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index fb2a9a560fdc..c2b27ad8e94d 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -145,8 +145,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
default:
break;
}
- if (pdev)
- pci_dev_put(pdev);
+ pci_dev_put(pdev);
}
void zpci_event_availability(void *data)
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 10ca15dcab11..fa8d7d4b9751 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -99,7 +99,7 @@ void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
}
/* PCI Load */
-static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
@@ -116,6 +116,16 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
: "d" (__offset)
: "cc");
*status = __req >> 24 & 0xff;
+ *data = __data;
+ return cc;
+}
+
+static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ u64 __data;
+ int cc;
+
+ cc = ____pcilg(&__data, req, offset, status);
if (!cc)
*data = __data;
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 2e067657db98..49b012d78c1a 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
- PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 37a6c2e0e969..995b71e4db4b 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -111,7 +111,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e803a836cb7c..0d5f3a9bb315 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -264,7 +264,6 @@ config CPU_SUBTYPE_SH7203
select CPU_HAS_FPU
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_MTU2
- select ARCH_WANT_OPTIONAL_GPIOLIB
select PINCTRL
config CPU_SUBTYPE_SH7206
@@ -353,7 +352,6 @@ config CPU_SUBTYPE_SH7720
select CPU_SH3
select CPU_HAS_DSP
select SYS_SUPPORTS_SH_CMT
- select ARCH_WANT_OPTIONAL_GPIOLIB
select USB_OHCI_SH if USB_OHCI_HCD
select PINCTRL
help
@@ -419,7 +417,6 @@ config CPU_SUBTYPE_SH7723
select ARCH_SHMOBILE
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_SH_CMT
- select ARCH_WANT_OPTIONAL_GPIOLIB
select PINCTRL
help
Select SH7723 if you have an SH-MobileR2 CPU.
@@ -431,7 +428,6 @@ config CPU_SUBTYPE_SH7724
select ARCH_SHMOBILE
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_SH_CMT
- select ARCH_WANT_OPTIONAL_GPIOLIB
select PINCTRL
help
Select SH7724 if you have an SH-MobileR2R CPU.
@@ -440,7 +436,6 @@ config CPU_SUBTYPE_SH7734
bool "Support SH7734 processor"
select CPU_SH4A
select CPU_SHX2
- select ARCH_WANT_OPTIONAL_GPIOLIB
select PINCTRL
help
Select SH7734 if you have a SH4A SH7734 CPU.
@@ -449,7 +444,6 @@ config CPU_SUBTYPE_SH7757
bool "Support SH7757 processor"
select CPU_SH4A
select CPU_SHX2
- select ARCH_WANT_OPTIONAL_GPIOLIB
select PINCTRL
help
Select SH7757 if you have a SH4A SH7757 CPU.
@@ -475,7 +469,6 @@ config CPU_SUBTYPE_SH7785
select CPU_SHX2
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_NUMA
- select ARCH_WANT_OPTIONAL_GPIOLIB
select PINCTRL
config CPU_SUBTYPE_SH7786
@@ -484,7 +477,6 @@ config CPU_SUBTYPE_SH7786
select CPU_SHX3
select CPU_HAS_PTEAEX
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
- select ARCH_WANT_OPTIONAL_GPIOLIB
select USB_OHCI_SH if USB_OHCI_HCD
select USB_EHCI_SH if USB_EHCI_HCD
select PINCTRL
@@ -494,7 +486,7 @@ config CPU_SUBTYPE_SHX3
select CPU_SH4A
select CPU_SHX3
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select PINCTRL
# SH4AL-DSP Processor Support
@@ -513,7 +505,6 @@ config CPU_SUBTYPE_SH7722
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_NUMA
select SYS_SUPPORTS_SH_CMT
- select ARCH_WANT_OPTIONAL_GPIOLIB
select PINCTRL
config CPU_SUBTYPE_SH7366
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 5e52d5362292..e0db04664e2e 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -70,7 +70,7 @@ config SH_7724_SOLUTION_ENGINE
bool "SolutionEngine7724"
select SOLUTION_ENGINE
depends on CPU_SUBTYPE_SH7724
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select SND_SOC_AK4642 if SND_SIMPLE_CARD
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
@@ -174,7 +174,6 @@ config SH_SDK7786
depends on CPU_SUBTYPE_SH7786
select SYS_SUPPORTS_PCI
select NO_IOPORT_MAP if !PCI
- select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_SRAM_POOL
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
@@ -190,7 +189,7 @@ config SH_HIGHLANDER
config SH_SH7757LCR
bool "SH7757LCR"
depends on CPU_SUBTYPE_SH7757
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
config SH_SH7785LCR
@@ -217,14 +216,14 @@ config SH_SH7785LCR_PT
config SH_URQUELL
bool "Urquell"
depends on CPU_SUBTYPE_SH7786
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select SYS_SUPPORTS_PCI
select NO_IOPORT_MAP if !PCI
config SH_MIGOR
bool "Migo-R"
depends on CPU_SUBTYPE_SH7722
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Select Migo-R if configuring for the SH7722 Migo-R platform
@@ -233,7 +232,7 @@ config SH_MIGOR
config SH_AP325RXA
bool "AP-325RXA"
depends on CPU_SUBTYPE_SH7723
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Renesas "AP-325RXA" support.
@@ -242,7 +241,7 @@ config SH_AP325RXA
config SH_KFR2R09
bool "KFR2R09"
depends on CPU_SUBTYPE_SH7724
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
"Kit For R2R for 2009" support.
@@ -250,7 +249,7 @@ config SH_KFR2R09
config SH_ECOVEC
bool "EcoVec"
depends on CPU_SUBTYPE_SH7724
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select SND_SOC_DA7210 if SND_SIMPLE_CARD
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
@@ -327,7 +326,7 @@ config SH_X3PROTO
config SH_MAGIC_PANEL_R2
bool "Magic Panel R2"
depends on CPU_SUBTYPE_SH7720
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Select Magic Panel R2 if configuring for Magic Panel R2.
diff --git a/arch/sh/boards/mach-highlander/Kconfig b/arch/sh/boards/mach-highlander/Kconfig
index def49cc0a7b9..42f5589b4bf3 100644
--- a/arch/sh/boards/mach-highlander/Kconfig
+++ b/arch/sh/boards/mach-highlander/Kconfig
@@ -18,7 +18,7 @@ config SH_R7780MP
config SH_R7785RP
bool "R7785RP board support"
depends on CPU_SUBTYPE_SH7785
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
endchoice
diff --git a/arch/sh/boards/mach-rsk/Kconfig b/arch/sh/boards/mach-rsk/Kconfig
index 458a11ffd022..0b9b2c4952c1 100644
--- a/arch/sh/boards/mach-rsk/Kconfig
+++ b/arch/sh/boards/mach-rsk/Kconfig
@@ -10,17 +10,17 @@ config SH_RSK7201
config SH_RSK7203
bool "RSK7203"
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
depends on CPU_SUBTYPE_SH7203
config SH_RSK7264
bool "RSK2+SH7264"
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
depends on CPU_SUBTYPE_SH7264
config SH_RSK7269
bool "RSK2+SH7269"
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
depends on CPU_SUBTYPE_SH7269
endchoice
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index bf3a166a5407..911ffb9f115b 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -9,9 +9,7 @@
*/
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
-#include <linux/of_iommu.h>
#include <linux/clocksource.h>
#include <linux/irqchip.h>
#include <linux/clk-provider.h>
@@ -180,17 +178,3 @@ void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
void __init plat_irq_setup(void)
{
}
-
-static int __init sh_of_device_init(void)
-{
- pr_info("SH generic board support: populating platform devices\n");
- if (of_have_populated_dt()) {
- of_iommu_init();
- of_platform_populate(NULL, of_default_bus_match_table,
- NULL, NULL);
- } else {
- pr_crit("Device tree not populated\n");
- }
- return 0;
-}
-arch_initcall_sync(sh_of_device_init);
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index b94df40e5f2d..d755e96c3064 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -43,16 +43,42 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return tmp; \
}
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int res, tmp; \
+ \
+ __asm__ __volatile__ ( \
+ " .align 2 \n\t" \
+ " mova 1f, r0 \n\t" /* r0 = end point */ \
+ " mov r15, r1 \n\t" /* r1 = saved sp */ \
+ " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
+ " mov.l @%2, %0 \n\t" /* load old value */ \
+ " mov %0, %1 \n\t" /* save old value */ \
+ " " #op " %3, %0 \n\t" /* $op */ \
+ " mov.l %0, @%2 \n\t" /* store new value */ \
+ "1: mov r1, r15 \n\t" /* LOGOUT */ \
+ : "=&r" (tmp), "=&r" (res), "+r" (v) \
+ : "r" (i) \
+ : "memory" , "r0", "r1"); \
+ \
+ return res; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 23fcdad5773e..8e2da5fa0178 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -33,15 +33,38 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long temp, flags; \
+ \
+ raw_local_irq_save(flags); \
+ temp = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ \
+ return temp; \
+}
+
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_OP_RETURN(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 33d34b16d4d6..caea2c45f6c2 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -48,15 +48,39 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long res, temp; \
+ \
+ __asm__ __volatile__ ( \
+"1: movli.l @%3, %0 ! atomic_fetch_" #op " \n" \
+" mov %0, %1 \n" \
+" " #op " %2, %0 \n" \
+" movco.l %0, @%3 \n" \
+" bf 1b \n" \
+" synco \n" \
+ : "=&z" (temp), "=&z" (res) \
+ : "r" (i), "r" (&v->counter) \
+ : "t"); \
+ \
+ return res; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index a33673b3687d..f3f42c84c40f 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
void *pg;
- pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
if (!pg)
return NULL;
page = virt_to_page(pg);
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index bdc0f3b6c56a..416834b60ad0 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -19,14 +19,20 @@
#error "Need movli.l/movco.l for spinlocks"
#endif
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
#define arch_spin_is_locked(x) ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 62f80d2a9df9..025cdb1032f6 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -101,7 +101,7 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
+ return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
@@ -109,6 +109,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_page(tlb, page);
}
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return tlb_remove_page(tlb, page);
+}
+
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 4dca18347ee9..ba3269a8304b 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -352,28 +352,12 @@ static struct pmu pmu = {
.read = sh_pmu_read,
};
-static void sh_pmu_setup(int cpu)
+static int sh_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int
-sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- sh_pmu_setup(cpu);
- break;
-
- default:
- break;
- }
-
- return NOTIFY_OK;
+ return 0;
}
int register_sh_pmu(struct sh_pmu *_pmu)
@@ -394,6 +378,7 @@ int register_sh_pmu(struct sh_pmu *_pmu)
WARN_ON(_pmu->num_events > MAX_HWEVENTS);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
- perf_cpu_notifier(sh_pmu_notifier);
+ cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
+ NULL);
return 0;
}
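[Editor's note] This hunk converts the SH PMU from a CPU notifier to the hotplug state machine: one prepare callback is registered once via cpuhp_setup_state() and runs for every CPU as it is brought up, with a NULL teardown because nothing has to be undone on CPU removal. A stand-in callback with the expected shape (illustrative only):

    /* Runs once per CPU during the prepare phase; returning non-zero would
     * abort onlining that CPU. */
    static int demo_prepare_cpu(unsigned int cpu)
    {
        (void)cpu;
        /* reset this CPU's per-CPU event bookkeeping here */
        return 0;
    }
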
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 79d8276377d1..9bf876780cef 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -487,7 +487,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
if (mm_fault_error(regs, error_code, address, fault))
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index 26e03a1f7ca4..a62bd8696779 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -1,7 +1,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7dcbebbcaec6..ee3f11c43cda 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -20,9 +20,10 @@
#define ATOMIC_INIT(i) { (i) }
int atomic_add_return(int, atomic_t *);
-void atomic_and(int, atomic_t *);
-void atomic_or(int, atomic_t *);
-void atomic_xor(int, atomic_t *);
+int atomic_fetch_add(int, atomic_t *);
+int atomic_fetch_and(int, atomic_t *);
+int atomic_fetch_or(int, atomic_t *);
+int atomic_fetch_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int);
@@ -35,7 +36,13 @@ void atomic_set(atomic_t *, int);
#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
+#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
+#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
+#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
+
#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
+#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
+
#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
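[Editor's note] The sparc32 header now exposes only fetch-style primitives (which return the old value) and derives the void and subtract forms from them, keeping the out-of-line implementation small. A user-space model of that layering, with hypothetical names:

    #include <stdatomic.h>

    static _Atomic int demo_counter;

    /* Fetch primitives return the value the counter held before the update. */
    static int demo_fetch_add(int i) { return atomic_fetch_add(&demo_counter, i); }
    static int demo_fetch_and(int i) { return atomic_fetch_and(&demo_counter, i); }

    /* Everything else is derived, as in the macros above. */
    #define demo_add(i)        ((void)demo_fetch_add(i))
    #define demo_and(i)        ((void)demo_fetch_and(i))
    #define demo_fetch_sub(i)  demo_fetch_add(-(int)(i))
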
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index f2fbf9e16faf..24827a3f733a 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -28,16 +28,24 @@ void atomic64_##op(long, atomic64_t *);
int atomic_##op##_return(int, atomic_t *); \
long atomic64_##op##_return(long, atomic64_t *);
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+int atomic_fetch_##op(int, atomic_t *); \
+long atomic64_fetch_##op(long, atomic64_t *);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
index 10e9dabc4c41..f0700cfeedd7 100644
--- a/arch/sparc/include/asm/head_64.h
+++ b/arch/sparc/include/asm/head_64.h
@@ -15,6 +15,10 @@
#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
+#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
#define __CHEETAH_ID 0x003e0014
#define __JALAPENO_ID 0x003e0016
#define __SERRANO_ID 0x003e0022
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 139e711ff80c..dcbf985ab243 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -31,14 +31,6 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor,
- unsigned long ceiling)
-{
- free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
@@ -82,4 +74,8 @@ static inline void arch_clear_hugepage_flags(struct page *page)
{
}
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
+
#endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 70067ce184b1..f7de0dbc38af 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -92,7 +92,8 @@ struct tsb_config {
typedef struct {
spinlock_t lock;
unsigned long sparc64_ctx_val;
- unsigned long huge_pte_count;
+ unsigned long hugetlb_pte_count;
+ unsigned long thp_pte_count;
struct tsb_config tsb_block[MM_NUM_TSBS];
struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
} mm_context_t;
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 5e3187185b4a..3529f1378cd8 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache,
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache,
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index e7d82803a48f..1fb317fbc0b3 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -395,7 +395,7 @@ static inline unsigned long __pte_huge_mask(void)
static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | __pte_huge_mask());
+ return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
}
static inline bool is_hugetlb_pte(pte_t pte)
@@ -403,6 +403,11 @@ static inline bool is_hugetlb_pte(pte_t pte)
return !!(pte_val(pte) & __pte_huge_mask());
}
+static inline bool is_hugetlb_pmd(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index bcc98fc35281..d9c5876c6121 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -9,12 +9,15 @@
#ifndef __ASSEMBLY__
#include <asm/psr.h>
+#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */
#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, !VAL);
+}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 968917694978..87990b7c6b0d 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -8,6 +8,9 @@
#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
/* To get debugging spinlocks which detect and catch
* deadlock situations, set CONFIG_DEBUG_SPINLOCK
* and rebuild your kernel.
@@ -23,9 +26,10 @@
#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-#define arch_spin_unlock_wait(lp) \
- do { rmb(); \
- } while((lp)->lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, !VAL);
+}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index c6a155c3904e..32258e08da03 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -203,7 +203,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
* We have to propagate the 4MB bit of the virtual address
* because we are fabricating 8MB pages using 4MB hw pages.
*/
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
brz,pn REG1, FAIL_LABEL; \
sethi %uhi(_PAGE_PMD_HUGE), REG2; \
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 71b5a67522ab..781b9f1dbdc2 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
restored; \
nop; nop; nop; nop; nop; nop; \
nop; nop; nop; nop; nop; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
ba,a,pt %xcc, user_rtt_fill_fixup;
@@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
restored; \
nop; nop; nop; nop; nop; \
nop; nop; nop; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
ba,a,pt %xcc, user_rtt_fill_fixup;
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 7cf9c6ea3f1f..fdb13327fded 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
CFLAGS_REMOVE_pcr.o := -pg
endif
+obj-$(CONFIG_SPARC64) += urtt_fill.o
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
obj-$(CONFIG_SPARC32) += etrap_32.o
obj-$(CONFIG_SPARC32) += rtrap_32.o
diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
index d668ca149e64..4087a62f96b0 100644
--- a/arch/sparc/kernel/dtlb_prot.S
+++ b/arch/sparc/kernel/dtlb_prot.S
@@ -25,13 +25,13 @@
/* PROT ** ICACHE line 2: More real fault processing */
ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
+ srlx %g5, PAGE_SHIFT, %g5
+ sllx %g5, PAGE_SHIFT, %g5 ! Clear context ID bits
bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
nop
nop
- nop
- nop
/* PROT ** ICACHE line 3: Unused... */
nop
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index a979e99f8751..cac4a5554c0e 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -165,7 +165,7 @@ void irq_link(unsigned int irq)
p = &irq_table[irq];
pil = p->pil;
- BUG_ON(pil > SUN4D_MAX_IRQ);
+ BUG_ON(pil >= SUN4D_MAX_IRQ);
p->next = irq_map[pil];
irq_map[pil] = p;
@@ -182,7 +182,7 @@ void irq_unlink(unsigned int irq)
spin_lock_irqsave(&irq_map_lock, flags);
p = &irq_table[irq];
- BUG_ON(p->pil > SUN4D_MAX_IRQ);
+ BUG_ON(p->pil >= SUN4D_MAX_IRQ);
pnext = &irq_map[p->pil];
while (*pnext != p)
pnext = &(*pnext)->next;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e22416ce56ea..34a7930b76ef 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -242,7 +242,7 @@ unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
int irq;
- irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
+ irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
if (irq <= 0)
goto out;
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index ef0d8e9e1210..f22bec0db645 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -20,6 +20,10 @@ kvmap_itlb:
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_IMMU, %g4
+ /* The kernel executes in context zero, therefore we do not
+ * need to clear the context ID bits out of %g4 here.
+ */
+
/* sun4v_itlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
@@ -128,6 +132,10 @@ kvmap_dtlb:
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g4
+ /* The kernel executes in context zero, therefore we do not
+ * need to clear the context ID bits out of %g4 here.
+ */
+
/* sun4v_dtlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
@@ -251,6 +259,10 @@ kvmap_dtlb_longpath:
nop
.previous
+ /* The kernel executes in context zero, therefore we do not
+ * need to clear the context ID bits out of %g5 here.
+ */
+
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
ba,pt %xcc, winfix_trampoline
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index d08bdaffdbfc..216948ca4382 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -14,10 +14,6 @@
#include <asm/visasm.h>
#include <asm/processor.h>
-#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
-#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
-
#ifdef CONFIG_CONTEXT_TRACKING
# define SCHEDULE_USER schedule_user
#else
@@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
-user_rtt_fill_fixup:
- rdpr %cwp, %g1
- add %g1, 1, %g1
- wrpr %g1, 0x0, %cwp
-
- rdpr %wstate, %g2
- sll %g2, 3, %g2
- wrpr %g2, 0x0, %wstate
-
- /* We know %canrestore and %otherwin are both zero. */
-
- sethi %hi(sparc64_kern_pri_context), %g2
- ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
- mov PRIMARY_CONTEXT, %g1
-
-661: stxa %g2, [%g1] ASI_DMMU
- .section .sun4v_1insn_patch, "ax"
- .word 661b
- stxa %g2, [%g1] ASI_MMU
- .previous
-
- sethi %hi(KERNBASE), %g1
- flush %g1
+user_rtt_fill_fixup_dax:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ mov 1, %g3
- or %g4, FAULT_CODE_WINFIXUP, %g4
- stb %g4, [%g6 + TI_FAULT_CODE]
- stx %g5, [%g6 + TI_FAULT_ADDR]
+user_rtt_fill_fixup_mna:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ mov 2, %g3
- mov %g6, %l1
- wrpr %g0, 0x0, %tl
-
-661: nop
- .section .sun4v_1insn_patch, "ax"
- .word 661b
- SET_GL(0)
- .previous
-
- wrpr %g0, RTRAP_PSTATE, %pstate
-
- mov %l1, %g6
- ldx [%g6 + TI_TASK], %g4
- LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
- call do_sparc64_fault
- add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+user_rtt_fill_fixup:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ clr %g3
user_rtt_pre_restore:
add %g1, 1, %g1
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 3c25241fa5cb..91cc2f4ae4d9 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
return 0;
}
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+ if ((((unsigned long) fp) & 15) ||
+ ((unsigned long)fp) > 0x100000000ULL - fplen)
+ return true;
+ return false;
+}
+
void do_sigreturn32(struct pt_regs *regs)
{
struct signal_frame32 __user *sf;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
- unsigned int psr;
+ unsigned int psr, ufp;
unsigned int pc, npc;
sigset_t set;
compat_sigset_t seta;
@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 3))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv;
+
+ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
goto segv;
- if (get_user(pc, &sf->info.si_regs.pc) ||
+ if (__get_user(pc, &sf->info.si_regs.pc) ||
__get_user(npc, &sf->info.si_regs.npc))
goto segv;
@@ -227,7 +244,7 @@ segv:
asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
{
struct rt_signal_frame32 __user *sf;
- unsigned int psr, pc, npc;
+ unsigned int psr, pc, npc, ufp;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
sigset_t set;
@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 3))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
- if (get_user(pc, &sf->regs.pc) ||
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
+ goto segv;
+
+ if (__get_user(pc, &sf->regs.pc) ||
__get_user(npc, &sf->regs.npc))
goto segv;
@@ -307,14 +329,6 @@ segv:
force_sig(SIGSEGV, current);
}
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
- if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
- return 1;
- return 0;
-}
-
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp;
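[Editor's note] The signal32 change tightens the sanity check on the saved frame pointer: the restore frame must be 16-byte aligned (signal frames are always built that way) and, for compat tasks, must fit entirely below 4GB. A standalone model of that check, using plain integers instead of user pointers:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: reject a frame pointer that is misaligned or whose
     * frame would extend past the 32-bit address space boundary. */
    static bool demo_invalid_frame_pointer(uintptr_t fp, size_t fplen)
    {
        if (fp & 15)
            return true;                        /* not 16-byte aligned */
        if ((uint64_t)fp > 0x100000000ULL - fplen)
            return true;                        /* frame crosses 4GB */
        return false;
    }
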
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 52aa5e4ce5e7..c3c12efe0bc0 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -60,10 +60,22 @@ struct rt_signal_frame {
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+ if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+ return true;
+
+ return false;
+}
+
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
+ unsigned long up_psr, pc, npc, ufp;
struct signal_frame __user *sf;
- unsigned long up_psr, pc, npc;
sigset_t set;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv_and_exit;
+
+ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
goto segv_and_exit;
- if (((unsigned long) sf) & 3)
+ if (ufp & 0x7)
goto segv_and_exit;
err = __get_user(pc, &sf->info.si_regs.pc);
@@ -127,7 +142,7 @@ segv_and_exit:
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
struct rt_signal_frame __user *sf;
- unsigned int psr, pc, npc;
+ unsigned int psr, pc, npc, ufp;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;
@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 0x03))
+ if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv;
+
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
goto segv;
err = __get_user(pc, &sf->regs.pc);
@@ -178,15 +198,6 @@ segv:
force_sig(SIGSEGV, current);
}
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
- if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
- return 1;
-
- return 0;
-}
-
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP];
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 39aaec173f66..5ee930c48f4c 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -234,6 +234,17 @@ do_sigsegv:
goto out;
}
+/* Checks if the fp is valid. We always build rt signal frames which
+ * are 16-byte aligned, therefore we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+ if (((unsigned long) fp) & 15)
+ return true;
+ return false;
+}
+
struct rt_signal_frame {
struct sparc_stackf ss;
siginfo_t info;
@@ -246,8 +257,8 @@ struct rt_signal_frame {
void do_rt_sigreturn(struct pt_regs *regs)
{
+ unsigned long tpc, tnpc, tstate, ufp;
struct rt_signal_frame __user *sf;
- unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;
@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
(regs->u_regs [UREG_FP] + STACK_BIAS);
/* 1. Make sure we are not getting garbage from the user */
- if (((unsigned long) sf) & 3)
+ if (invalid_frame_pointer(sf))
+ goto segv;
+
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
goto segv;
- err = get_user(tpc, &sf->regs.tpc);
+ if ((ufp + STACK_BIAS) & 0x7)
+ goto segv;
+
+ err = __get_user(tpc, &sf->regs.tpc);
err |= __get_user(tnpc, &sf->regs.tnpc);
if (test_thread_flag(TIF_32BIT)) {
tpc &= 0xffffffff;
@@ -308,14 +325,6 @@ segv:
force_sig(SIGSEGV, current);
}
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
- if (((unsigned long) fp) & 15)
- return 1;
- return 0;
-}
-
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
index 0f6eebe71e6c..e5fe8cef9a69 100644
--- a/arch/sparc/kernel/sigutil_32.c
+++ b/arch/sparc/kernel/sigutil_32.c
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err;
+
+ if (((unsigned long) fpu) & 3)
+ return -EFAULT;
+
#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU))
regs->psr &= ~PSR_EF;
@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
struct thread_info *t = current_thread_info();
int i, wsaved, err;
- __get_user(wsaved, &rp->wsaved);
+ if (((unsigned long) rp) & 3)
+ return -EFAULT;
+
+ get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
index 387834a9c56a..36aadcbeac69 100644
--- a/arch/sparc/kernel/sigutil_64.c
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
unsigned long fprs;
int err;
- err = __get_user(fprs, &fpu->si_fprs);
+ if (((unsigned long) fpu) & 7)
+ return -EFAULT;
+
+ err = get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
if (fprs & FPRS_DL)
@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
struct thread_info *t = current_thread_info();
int i, wsaved, err;
- __get_user(wsaved, &rp->wsaved);
+ if (((unsigned long) rp) & 7)
+ return -EFAULT;
+
+ get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index be98685c14c6..d568c8207af7 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -29,13 +29,17 @@
*/
tsb_miss_dtlb:
mov TLB_TAG_ACCESS, %g4
+ ldxa [%g4] ASI_DMMU, %g4
+ srlx %g4, PAGE_SHIFT, %g4
ba,pt %xcc, tsb_miss_page_table_walk
- ldxa [%g4] ASI_DMMU, %g4
+ sllx %g4, PAGE_SHIFT, %g4
tsb_miss_itlb:
mov TLB_TAG_ACCESS, %g4
+ ldxa [%g4] ASI_IMMU, %g4
+ srlx %g4, PAGE_SHIFT, %g4
ba,pt %xcc, tsb_miss_page_table_walk
- ldxa [%g4] ASI_IMMU, %g4
+ sllx %g4, PAGE_SHIFT, %g4
/* At this point we have:
* %g1 -- PAGE_SIZE TSB entry address
@@ -284,6 +288,10 @@ tsb_do_dtlb_fault:
nop
.previous
+ /* Clear context ID bits. */
+ srlx %g5, PAGE_SHIFT, %g5
+ sllx %g5, PAGE_SHIFT, %g5
+
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
ba,pt %xcc, winfix_trampoline
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
new file mode 100644
index 000000000000..5604a2b051d4
--- /dev/null
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -0,0 +1,98 @@
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+ .text
+ .align 8
+ .globl user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+ rdpr %cwp, %g1
+ add %g1, 1, %g1
+ wrpr %g1, 0x0, %cwp
+
+ rdpr %wstate, %g2
+ sll %g2, 3, %g2
+ wrpr %g2, 0x0, %wstate
+
+ /* We know %canrestore and %otherwin are both zero. */
+
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
+ mov PRIMARY_CONTEXT, %g1
+
+661: stxa %g2, [%g1] ASI_DMMU
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ stxa %g2, [%g1] ASI_MMU
+ .previous
+
+ sethi %hi(KERNBASE), %g1
+ flush %g1
+
+ mov %g4, %l4
+ mov %g5, %l5
+ brnz,pn %g3, 1f
+ mov %g3, %l3
+
+ or %g4, FAULT_CODE_WINFIXUP, %g4
+ stb %g4, [%g6 + TI_FAULT_CODE]
+ stx %g5, [%g6 + TI_FAULT_ADDR]
+1:
+ mov %g6, %l1
+ wrpr %g0, 0x0, %tl
+
+661: nop
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ SET_GL(0)
+ .previous
+
+ wrpr %g0, RTRAP_PSTATE, %pstate
+
+ mov %l1, %g6
+ ldx [%g6 + TI_TASK], %g4
+ LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+ brnz,pn %l3, 1f
+ nop
+
+ call do_sparc64_fault
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ nop
+
+1: cmp %g3, 2
+ bne,pn %xcc, 2f
+ nop
+
+ sethi %hi(tlb_type), %g1
+ lduw [%g1 + %lo(tlb_type)], %g1
+ cmp %g1, 3
+ bne,pt %icc, 1f
+ add %sp, PTREGS_OFF, %o0
+ mov %l4, %o2
+ call sun4v_do_mna
+ mov %l5, %o1
+ ba,a,pt %xcc, rtrap
+1: mov %l4, %o1
+ mov %l5, %o2
+ call mem_address_unaligned
+ nop
+ ba,a,pt %xcc, rtrap
+
+2: sethi %hi(tlb_type), %g1
+ mov %l4, %o1
+ lduw [%g1 + %lo(tlb_type)], %g1
+ mov %l5, %o2
+ cmp %g1, 3
+ bne,pt %icc, 1f
+ add %sp, PTREGS_OFF, %o0
+ call sun4v_data_access_exception
+ nop
+ ba,a,pt %xcc, rtrap
+
+1: call spitfire_data_access_exception
+ nop
+ ba,a,pt %xcc, rtrap
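[Editor's note] The new urtt_fill.S file carries the fixup path that used to live in rtrap_64.S, extended with a %g3 selector: 0 means the ordinary window-fill fault path, 1 a data access exception, 2 a memory-address-not-aligned trap. A C-level model of just that dispatch (hypothetical names; the real logic is the assembly above):

    enum demo_rtt_fixup { DEMO_WINFIXUP = 0, DEMO_DAX = 1, DEMO_MNA = 2 };

    static void demo_rtt_fixup_dispatch(enum demo_rtt_fixup kind)
    {
        switch (kind) {
        case DEMO_WINFIXUP:
            /* record FAULT_CODE_WINFIXUP and call the normal fault handler */
            break;
        case DEMO_DAX:
            /* raise a data access exception (sun4v or spitfire variant) */
            break;
        case DEMO_MNA:
            /* raise a mem-address-not-aligned trap (sun4v_do_mna or
             * mem_address_unaligned, depending on tlb_type) */
            break;
        }
    }
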
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index b9d63c0a7aab..2c373329d5cb 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,39 +27,44 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
-#define ATOMIC_OP_RETURN(op, c_op) \
-int atomic_##op##_return(int i, atomic_t *v) \
+#define ATOMIC_FETCH_OP(op, c_op) \
+int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
spin_lock_irqsave(ATOMIC_HASH(v), flags); \
\
- ret = (v->counter c_op i); \
+ ret = v->counter; \
+ v->counter c_op i; \
\
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(atomic_fetch_##op);
-#define ATOMIC_OP(op, c_op) \
-void atomic_##op(int i, atomic_t *v) \
+#define ATOMIC_OP_RETURN(op, c_op) \
+int atomic_##op##_return(int i, atomic_t *v) \
{ \
+ int ret; \
unsigned long flags; \
spin_lock_irqsave(ATOMIC_HASH(v), flags); \
\
- v->counter c_op i; \
+ ret = (v->counter c_op i); \
\
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
+ return ret; \
} \
-EXPORT_SYMBOL(atomic_##op);
+EXPORT_SYMBOL(atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
+ATOMIC_FETCH_OP(add, +=)
+ATOMIC_FETCH_OP(and, &=)
+ATOMIC_FETCH_OP(or, |=)
+ATOMIC_FETCH_OP(xor, ^=)
+
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
int atomic_xchg(atomic_t *v, int new)
{
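[Editor's note] In the spinlock-hashed sparc32 library, a fetch operation differs from a return operation only in which value it hands back: the counter as it was before the update rather than after. Stripped of the locking, the semantics are simply:

    /* Illustrative only: runs under ATOMIC_HASH(v)'s spinlock in the real code. */
    static int demo_fetch_or(int *counter, int i)
    {
        int old = *counter;
        *counter |= i;
        return old;             /* old value, not the updated one */
    }
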
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index d6b0363f345b..a5c5a0279ccc 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -9,10 +9,11 @@
.text
- /* Two versions of the atomic routines, one that
+ /* Three versions of the atomic routines, one that
* does not return a value and does not perform
- * memory barriers, and a second which returns
- * a value and does the barriers.
+ * memory barriers, and two which return
+ * a value, the new and old value respectively, and
+ * do the barriers.
*/
#define ATOMIC_OP(op) \
@@ -43,15 +44,34 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op##_return);
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+1: lduw [%o1], %g1; \
+ op %g1, %o0, %g7; \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+ nop; \
+ retl; \
+ sra %g1, 0, %o0; \
+2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ENDPROC(atomic_fetch_##op);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -83,15 +103,34 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op##_return);
-#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+#define ATOMIC64_FETCH_OP(op) \
+ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+1: ldx [%o1], %g1; \
+ op %g1, %o0, %g7; \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+ nop; \
+ retl; \
+ mov %g1, %o0; \
+2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ENDPROC(atomic64_fetch_##op);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 8eb454cfe05c..de5e97817bdb 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -107,15 +107,24 @@ EXPORT_SYMBOL(atomic64_##op);
EXPORT_SYMBOL(atomic_##op##_return); \
EXPORT_SYMBOL(atomic64_##op##_return);
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+EXPORT_SYMBOL(atomic_fetch_##op); \
+EXPORT_SYMBOL(atomic64_fetch_##op);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b6c559cbd64d..4714061d6cd3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
@@ -411,7 +411,7 @@ good_area:
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- switch (handle_mm_fault(mm, vma, address, flags)) {
+ switch (handle_mm_fault(vma, address, flags)) {
case VM_FAULT_SIGBUS:
case VM_FAULT_OOM:
goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index cb841a33da59..e16fdd28a931 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -111,8 +111,8 @@ static unsigned int get_user_insn(unsigned long tpc)
if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
goto out_irq_enable;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pmd_trans_huge(*pmdp)) {
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ if (is_hugetlb_pmd(*pmdp)) {
pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
pa += tpc & ~HPAGE_MASK;
@@ -436,7 +436,7 @@ good_area:
goto bad_area;
}
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
goto exit_exception;
@@ -476,14 +476,14 @@ good_area:
up_read(&mm->mmap_sem);
mm_rss = get_mm_rss(mm);
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- mm_rss = mm->context.huge_pte_count;
+ mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ba52e6466a82..988acc8b1b80 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -12,6 +12,7 @@
#include <asm/mman.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -131,23 +132,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
{
pgd_t *pgd;
pud_t *pud;
- pmd_t *pmd;
pte_t *pte = NULL;
- /* We must align the address, because our caller will run
- * set_huge_pte_at() on whatever we return, which writes out
- * all of the sub-ptes for the hugepage range. So we have
- * to give it the first such sub-pte.
- */
- addr &= HPAGE_MASK;
-
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
- if (pud) {
- pmd = pmd_alloc(mm, pud, addr);
- if (pmd)
- pte = pte_alloc_map(mm, pmd, addr);
- }
+ if (pud)
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
return pte;
}
@@ -155,19 +146,13 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
- pmd_t *pmd;
pte_t *pte = NULL;
- addr &= HPAGE_MASK;
-
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
- if (!pud_none(*pud)) {
- pmd = pmd_offset(pud, addr);
- if (!pmd_none(*pmd))
- pte = pte_offset_map(pmd, addr);
- }
+ if (!pud_none(*pud))
+ pte = (pte_t *)pmd_offset(pud, addr);
}
return pte;
}
@@ -175,70 +160,143 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
- int i;
- pte_t orig[2];
- unsigned long nptes;
+ pte_t orig;
if (!pte_present(*ptep) && pte_present(entry))
- mm->context.huge_pte_count++;
+ mm->context.hugetlb_pte_count++;
addr &= HPAGE_MASK;
-
- nptes = 1 << HUGETLB_PAGE_ORDER;
- orig[0] = *ptep;
- orig[1] = *(ptep + nptes / 2);
- for (i = 0; i < nptes; i++) {
- *ptep = entry;
- ptep++;
- addr += PAGE_SIZE;
- pte_val(entry) += PAGE_SIZE;
- }
+ orig = *ptep;
+ *ptep = entry;
/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
- addr -= REAL_HPAGE_SIZE;
- ptep -= nptes / 2;
- maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
- addr -= REAL_HPAGE_SIZE;
- ptep -= nptes / 2;
- maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
+ maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
+ maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t entry;
- int i;
- unsigned long nptes;
entry = *ptep;
if (pte_present(entry))
- mm->context.huge_pte_count--;
+ mm->context.hugetlb_pte_count--;
addr &= HPAGE_MASK;
- nptes = 1 << HUGETLB_PAGE_ORDER;
- for (i = 0; i < nptes; i++) {
- *ptep = __pte(0UL);
- addr += PAGE_SIZE;
- ptep++;
- }
+ *ptep = __pte(0UL);
/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
- addr -= REAL_HPAGE_SIZE;
- ptep -= nptes / 2;
- maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
- addr -= REAL_HPAGE_SIZE;
- ptep -= nptes / 2;
maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+ maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
return entry;
}
int pmd_huge(pmd_t pmd)
{
- return 0;
+ return !pmd_none(pmd) &&
+ (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}
int pud_huge(pud_t pud)
{
return 0;
}
+
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long addr)
+{
+ pgtable_t token = pmd_pgtable(*pmd);
+
+ pmd_clear(pmd);
+ pte_free_tlb(tlb, token, addr);
+ atomic_long_dec(&tlb->mm->nr_ptes);
+}
+
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ pmd_t *pmd;
+ unsigned long next;
+ unsigned long start;
+
+ start = addr;
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(*pmd))
+ continue;
+ if (is_hugetlb_pmd(*pmd))
+ pmd_clear(pmd);
+ else
+ hugetlb_free_pte_range(tlb, pmd, addr);
+ } while (pmd++, addr = next, addr != end);
+
+ start &= PUD_MASK;
+ if (start < floor)
+ return;
+ if (ceiling) {
+ ceiling &= PUD_MASK;
+ if (!ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ return;
+
+ pmd = pmd_offset(pud, start);
+ pud_clear(pud);
+ pmd_free_tlb(tlb, pmd, start);
+ mm_dec_nr_pmds(tlb->mm);
+}
+
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ pud_t *pud;
+ unsigned long next;
+ unsigned long start;
+
+ start = addr;
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
+ ceiling);
+ } while (pud++, addr = next, addr != end);
+
+ start &= PGDIR_MASK;
+ if (start < floor)
+ return;
+ if (ceiling) {
+ ceiling &= PGDIR_MASK;
+ if (!ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ return;
+
+ pud = pud_offset(pgd, start);
+ pgd_clear(pgd);
+ pud_free_tlb(tlb, pud, start);
+}
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ pgd_t *pgd;
+ unsigned long next;
+
+ pgd = pgd_offset(tlb->mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ } while (pgd++, addr = next, addr != end);
+}
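[Editor's note] With the hugepage now represented by a single PMD-level entry, set_huge_pte_at() and huge_ptep_get_and_clear() no longer loop over sub-PTEs, but the TLB shootdown still has to happen once per 4MB hardware page backing the 8MB software page. A sketch of that flush pattern, with illustrative constants:

    #define DEMO_REAL_HPAGE_SIZE (4UL << 20)    /* 4MB hardware page */
    #define DEMO_HPAGE_SIZE      (8UL << 20)    /* 8MB software huge page */

    static void demo_flush_huge(unsigned long addr,
                                void (*flush_one)(unsigned long))
    {
        addr &= ~(DEMO_HPAGE_SIZE - 1);         /* HPAGE_MASK equivalent */
        flush_one(addr);                        /* first 4MB half */
        flush_one(addr + DEMO_REAL_HPAGE_SIZE); /* second 4MB half */
    }
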
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 652683cb4b4b..65457c9f1365 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -346,10 +346,13 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+ if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+ is_hugetlb_pte(pte)) {
+ /* We are fabricating 8MB pages using 4MB real hw pages. */
+ pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
address, pte_val(pte));
- else
+ } else
#endif
__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
address, pte_val(pte));
@@ -2704,8 +2707,7 @@ void __flush_tlb_all(void)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
pte_t *pte = NULL;
if (page)
@@ -2717,8 +2719,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
@@ -2824,9 +2825,10 @@ void hugetlb_setup(struct pt_regs *regs)
* the Data-TLB for huge pages.
*/
if (tlb_type == cheetah_plus) {
+ bool need_context_reload = false;
unsigned long ctx;
- spin_lock(&ctx_alloc_lock);
+ spin_lock_irq(&ctx_alloc_lock);
ctx = mm->context.sparc64_ctx_val;
ctx &= ~CTX_PGSZ_MASK;
ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2845,9 +2847,12 @@ void hugetlb_setup(struct pt_regs *regs)
* also executing in this address space.
*/
mm->context.sparc64_ctx_val = ctx;
- on_each_cpu(context_reload, mm, 0);
+ need_context_reload = true;
}
- spin_unlock(&ctx_alloc_lock);
+ spin_unlock_irq(&ctx_alloc_lock);
+
+ if (need_context_reload)
+ on_each_cpu(context_reload, mm, 0);
}
}
#endif
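[Editor's note] The init_64.c hunk defers the context_reload cross-call until ctx_alloc_lock has been dropped: the decision is recorded under the lock, the IPI is sent afterwards. A generic, user-space model of that "decide under the lock, act after unlocking" pattern (pthread-based, illustrative only):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    static void demo_update(int *shared, int newval, void (*broadcast)(void))
    {
        bool need_broadcast = false;

        pthread_mutex_lock(&demo_lock);
        if (*shared != newval) {
            *shared = newval;
            need_broadcast = true;      /* remember the work ... */
        }
        pthread_mutex_unlock(&demo_lock);

        if (need_broadcast)
            broadcast();                /* ... and do it with no lock held */
    }
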
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index f81cd9736700..3659d37b4d81 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -175,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
if (pmd_val(pmd) & _PAGE_PMD_HUGE)
- mm->context.huge_pte_count++;
+ mm->context.thp_pte_count++;
else
- mm->context.huge_pte_count--;
+ mm->context.thp_pte_count--;
/* Do not try to allocate the TSB hash table if we
* don't have one already. We have various locks held
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a0604a493a36..6725ed45580e 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -470,7 +470,7 @@ retry_tsb_alloc:
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- unsigned long huge_pte_count;
+ unsigned long total_huge_pte_count;
#endif
unsigned int i;
@@ -479,12 +479,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm->context.sparc64_ctx_val = 0UL;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- /* We reset it to zero because the fork() page copying
+ /* We reset them to zero because the fork() page copying
* will re-increment the counters as the parent PTEs are
* copied into the child address space.
*/
- huge_pte_count = mm->context.huge_pte_count;
- mm->context.huge_pte_count = 0;
+ total_huge_pte_count = mm->context.hugetlb_pte_count +
+ mm->context.thp_pte_count;
+ mm->context.hugetlb_pte_count = 0;
+ mm->context.thp_pte_count = 0;
#endif
/* copy_mm() copies over the parent's mm_struct before calling
@@ -500,8 +502,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (unlikely(huge_pte_count))
- tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+ if (unlikely(total_huge_pte_count))
+ tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
#endif
if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 9fc0107a9c5e..8dda3c8ff5ab 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -46,6 +46,8 @@ static inline int atomic_read(const atomic_t *v)
*/
#define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v))
+#define atomic_fetch_sub(i, v) atomic_fetch_add(-(int)(i), (v))
+
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index d320ce253d86..a93774255136 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,18 +34,29 @@ static inline void atomic_add(int i, atomic_t *v)
_atomic_xchg_add(&v->counter, i);
}
-#define ATOMIC_OP(op) \
-unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+#define ATOMIC_OPS(op) \
+unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- _atomic_##op((unsigned long *)&v->counter, i); \
+ _atomic_fetch_##op((unsigned long *)&v->counter, i); \
+} \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ smp_mb(); \
+ return _atomic_fetch_##op((unsigned long *)&v->counter, i); \
}
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
+
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ smp_mb();
+ return _atomic_xchg_add(&v->counter, i);
+}
/**
* atomic_add_return - add integer and return
@@ -126,16 +137,29 @@ static inline void atomic64_add(long long i, atomic64_t *v)
_atomic64_xchg_add(&v->counter, i);
}
-#define ATOMIC64_OP(op) \
-long long _atomic64_##op(long long *v, long long n); \
+#define ATOMIC64_OPS(op) \
+long long _atomic64_fetch_##op(long long *v, long long n); \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
- _atomic64_##op(&v->counter, i); \
+ _atomic64_fetch_##op(&v->counter, i); \
+} \
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
+{ \
+ smp_mb(); \
+ return _atomic64_fetch_##op(&v->counter, i); \
}
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
+
+#undef ATOMIC64_OPS
+
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+ smp_mb();
+ return _atomic64_xchg_add(&v->counter, i);
+}
/**
* atomic64_add_return - add integer and return
@@ -186,6 +210,7 @@ static inline void atomic64_set(atomic64_t *v, long long n)
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
+#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_dec(v) atomic64_sub(1LL, (v))
@@ -193,7 +218,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-
#endif /* !__ASSEMBLY__ */
/*
@@ -242,16 +266,16 @@ struct __get_user {
unsigned long val;
int err;
};
-extern struct __get_user __atomic_cmpxchg(volatile int *p,
+extern struct __get_user __atomic32_cmpxchg(volatile int *p,
int *lock, int o, int n);
-extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
+extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
int *lock, int o, int n);
-extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
@@ -259,9 +283,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
int *lock, long long o, long long n);
-extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index b0531a623653..4cefa0c9fd81 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -32,11 +32,6 @@
* on any routine which updates memory and returns a value.
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- __insn_fetchadd4((void *)&v->counter, i);
-}
-
/*
* Note a subtlety of the locking here. We are required to provide a
* full memory barrier before and after the operation. However, we
@@ -59,28 +54,39 @@ static inline int atomic_add_return(int i, atomic_t *v)
return val;
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#define ATOMIC_OPS(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int val; \
+ smp_mb(); \
+ val = __insn_fetch##op##4((void *)&v->counter, i); \
+ smp_mb(); \
+ return val; \
+} \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ __insn_fetch##op##4((void *)&v->counter, i); \
+}
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+
+#undef ATOMIC_OPS
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
{
int guess, oldval = v->counter;
+ smp_mb();
do {
- if (oldval == u)
- break;
guess = oldval;
- oldval = cmpxchg(&v->counter, guess, guess + a);
+ __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+ oldval = __insn_cmpexch4(&v->counter, guess ^ i);
} while (guess != oldval);
+ smp_mb();
return oldval;
}
-static inline void atomic_and(int i, atomic_t *v)
-{
- __insn_fetchand4((void *)&v->counter, i);
-}
-
-static inline void atomic_or(int i, atomic_t *v)
-{
- __insn_fetchor4((void *)&v->counter, i);
-}
-
static inline void atomic_xor(int i, atomic_t *v)
{
int guess, oldval = v->counter;
@@ -91,6 +97,18 @@ static inline void atomic_xor(int i, atomic_t *v)
} while (guess != oldval);
}
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int guess, oldval = v->counter;
+ do {
+ if (oldval == u)
+ break;
+ guess = oldval;
+ oldval = cmpxchg(&v->counter, guess, guess + a);
+ } while (guess != oldval);
+ return oldval;
+}
+
/* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i) { (i) }
@@ -98,11 +116,6 @@ static inline void atomic_xor(int i, atomic_t *v)
#define atomic64_read(v) READ_ONCE((v)->counter)
#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-static inline void atomic64_add(long i, atomic64_t *v)
-{
- __insn_fetchadd((void *)&v->counter, i);
-}
-
static inline long atomic64_add_return(long i, atomic64_t *v)
{
int val;
@@ -112,26 +125,37 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
return val;
}
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+#define ATOMIC64_OPS(op) \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
+{ \
+ long val; \
+ smp_mb(); \
+ val = __insn_fetch##op((void *)&v->counter, i); \
+ smp_mb(); \
+ return val; \
+} \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ __insn_fetch##op((void *)&v->counter, i); \
+}
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+
+#undef ATOMIC64_OPS
+
+static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
long guess, oldval = v->counter;
+ smp_mb();
do {
- if (oldval == u)
- break;
guess = oldval;
- oldval = cmpxchg(&v->counter, guess, guess + a);
+ __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+ oldval = __insn_cmpexch(&v->counter, guess ^ i);
} while (guess != oldval);
- return oldval != u;
-}
-
-static inline void atomic64_and(long i, atomic64_t *v)
-{
- __insn_fetchand((void *)&v->counter, i);
-}
-
-static inline void atomic64_or(long i, atomic64_t *v)
-{
- __insn_fetchor((void *)&v->counter, i);
+ smp_mb();
+ return oldval;
}
static inline void atomic64_xor(long i, atomic64_t *v)
@@ -144,7 +168,20 @@ static inline void atomic64_xor(long i, atomic64_t *v)
} while (guess != oldval);
}
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long guess, oldval = v->counter;
+ do {
+ if (oldval == u)
+ break;
+ guess = oldval;
+ oldval = cmpxchg(&v->counter, guess, guess + a);
+ } while (guess != oldval);
+ return oldval != u;
+}
+
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
+#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
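[Editor's note] tilegx has fetch-and-add/and/or instructions but no fetch-xor, so atomic64_fetch_xor() above is built from a compare-exchange retry loop bracketed by full barriers. A user-space C11 model of that loop (not the kernel code):

    #include <stdatomic.h>

    static long demo_fetch_xor(_Atomic long *v, long i)
    {
        long old = atomic_load_explicit(v, memory_order_relaxed);

        atomic_thread_fence(memory_order_seq_cst);      /* leading smp_mb() */
        while (!atomic_compare_exchange_weak_explicit(v, &old, old ^ i,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
            ;   /* 'old' is refreshed by each failed compare-exchange */
        atomic_thread_fence(memory_order_seq_cst);      /* trailing smp_mb() */
        return old;
    }
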
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index d55222806c2f..4c419ab95ab7 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -87,6 +87,13 @@ mb_incoherent(void)
#define __smp_mb__after_atomic() __smp_mb()
#endif
+/*
+ * The TILE architecture does not do speculative reads; a control
+ * dependency therefore already orders against later loads as well as
+ * stores (LOAD->{LOAD,STORE}), so we can forgo the additional RMB.
+ */
+#define smp_acquire__after_ctrl_dep() barrier()
+
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index bbf7b666f21d..d1406a95f6b7 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -19,9 +19,9 @@
#include <asm/barrier.h>
/* Tile-specific routines to support <asm/bitops.h>. */
-unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
/**
* set_bit - Atomically set a bit in memory
@@ -35,7 +35,7 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
*/
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
- _atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
+ _atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
@@ -54,7 +54,7 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
*/
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
- _atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
+ _atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
@@ -69,7 +69,7 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
*/
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
- _atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
+ _atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
@@ -85,7 +85,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
unsigned long mask = BIT_MASK(nr);
addr += BIT_WORD(nr);
smp_mb(); /* barrier for proper semantics */
- return (_atomic_or(addr, mask) & mask) != 0;
+ return (_atomic_fetch_or(addr, mask) & mask) != 0;
}
/**
@@ -101,7 +101,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
unsigned long mask = BIT_MASK(nr);
addr += BIT_WORD(nr);
smp_mb(); /* barrier for proper semantics */
- return (_atomic_andn(addr, mask) & mask) != 0;
+ return (_atomic_fetch_andn(addr, mask) & mask) != 0;
}
/**
@@ -118,7 +118,7 @@ static inline int test_and_change_bit(unsigned nr,
unsigned long mask = BIT_MASK(nr);
addr += BIT_WORD(nr);
smp_mb(); /* barrier for proper semantics */
- return (_atomic_xor(addr, mask) & mask) != 0;
+ return (_atomic_fetch_xor(addr, mask) & mask) != 0;
}
#include <asm-generic/bitops/ext2-atomic.h>
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index c505d77e4d06..e9d54a06736f 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -129,6 +129,7 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack);
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 1a6ef1b69cb1..e64a1b75fc38 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -80,16 +80,16 @@
ret = gu.err; \
}
-#define __futex_set() __futex_call(__atomic_xchg)
-#define __futex_add() __futex_call(__atomic_xchg_add)
-#define __futex_or() __futex_call(__atomic_or)
-#define __futex_andn() __futex_call(__atomic_andn)
-#define __futex_xor() __futex_call(__atomic_xor)
+#define __futex_set() __futex_call(__atomic32_xchg)
+#define __futex_add() __futex_call(__atomic32_xchg_add)
+#define __futex_or() __futex_call(__atomic32_fetch_or)
+#define __futex_andn() __futex_call(__atomic32_fetch_andn)
+#define __futex_xor() __futex_call(__atomic32_fetch_xor)
#define __futex_cmpxchg() \
{ \
- struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
- lock, oldval, oparg); \
+ struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
+ lock, oldval, oparg); \
val = gu.val; \
ret = gu.err; \
}
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index e98909033e5b..2a0347af0702 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -25,7 +25,12 @@
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
int tile_console_write(const char *buf, int count);
+
+#ifdef CONFIG_EARLY_PRINTK
void early_panic(const char *fmt, ...);
+#else
+#define early_panic panic
+#endif
/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 4b7cef9e94e0..c1467ac59ce6 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -78,7 +78,7 @@ struct thread_info {
#ifndef __ASSEMBLY__
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");
diff --git a/arch/tile/include/uapi/asm/auxvec.h b/arch/tile/include/uapi/asm/auxvec.h
index c93e92709f14..f497123ed980 100644
--- a/arch/tile/include/uapi/asm/auxvec.h
+++ b/arch/tile/include/uapi/asm/auxvec.h
@@ -18,4 +18,6 @@
/* The vDSO location. */
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif /* _ASM_TILE_AUXVEC_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 49120843ff96..bdaf71d31a4a 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -23,42 +23,50 @@
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>
+#include <asm/byteorder.h>
/*
* Syscalls that take 64-bit numbers traditionally take them in 32-bit
* "high" and "low" value parts on 32-bit architectures.
* In principle, one could imagine passing some register arguments as
* fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to
- * adapt the usual convention.
+ * adopt the usual convention.
*/
+#ifdef __BIG_ENDIAN
+#define SYSCALL_PAIR(name) u32, name ## _hi, u32, name ## _lo
+#else
+#define SYSCALL_PAIR(name) u32, name ## _lo, u32, name ## _hi
+#endif
+
COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
- u32, low, u32, high)
+ SYSCALL_PAIR(length))
{
- return sys_truncate(filename, ((loff_t)high << 32) | low);
+ return sys_truncate(filename, ((loff_t)length_hi << 32) | length_lo);
}
COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
- u32, low, u32, high)
+ SYSCALL_PAIR(length))
{
- return sys_ftruncate(fd, ((loff_t)high << 32) | low);
+ return sys_ftruncate(fd, ((loff_t)length_hi << 32) | length_lo);
}
COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
- size_t, count, u32, dummy, u32, low, u32, high)
+ size_t, count, u32, dummy, SYSCALL_PAIR(offset))
{
- return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
+ return sys_pread64(fd, ubuf, count,
+ ((loff_t)offset_hi << 32) | offset_lo);
}
COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
- size_t, count, u32, dummy, u32, low, u32, high)
+ size_t, count, u32, dummy, SYSCALL_PAIR(offset))
{
- return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
+ return sys_pwrite64(fd, ubuf, count,
+ ((loff_t)offset_hi << 32) | offset_lo);
}
COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
- u32, offset_lo, u32, offset_hi,
- u32, nbytes_lo, u32, nbytes_hi)
+ SYSCALL_PAIR(offset), SYSCALL_PAIR(nbytes))
{
return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)nbytes_hi << 32) | nbytes_lo,
@@ -66,8 +74,7 @@ COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
}
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
- u32, offset_lo, u32, offset_hi,
- u32, len_lo, u32, len_hi)
+ SYSCALL_PAIR(offset), SYSCALL_PAIR(len))
{
return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)len_hi << 32) | len_lo);
@@ -77,6 +84,8 @@ COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
* Avoid bug in generic sys_llseek() that specifies offset_high and
* offset_low as "unsigned long", thus making it possible to pass
* a sign-extended high 32 bits in offset_low.
+ * Note that we do not use SYSCALL_PAIR here since glibc passes the
+ * high and low parts explicitly in that order.
*/
COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
unsigned int, offset_low, loff_t __user *, result,
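As a rough user-space sketch of the hi/lo pairing these compat wrappers rely on (illustrative names only, not part of the kernel ABI), reassembling a 64-bit length from the two 32-bit halves a 32-bit libc passes looks like this:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: rebuild a 64-bit loff_t-style value from two
	 * 32-bit halves, as the SYSCALL_PAIR-based wrappers above do. */
	static int64_t pair_to_loff(uint32_t hi, uint32_t lo)
	{
		return ((int64_t)hi << 32) | lo;
	}

	int main(void)
	{
		uint32_t hi = 0x1, lo = 0x80000000;	/* a 6 GiB length, split */

		printf("%lld\n", (long long)pair_to_loff(hi, lo));	/* 6442450944 */
		return 0;
	}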
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 6b705ccc9cc1..a465d8372edd 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
/*
* Release a thread_info structure
*/
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *info = (void *)stack;
struct single_step_state *step_state = info->step_state;
if (step_state) {
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 54e7b723db99..d89b7011667c 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,14 +255,15 @@ int do_syscall_trace_enter(struct pt_regs *regs)
{
u32 work = ACCESS_ONCE(current_thread_info()->flags);
- if (secure_computing() == -1)
+ if ((work & _TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs)) {
+ regs->regs[TREG_SYSCALL_NR] = -1;
return -1;
-
- if (work & _TIF_SYSCALL_TRACE) {
- if (tracehook_report_syscall_entry(regs))
- regs->regs[TREG_SYSCALL_NR] = -1;
}
+ if (secure_computing(NULL) == -1)
+ return -1;
+
if (work & _TIF_SYSCALL_TRACEPOINT)
trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index 38debe706061..c7418dcbbb08 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/homecache.h>
#include <asm/cachectl.h>
+#include <asm/byteorder.h>
#include <arch/chip.h>
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
@@ -59,13 +60,19 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
#if !defined(__tilegx__) || defined(CONFIG_COMPAT)
-ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count)
+#ifdef __BIG_ENDIAN
+#define SYSCALL_PAIR(name) u32 name ## _hi, u32 name ## _lo
+#else
+#define SYSCALL_PAIR(name) u32 name ## _lo, u32 name ## _hi
+#endif
+
+ssize_t sys32_readahead(int fd, SYSCALL_PAIR(offset), u32 count)
{
return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count);
}
-int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
- u32 len_lo, u32 len_hi, int advice)
+int sys32_fadvise64_64(int fd, SYSCALL_PAIR(offset),
+ SYSCALL_PAIR(len), int advice)
{
return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)len_hi << 32) | len_lo, advice);
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 298df1e9912a..f8128800dbf5 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -61,13 +61,13 @@ static inline int *__atomic_setup(volatile void *v)
int _atomic_xchg(int *v, int n)
{
- return __atomic_xchg(v, __atomic_setup(v), n).val;
+ return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);
int _atomic_xchg_add(int *v, int i)
{
- return __atomic_xchg_add(v, __atomic_setup(v), i).val;
+ return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);
@@ -78,39 +78,39 @@ int _atomic_xchg_add_unless(int *v, int a, int u)
* to use the first argument consistently as the "old value"
* in the assembly, as is done for _atomic_cmpxchg().
*/
- return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
+ return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
int _atomic_cmpxchg(int *v, int o, int n)
{
- return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
+ return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);
-unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
- return __atomic_or((int *)p, __atomic_setup(p), mask).val;
+ return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
-EXPORT_SYMBOL(_atomic_or);
+EXPORT_SYMBOL(_atomic_fetch_or);
-unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
- return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+ return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
-EXPORT_SYMBOL(_atomic_and);
+EXPORT_SYMBOL(_atomic_fetch_and);
-unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
- return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
+ return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
-EXPORT_SYMBOL(_atomic_andn);
+EXPORT_SYMBOL(_atomic_fetch_andn);
-unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
- return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
+ return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
-EXPORT_SYMBOL(_atomic_xor);
+EXPORT_SYMBOL(_atomic_fetch_xor);
long long _atomic64_xchg(long long *v, long long n)
@@ -142,23 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
}
EXPORT_SYMBOL(_atomic64_cmpxchg);
-long long _atomic64_and(long long *v, long long n)
+long long _atomic64_fetch_and(long long *v, long long n)
{
- return __atomic64_and(v, __atomic_setup(v), n);
+ return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
-EXPORT_SYMBOL(_atomic64_and);
+EXPORT_SYMBOL(_atomic64_fetch_and);
-long long _atomic64_or(long long *v, long long n)
+long long _atomic64_fetch_or(long long *v, long long n)
{
- return __atomic64_or(v, __atomic_setup(v), n);
+ return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
-EXPORT_SYMBOL(_atomic64_or);
+EXPORT_SYMBOL(_atomic64_fetch_or);
-long long _atomic64_xor(long long *v, long long n)
+long long _atomic64_fetch_xor(long long *v, long long n)
{
- return __atomic64_xor(v, __atomic_setup(v), n);
+ return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
-EXPORT_SYMBOL(_atomic64_xor);
+EXPORT_SYMBOL(_atomic64_fetch_xor);
/*
* If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index f611265633d6..1a70e6c0f259 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -172,15 +172,20 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
.endif
.endm
-atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
-atomic_op _xchg, 32, "move r24, r2"
-atomic_op _xchg_add, 32, "add r24, r22, r2"
-atomic_op _xchg_add_unless, 32, \
+
+/*
+ * Use __atomic32 prefix to avoid collisions with GCC builtin __atomic functions.
+ */
+
+atomic_op 32_cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
+atomic_op 32_xchg, 32, "move r24, r2"
+atomic_op 32_xchg_add, 32, "add r24, r22, r2"
+atomic_op 32_xchg_add_unless, 32, \
"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
-atomic_op _or, 32, "or r24, r22, r2"
-atomic_op _and, 32, "and r24, r22, r2"
-atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
-atomic_op _xor, 32, "xor r24, r22, r2"
+atomic_op 32_fetch_or, 32, "or r24, r22, r2"
+atomic_op 32_fetch_and, 32, "and r24, r22, r2"
+atomic_op 32_fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
+atomic_op 32_fetch_xor, 32, "xor r24, r22, r2"
atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \
{ bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }"
@@ -192,9 +197,9 @@ atomic_op 64_xchg_add_unless, 64, \
{ bbns r26, 3f; add r24, r22, r4 }; \
{ bbns r27, 3f; add r25, r23, r5 }; \
slt_u r26, r24, r22; add r25, r25, r26"
-atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
-atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
-atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
+atomic_op 64_fetch_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_fetch_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_fetch_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
jrp lr /* happy backtracer */
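To see the collision the new comment is worried about, note that GCC already owns the __atomic_* names as builtins; the minimal user-space sketch below compiles with no declaration of __atomic_fetch_or() at all, which is why the assembly entry points switch to the __atomic32_/__atomic64_ prefixes:

	#include <stdio.h>

	int main(void)
	{
		unsigned long word = 0x1;

		/* __atomic_fetch_or() is a compiler builtin, not a kernel symbol. */
		__atomic_fetch_or(&word, 0x4, __ATOMIC_SEQ_CST);
		printf("0x%lx\n", word);	/* 0x5 */
		return 0;
	}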
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 9d171ca4302c..c5369fe643c7 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -77,7 +77,11 @@ uint64_t __umoddi3(uint64_t dividend, uint64_t divisor);
EXPORT_SYMBOL(__umoddi3);
int64_t __moddi3(int64_t dividend, int64_t divisor);
EXPORT_SYMBOL(__moddi3);
-#ifndef __tilegx__
+#ifdef __tilegx__
+typedef int TItype __attribute__((mode(TI)));
+TItype __multi3(TItype a, TItype b);
+EXPORT_SYMBOL(__multi3); /* required for gcc 7 and later */
+#else
int64_t __muldi3(int64_t, int64_t);
EXPORT_SYMBOL(__muldi3);
uint64_t __lshrdi3(uint64_t, unsigned int);
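The __multi3 export matters because newer gcc can lower a 128-bit multiply into a libgcc call rather than inline instructions; a hypothetical module containing something like the sketch below would then need the symbol resolved at load time (the function and values are made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* May be compiled to a call to libgcc's __multi3() instead of inline
	 * multiply instructions, depending on compiler version and target. */
	static __int128 mul128(__int128 a, __int128 b)
	{
		return a * b;
	}

	int main(void)
	{
		__int128 r = mul128((__int128)1 << 80, 3);

		printf("low 64 bits: 0x%llx\n", (unsigned long long)(uint64_t)r);
		return 0;
	}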
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 88c2a53362e7..076c6cc43113 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
do {
delay_backoff(iterations++);
} while (READ_ONCE(lock->current_ticket) == curr);
+
+ /*
+ * The TILE architecture doesn't do read speculation; therefore
+ * a control dependency guarantees a LOAD->{LOAD,STORE} order.
+ */
+ barrier();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
index c8d1f94ff1fe..a4b5b2cbce93 100644
--- a/arch/tile/lib/spinlock_64.c
+++ b/arch/tile/lib/spinlock_64.c
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
do {
delay_backoff(iterations++);
} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
+
+ /*
+ * The TILE architecture doesn't do read speculation; therefore
+ * a control dependency guarantees a LOAD->{LOAD,STORE} order.
+ */
+ barrier();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);
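Both spinlock hunks above add the same thing: after spinning on a plain load, a compiler barrier turns the control dependency into the ordering the caller expects. A hedged user-space analogue of that idiom, using C11 atomics rather than the kernel's smp_acquire__after_ctrl_dep()/barrier():

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Spin on a relaxed load, then issue an acquire fence so accesses made
	 * after the wait cannot be reordered before the final observation. */
	static void wait_for_flag(atomic_bool *flag)
	{
		while (!atomic_load_explicit(flag, memory_order_relaxed))
			;	/* a real implementation would back off here */

		atomic_thread_fence(memory_order_acquire);
	}

	int main(void)
	{
		static atomic_bool ready;

		atomic_store_explicit(&ready, true, memory_order_release);
		wait_for_flag(&ready);	/* returns immediately in this single-threaded demo */
		return 0;
	}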
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 26734214818c..beba986589e5 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -434,7 +434,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return 0;
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 7bf2491a9c1f..7cc6ee7f1a58 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -45,20 +45,20 @@ void show_mem(unsigned int filter)
struct zone *zone;
pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
- (global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_ACTIVE_FILE)),
- (global_page_state(NR_INACTIVE_ANON) +
- global_page_state(NR_INACTIVE_FILE)),
- global_page_state(NR_FILE_DIRTY),
- global_page_state(NR_WRITEBACK),
- global_page_state(NR_UNSTABLE_NFS),
+ (global_node_page_state(NR_ACTIVE_ANON) +
+ global_node_page_state(NR_ACTIVE_FILE)),
+ (global_node_page_state(NR_INACTIVE_ANON) +
+ global_node_page_state(NR_INACTIVE_FILE)),
+ global_node_page_state(NR_FILE_DIRTY),
+ global_node_page_state(NR_WRITEBACK),
+ global_node_page_state(NR_UNSTABLE_NFS),
global_page_state(NR_FREE_PAGES),
(global_page_state(NR_SLAB_RECLAIMABLE) +
global_page_state(NR_SLAB_UNRECLAIMABLE)),
- global_page_state(NR_FILE_MAPPED),
+ global_node_page_state(NR_FILE_MAPPED),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE),
- global_page_state(NR_FILE_PAGES),
+ global_node_page_state(NR_FILE_PAGES),
get_nr_swap_pages());
for_each_zone(zone) {
@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
int order)
{
- gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+ gfp_t flags = GFP_KERNEL|__GFP_ZERO;
struct page *p;
int i;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 17e96dc29596..f3540270d096 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -801,6 +801,7 @@ static void ubd_device_release(struct device *dev)
static int ubd_disk_register(int major, u64 size, int unit,
struct gendisk **disk_out)
{
+ struct device *parent = NULL;
struct gendisk *disk;
disk = alloc_disk(1 << UBD_SHIFT);
@@ -823,12 +824,12 @@ static int ubd_disk_register(int major, u64 size, int unit,
ubd_devs[unit].pdev.dev.release = ubd_device_release;
dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
platform_device_register(&ubd_devs[unit].pdev);
- disk->driverfs_dev = &ubd_devs[unit].pdev.dev;
+ parent = &ubd_devs[unit].pdev.dev;
}
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
- add_disk(disk);
+ device_add_disk(parent, disk);
*disk_out = disk;
return 0;
@@ -1286,7 +1287,7 @@ static void do_ubd_request(struct request_queue *q)
req = dev->request;
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
io_req = kmalloc(sizeof(struct io_thread_req),
GFP_ATOMIC);
if (io_req == NULL) {
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 16eb63fac57d..821ff0acfe17 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -102,7 +102,7 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
tlb->need_flush = 1;
free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
+ return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
@@ -110,6 +110,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_page(tlb, page);
}
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return tlb_remove_page(tlb, page);
+}
+
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index b2a2dff50b4e..e7437ec62710 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
- pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 48b0dcbd87be..ef4b8f949b51 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -20,12 +20,12 @@ void handle_syscall(struct uml_pt_regs *r)
UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
- /* Do the secure computing check first; failures should be fast. */
- if (secure_computing() == -1)
+ if (syscall_trace_enter(regs))
return;
- if (syscall_trace_enter(regs))
- goto out;
+ /* Do the seccomp check after ptrace; failures should be fast. */
+ if (secure_computing(NULL) == -1)
+ return;
/* Update the syscall number after orig_ax has potentially been updated
* with ptrace.
@@ -37,6 +37,5 @@ void handle_syscall(struct uml_pt_regs *r)
PT_REGS_SET_SYSCALL_RETURN(regs,
EXECUTE_SYSCALL(syscall, regs));
-out:
syscall_trace_leave(regs);
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 98783dd0fa2e..ad8f206ab5e8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -73,7 +73,7 @@ good_area:
do {
int fault;
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
goto out_nosemaphore;
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index e5602ee9c610..0769066929c6 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -80,7 +80,7 @@ config ARCH_PUV3
select CPU_UCV2
select GENERIC_CLOCKEVENTS
select HAVE_CLK
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
# CONFIGs for ARCH_PUV3
diff --git a/arch/unicore32/configs/unicore32_defconfig b/arch/unicore32/configs/unicore32_defconfig
index 45f47f88d86a..aebd01fc28e5 100644
--- a/arch/unicore32/configs/unicore32_defconfig
+++ b/arch/unicore32/configs/unicore32_defconfig
@@ -161,7 +161,7 @@ CONFIG_LEDS_GPIO=y
# LED Triggers
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
# Real Time Clock
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 2e02d1356fdf..26775793c204 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/*
* Allocate one PTE table.
diff --git a/arch/unicore32/kernel/gpio.c b/arch/unicore32/kernel/gpio.c
index 49347a0e9288..bf164bb4dba2 100644
--- a/arch/unicore32/kernel/gpio.c
+++ b/arch/unicore32/kernel/gpio.c
@@ -27,7 +27,7 @@ static const struct gpio_led puv3_gpio_leds[] = {
{ .name = "cpuhealth", .gpio = GPO_CPU_HEALTH, .active_low = 0,
.default_trigger = "heartbeat", },
{ .name = "hdd_led", .gpio = GPO_HDD_LED, .active_low = 1,
- .default_trigger = "ide-disk", },
+ .default_trigger = "disk-activity", },
};
static const struct gpio_led_platform_data puv3_gpio_led_data = {
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 2ec3d3adcefc..6c7f70bcaae3 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -194,7 +194,7 @@ good_area:
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the fault.
*/
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+ fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
return fault;
check_stack:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0a7b885964ba..2fa55851d2a9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@ config X86
select ANON_INODES
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -49,7 +50,6 @@ config X86
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
- select ARCH_WANT_OPTIONAL_GPIOLIB
select BUILDTIME_EXTABLE_SORT
select CLKEVT_I8253
select CLKSRC_I8253 if X86_32
@@ -294,11 +294,6 @@ config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR
-config ARCH_HWEIGHT_CFLAGS
- string
- default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
- default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -643,7 +638,7 @@ config STA2X11
select X86_DMA_REMAP
select SWIOTLB
select MFD_STA2X11
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
default n
---help---
This adds support for boards based on the STA2X11 IO-Hub,
@@ -1934,21 +1929,26 @@ config RANDOMIZE_BASE
attempts relying on knowledge of the location of kernel
code internals.
- The kernel physical and virtual address can be randomized
- from 16MB up to 1GB on 64-bit and 512MB on 32-bit. (Note that
- using RANDOMIZE_BASE reduces the memory space available to
- kernel modules from 1.5GB to 1GB.)
+ On 64-bit, the kernel physical and virtual addresses are
+ randomized separately. The physical address will be anywhere
+ between 16MB and the top of physical memory (up to 64TB). The
+ virtual address will be randomized from 16MB up to 1GB (9 bits
+ of entropy). Note that this also reduces the memory space
+ available to kernel modules from 1.5GB to 1GB.
+
+ On 32-bit, the kernel physical and virtual addresses are
+ randomized together. They will be randomized from 16MB up to
+ 512MB (8 bits of entropy).
Entropy is generated using the RDRAND instruction if it is
supported. If RDTSC is supported, its value is mixed into
the entropy pool as well. If neither RDRAND nor RDTSC are
- supported, then entropy is read from the i8254 timer.
-
- Since the kernel is built using 2GB addressing, and
- PHYSICAL_ALIGN must be at a minimum of 2MB, only 10 bits of
- entropy is theoretically possible. Currently, with the
- default value for PHYSICAL_ALIGN and due to page table
- layouts, 64-bit uses 9 bits of entropy and 32-bit uses 8 bits.
+ supported, then entropy is read from the i8254 timer. The
+ usable entropy is limited by the kernel being built using
+ 2GB addressing and by PHYSICAL_ALIGN requiring a
+ minimum of 2MB. As a result, only 10 bits of entropy are
+ theoretically possible, but the implementations are further
+ limited due to memory layouts.
If CONFIG_HIBERNATE is also enabled, KASLR is disabled at boot
time. To enable it, boot with "kaslr" on the kernel command
@@ -1988,6 +1988,38 @@ config PHYSICAL_ALIGN
Don't change this unless you know what you are doing.
+config RANDOMIZE_MEMORY
+ bool "Randomize the kernel memory sections"
+ depends on X86_64
+ depends on RANDOMIZE_BASE
+ default RANDOMIZE_BASE
+ ---help---
+ Randomizes the base virtual address of kernel memory sections
+ (physical memory mapping, vmalloc & vmemmap). This security feature
+ makes exploits relying on predictable memory locations less reliable.
+
+ The order of allocations remains unchanged. Entropy is generated in
+ the same way as RANDOMIZE_BASE. The current implementation, in the
+ optimal configuration, has on average 30,000 different possible
+ virtual addresses for each memory section.
+
+ If unsure, say N.
+
+config RANDOMIZE_MEMORY_PHYSICAL_PADDING
+ hex "Physical memory mapping padding" if EXPERT
+ depends on RANDOMIZE_MEMORY
+ default "0xa" if MEMORY_HOTPLUG
+ default "0x0"
+ range 0x1 0x40 if MEMORY_HOTPLUG
+ range 0x0 0x40
+ ---help---
+ Define the padding in terabytes added to the existing physical
+ memory size during kernel memory randomization. It is useful
+ for memory hotplug support but reduces the entropy available for
+ address randomization.
+
+ If unsure, leave at the default value.
+
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
@@ -2439,6 +2471,15 @@ config PCI_CNB20LE_QUIRK
source "drivers/pci/Kconfig"
+config ISA_BUS
+ bool "ISA-style bus support on modern systems" if EXPERT
+ select ISA_BUS_API
+ help
+ Enables ISA-style drivers on modern systems. This is necessary to
+ support PC/104 devices on X86_64 platforms.
+
+ If unsure, say N.
+
# x86_64 have no ISA slots, but can have ISA-style DMA.
config ISA_DMA_API
bool "ISA-style DMA support" if (X86_64 && EXPERT)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 6fce7f096b88..830ed391e7ef 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -126,14 +126,6 @@ else
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
endif
-# Make sure compiler does not have buggy stack-protector support.
-ifdef CONFIG_CC_STACKPROTECTOR
- cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
- $(warning stack-protector enabled but compiler support broken)
- endif
-endif
-
ifdef CONFIG_X86_X32
x32_ld_ok := $(call try-run,\
/bin/echo -e '1: .quad 1b' | \
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 700a9c6e6159..be8e688fa0d4 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+ fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
index 878e4b9940d9..0d41d68131cc 100644
--- a/arch/x86/boot/bitops.h
+++ b/arch/x86/boot/bitops.h
@@ -16,14 +16,16 @@
#define BOOT_BITOPS_H
#define _LINUX_BITOPS_H /* Inhibit inclusion of <linux/bitops.h> */
-static inline int constant_test_bit(int nr, const void *addr)
+#include <linux/types.h>
+
+static inline bool constant_test_bit(int nr, const void *addr)
{
const u32 *p = (const u32 *)addr;
return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
}
-static inline int variable_test_bit(int nr, const void *addr)
+static inline bool variable_test_bit(int nr, const void *addr)
{
- u8 v;
+ bool v;
const u32 *p = (const u32 *)addr;
asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 9011a88353de..e5612f3e3b57 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/edd.h>
#include <asm/setup.h>
+#include <asm/asm.h>
#include "bitops.h"
#include "ctype.h"
#include "cpuflags.h"
@@ -176,18 +177,18 @@ static inline void wrgs32(u32 v, addr_t addr)
}
/* Note: these only return true/false, not a signed return value! */
-static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
{
- u8 diff;
- asm volatile("fs; repe; cmpsb; setnz %0"
- : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ bool diff;
+ asm volatile("fs; repe; cmpsb" CC_SET(nz)
+ : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
-static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
{
- u8 diff;
- asm volatile("gs; repe; cmpsb; setnz %0"
- : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ bool diff;
+ asm volatile("gs; repe; cmpsb" CC_SET(nz)
+ : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
@@ -294,6 +295,7 @@ static inline int cmdline_find_option_bool(const char *option)
/* cpu.c, cpucheck.c */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
+int check_knl_erratum(void);
int validate_cpu(void);
/* early_serial_console.c */
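The memcmp_fs()/memcmp_gs() change above replaces an explicit setnz with the compiler's flag-output constraints via CC_SET()/CC_OUT(). A minimal user-space sketch of the underlying idiom (x86 only, and assuming a compiler that defines __GCC_ASM_FLAG_OUTPUTS__):

	#include <stdbool.h>
	#include <stdio.h>

	/* Let the compiler read ZF directly as a bool output instead of
	 * materializing it with a separate setnz instruction. */
	static bool differs(unsigned int a, unsigned int b)
	{
		bool diff;

		asm("cmpl %2, %1" : "=@ccnz" (diff) : "r" (a), "r" (b));
		return diff;
	}

	int main(void)
	{
		printf("%d %d\n", differs(1, 2), differs(3, 3));	/* 1 0 */
		return 0;
	}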
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index f1356889204e..536ccfcc01c6 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -85,7 +85,25 @@ vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
$(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
+# The compressed kernel is built with -fPIC/-fPIE so that a boot loader
+# can place it anywhere in memory and it will still run. However, since
+# it is executed as-is without any ELF relocation processing performed
+# (and has already had all relocation sections stripped from the binary),
+# none of the code can use data relocations (e.g. static assignments of
+# pointer values), since they will be meaningless at runtime. This check
+# will refuse to link the vmlinux if any of these relocations are found.
+quiet_cmd_check_data_rel = DATAREL $@
+define cmd_check_data_rel
+ for obj in $(filter %.o,$^); do \
+ readelf -S $$obj | grep -qF .rel.local && { \
+ echo "error: $$obj has data relocations!" >&2; \
+ exit 1; \
+ } || true; \
+ done
+endef
+
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+ $(call if_changed,check_data_rel)
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
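For illustration, the kind of construct the new DATAREL check is meant to catch (identifiers are made up): a pointer whose value is fixed at build time needs a data relocation, which the stripped, PIE-built decompressor can never apply, whereas computing the address at run time stays PC-relative:

	#include <stdio.h>

	static const char message[] = "hello";

	/* Needs a data relocation to hold the final address of 'message';
	 * this is exactly what the check above refuses to link. */
	static const char *bad_ptr = message;

	/* Computing the address at run time uses a PC-relative reference
	 * instead, so no data relocation is required. */
	static const char *get_message(void)
	{
		return message;
	}

	int main(void)
	{
		printf("%s %s\n", bad_ptr, get_message());
		return 0;
	}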
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 52fef606bc54..ff574dad95cc 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -757,7 +757,6 @@ struct boot_params *make_boot_params(struct efi_config *c)
struct boot_params *boot_params;
struct apm_bios_info *bi;
struct setup_header *hdr;
- struct efi_info *efi;
efi_loaded_image_t *image;
void *options, *handle;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
@@ -800,7 +799,6 @@ struct boot_params *make_boot_params(struct efi_config *c)
memset(boot_params, 0x0, 0x4000);
hdr = &boot_params->hdr;
- efi = &boot_params->efi_info;
bi = &boot_params->apm_bios_info;
/* Copy the second sector to boot_params */
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index cfeb0259ed81..a66854d99ee1 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -12,10 +12,6 @@
#include "misc.h"
#include "error.h"
-#include <asm/msr.h>
-#include <asm/archrandom.h>
-#include <asm/e820.h>
-
#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
@@ -26,26 +22,6 @@
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
-#define I8254_PORT_CONTROL 0x43
-#define I8254_PORT_COUNTER0 0x40
-#define I8254_CMD_READBACK 0xC0
-#define I8254_SELECT_COUNTER0 0x02
-#define I8254_STATUS_NOTREADY 0x40
-static inline u16 i8254(void)
-{
- u16 status, timer;
-
- do {
- outb(I8254_PORT_CONTROL,
- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
- status = inb(I8254_PORT_COUNTER0);
- timer = inb(I8254_PORT_COUNTER0);
- timer |= inb(I8254_PORT_COUNTER0) << 8;
- } while (status & I8254_STATUS_NOTREADY);
-
- return timer;
-}
-
static unsigned long rotate_xor(unsigned long hash, const void *area,
size_t size)
{
@@ -62,7 +38,7 @@ static unsigned long rotate_xor(unsigned long hash, const void *area,
}
/* Attempt to create a simple but unpredictable starting entropy. */
-static unsigned long get_random_boot(void)
+static unsigned long get_boot_seed(void)
{
unsigned long hash = 0;
@@ -72,50 +48,8 @@ static unsigned long get_random_boot(void)
return hash;
}
-static unsigned long get_random_long(const char *purpose)
-{
-#ifdef CONFIG_X86_64
- const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
-#else
- const unsigned long mix_const = 0x3f39e593UL;
-#endif
- unsigned long raw, random = get_random_boot();
- bool use_i8254 = true;
-
- debug_putstr(purpose);
- debug_putstr(" KASLR using");
-
- if (has_cpuflag(X86_FEATURE_RDRAND)) {
- debug_putstr(" RDRAND");
- if (rdrand_long(&raw)) {
- random ^= raw;
- use_i8254 = false;
- }
- }
-
- if (has_cpuflag(X86_FEATURE_TSC)) {
- debug_putstr(" RDTSC");
- raw = rdtsc();
-
- random ^= raw;
- use_i8254 = false;
- }
-
- if (use_i8254) {
- debug_putstr(" i8254");
- random ^= i8254();
- }
-
- /* Circular multiply for better bit diffusion */
- asm("mul %3"
- : "=a" (random), "=d" (raw)
- : "a" (random), "rm" (mix_const));
- random += raw;
-
- debug_putstr("...\n");
-
- return random;
-}
+#define KASLR_COMPRESSED_BOOT
+#include "../../lib/kaslr.c"
struct mem_vector {
unsigned long start;
@@ -132,17 +66,6 @@ enum mem_avoid_index {
static struct mem_vector mem_avoid[MEM_AVOID_MAX];
-static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
-{
- /* Item at least partially before region. */
- if (item->start < region->start)
- return false;
- /* Item at least partially after region. */
- if (item->start + item->size > region->start + region->size)
- return false;
- return true;
-}
-
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
/* Item one is entirely before item two. */
@@ -296,6 +219,7 @@ static bool mem_avoid_overlap(struct mem_vector *img,
if (mem_overlaps(img, &mem_avoid[i]) &&
mem_avoid[i].start < earliest) {
*overlap = mem_avoid[i];
+ earliest = overlap->start;
is_overlapping = true;
}
}
@@ -310,6 +234,7 @@ static bool mem_avoid_overlap(struct mem_vector *img,
if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
*overlap = avoid;
+ earliest = overlap->start;
is_overlapping = true;
}
@@ -319,8 +244,6 @@ static bool mem_avoid_overlap(struct mem_vector *img,
return is_overlapping;
}
-static unsigned long slots[KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN];
-
struct slot_area {
unsigned long addr;
int num;
@@ -351,36 +274,44 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
}
}
-static void slots_append(unsigned long addr)
-{
- /* Overflowing the slots list should be impossible. */
- if (slot_max >= KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN)
- return;
-
- slots[slot_max++] = addr;
-}
-
static unsigned long slots_fetch_random(void)
{
+ unsigned long slot;
+ int i;
+
/* Handle case of no slots stored. */
if (slot_max == 0)
return 0;
- return slots[get_random_long("Physical") % slot_max];
+ slot = kaslr_get_random_long("Physical") % slot_max;
+
+ for (i = 0; i < slot_area_index; i++) {
+ if (slot >= slot_areas[i].num) {
+ slot -= slot_areas[i].num;
+ continue;
+ }
+ return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
+ }
+
+ if (i == slot_area_index)
+ debug_putstr("slots_fetch_random() failed!?\n");
+ return 0;
}
static void process_e820_entry(struct e820entry *entry,
unsigned long minimum,
unsigned long image_size)
{
- struct mem_vector region, img, overlap;
+ struct mem_vector region, overlap;
+ struct slot_area slot_area;
+ unsigned long start_orig;
/* Skip non-RAM entries. */
if (entry->type != E820_RAM)
return;
- /* Ignore entries entirely above our maximum. */
- if (entry->addr >= KERNEL_IMAGE_SIZE)
+ /* On 32-bit, ignore entries entirely above our maximum. */
+ if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
return;
/* Ignore entries entirely below our minimum. */
@@ -390,31 +321,55 @@ static void process_e820_entry(struct e820entry *entry,
region.start = entry->addr;
region.size = entry->size;
- /* Potentially raise address to minimum location. */
- if (region.start < minimum)
- region.start = minimum;
+ /* Give up if slot area array is full. */
+ while (slot_area_index < MAX_SLOT_AREA) {
+ start_orig = region.start;
- /* Potentially raise address to meet alignment requirements. */
- region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
+ /* Potentially raise address to minimum location. */
+ if (region.start < minimum)
+ region.start = minimum;
- /* Did we raise the address above the bounds of this e820 region? */
- if (region.start > entry->addr + entry->size)
- return;
+ /* Potentially raise address to meet alignment needs. */
+ region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
- /* Reduce size by any delta from the original address. */
- region.size -= region.start - entry->addr;
+ /* Did we raise the address above this e820 region? */
+ if (region.start > entry->addr + entry->size)
+ return;
- /* Reduce maximum size to fit end of image within maximum limit. */
- if (region.start + region.size > KERNEL_IMAGE_SIZE)
- region.size = KERNEL_IMAGE_SIZE - region.start;
+ /* Reduce size by any delta from the original address. */
+ region.size -= region.start - start_orig;
- /* Walk each aligned slot and check for avoided areas. */
- for (img.start = region.start, img.size = image_size ;
- mem_contains(&region, &img) ;
- img.start += CONFIG_PHYSICAL_ALIGN) {
- if (mem_avoid_overlap(&img, &overlap))
- continue;
- slots_append(img.start);
+ /* On 32-bit, reduce region size to fit within max size. */
+ if (IS_ENABLED(CONFIG_X86_32) &&
+ region.start + region.size > KERNEL_IMAGE_SIZE)
+ region.size = KERNEL_IMAGE_SIZE - region.start;
+
+ /* Return if region can't contain decompressed kernel */
+ if (region.size < image_size)
+ return;
+
+ /* If nothing overlaps, store the region and return. */
+ if (!mem_avoid_overlap(&region, &overlap)) {
+ store_slot_info(&region, image_size);
+ return;
+ }
+
+ /* Store beginning of region if holds at least image_size. */
+ if (overlap.start > region.start + image_size) {
+ struct mem_vector beginning;
+
+ beginning.start = region.start;
+ beginning.size = overlap.start - region.start;
+ store_slot_info(&beginning, image_size);
+ }
+
+ /* Return if overlap extends to or past end of region. */
+ if (overlap.start + overlap.size >= region.start + region.size)
+ return;
+
+ /* Clip off the overlapping region and start over. */
+ region.size -= overlap.start - region.start + overlap.size;
+ region.start = overlap.start + overlap.size;
}
}
@@ -431,6 +386,10 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
for (i = 0; i < boot_params->e820_entries; i++) {
process_e820_entry(&boot_params->e820_map[i], minimum,
image_size);
+ if (slot_area_index == MAX_SLOT_AREA) {
+ debug_putstr("Aborted e820 scan (slot_areas full)!\n");
+ break;
+ }
}
return slots_fetch_random();
@@ -454,7 +413,7 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
CONFIG_PHYSICAL_ALIGN + 1;
- random_addr = get_random_long("Virtual") % slots;
+ random_addr = kaslr_get_random_long("Virtual") % slots;
return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
@@ -463,48 +422,54 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
* Since this function examines addresses much more numerically,
* it takes the input and output pointers as 'unsigned long'.
*/
-unsigned char *choose_random_location(unsigned long input,
- unsigned long input_size,
- unsigned long output,
- unsigned long output_size)
+void choose_random_location(unsigned long input,
+ unsigned long input_size,
+ unsigned long *output,
+ unsigned long output_size,
+ unsigned long *virt_addr)
{
- unsigned long choice = output;
- unsigned long random_addr;
+ unsigned long random_addr, min_addr;
+
+ /* By default, keep output position unchanged. */
+ *virt_addr = *output;
-#ifdef CONFIG_HIBERNATION
- if (!cmdline_find_option_bool("kaslr")) {
- warn("KASLR disabled: 'kaslr' not on cmdline (hibernation selected).");
- goto out;
- }
-#else
if (cmdline_find_option_bool("nokaslr")) {
warn("KASLR disabled: 'nokaslr' on cmdline.");
- goto out;
+ return;
}
-#endif
boot_params->hdr.loadflags |= KASLR_FLAG;
+ /* Prepare to add new identity pagetables on demand. */
+ initialize_identity_maps();
+
/* Record the various known unsafe memory ranges. */
- mem_avoid_init(input, input_size, output);
+ mem_avoid_init(input, input_size, *output);
+
+ /*
+ * Low end of the randomization range should be the
+ * smaller of 512M or the initial kernel image
+ * location:
+ */
+ min_addr = min(*output, 512UL << 20);
/* Walk e820 and find a random address. */
- random_addr = find_random_phys_addr(output, output_size);
+ random_addr = find_random_phys_addr(min_addr, output_size);
if (!random_addr) {
warn("KASLR disabled: could not find suitable E820 region!");
- goto out;
+ } else {
+ /* Update the new physical address location. */
+ if (*output != random_addr) {
+ add_identity_map(random_addr, output_size);
+ *output = random_addr;
+ }
}
- /* Always enforce the minimum. */
- if (random_addr < choice)
- goto out;
-
- choice = random_addr;
-
- add_identity_map(choice, output_size);
-
/* This actually loads the identity pagetable on x86_64. */
finalize_identity_maps();
-out:
- return (unsigned char *)choice;
+
+ /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
+ if (IS_ENABLED(CONFIG_X86_64))
+ random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
+ *virt_addr = random_addr;
}
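To make the new slot_area walk in slots_fetch_random() concrete, here is a toy model with invented regions showing how one flat random index is mapped back to an aligned physical address:

	#include <stdio.h>

	struct slot_area {
		unsigned long addr;
		int num;
	};

	int main(void)
	{
		/* Two made-up regions: 10 slots from 16MB, 20 slots from 1GB. */
		struct slot_area areas[] = {
			{ 0x01000000UL, 10 },
			{ 0x40000000UL, 20 },
		};
		unsigned long align = 0x200000UL;	/* CONFIG_PHYSICAL_ALIGN: 2MB */
		unsigned long slot = 14;		/* pretend result of % slot_max */
		unsigned int i;

		for (i = 0; i < sizeof(areas) / sizeof(areas[0]); i++) {
			if (slot >= (unsigned long)areas[i].num) {
				slot -= areas[i].num;
				continue;
			}
			/* Slot 14 lands 4 slots into the second area: 0x40800000. */
			printf("0x%lx\n", areas[i].addr + slot * align);
			return 0;
		}
		return 1;
	}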
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index f14db4e21654..b3c5a5f030ce 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -170,7 +170,8 @@ void __puthex(unsigned long value)
}
#if CONFIG_X86_NEED_RELOCS
-static void handle_relocations(void *output, unsigned long output_len)
+static void handle_relocations(void *output, unsigned long output_len,
+ unsigned long virt_addr)
{
int *reloc;
unsigned long delta, map, ptr;
@@ -182,11 +183,6 @@ static void handle_relocations(void *output, unsigned long output_len)
* and where it was actually loaded.
*/
delta = min_addr - LOAD_PHYSICAL_ADDR;
- if (!delta) {
- debug_putstr("No relocation needed... ");
- return;
- }
- debug_putstr("Performing relocations... ");
/*
* The kernel contains a table of relocation addresses. Those
@@ -198,6 +194,20 @@ static void handle_relocations(void *output, unsigned long output_len)
map = delta - __START_KERNEL_map;
/*
+ * 32-bit always performs relocations. 64-bit relocations are only
+ * needed if KASLR has chosen a different starting address offset
+ * from __START_KERNEL_map.
+ */
+ if (IS_ENABLED(CONFIG_X86_64))
+ delta = virt_addr - LOAD_PHYSICAL_ADDR;
+
+ if (!delta) {
+ debug_putstr("No relocation needed... ");
+ return;
+ }
+ debug_putstr("Performing relocations... ");
+
+ /*
* Process relocations: 32 bit relocations first then 64 bit after.
* Three sets of binary relocations are added to the end of the kernel
* before compression. Each relocation table entry is the kernel
@@ -250,7 +260,8 @@ static void handle_relocations(void *output, unsigned long output_len)
#endif
}
#else
-static inline void handle_relocations(void *output, unsigned long output_len)
+static inline void handle_relocations(void *output, unsigned long output_len,
+ unsigned long virt_addr)
{ }
#endif
@@ -327,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
unsigned long output_len)
{
const unsigned long kernel_total_size = VO__end - VO__text;
- unsigned char *output_orig = output;
+ unsigned long virt_addr = (unsigned long)output;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
@@ -366,13 +377,16 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*/
- output = choose_random_location((unsigned long)input_data, input_len,
- (unsigned long)output,
- max(output_len, kernel_total_size));
+ choose_random_location((unsigned long)input_data, input_len,
+ (unsigned long *)&output,
+ max(output_len, kernel_total_size),
+ &virt_addr);
/* Validate memory location choices. */
if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
- error("Destination address inappropriately aligned");
+ error("Destination physical address inappropriately aligned");
+ if (virt_addr & (MIN_KERNEL_ALIGN - 1))
+ error("Destination virtual address inappropriately aligned");
#ifdef CONFIG_X86_64
if (heap > 0x3fffffffffffUL)
error("Destination address too large");
@@ -382,19 +396,16 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
#endif
#ifndef CONFIG_RELOCATABLE
if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
- error("Wrong destination address");
+ error("Destination address does not match LOAD_PHYSICAL_ADDR");
+ if ((unsigned long)output != virt_addr)
+ error("Destination virtual address changed when not relocatable");
#endif
debug_putstr("\nDecompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, output_len,
NULL, error);
parse_elf(output);
- /*
- * 32-bit always performs relocations. 64-bit relocations are only
- * needed if kASLR has chosen a different load address.
- */
- if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
- handle_relocations(output, output_len);
+ handle_relocations(output, output_len, virt_addr);
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index b6fec1ff10e4..1c8355eadbd1 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -67,28 +67,33 @@ int cmdline_find_option_bool(const char *option);
#if CONFIG_RANDOMIZE_BASE
/* kaslr.c */
-unsigned char *choose_random_location(unsigned long input_ptr,
- unsigned long input_size,
- unsigned long output_ptr,
- unsigned long output_size);
+void choose_random_location(unsigned long input,
+ unsigned long input_size,
+ unsigned long *output,
+ unsigned long output_size,
+ unsigned long *virt_addr);
/* cpuflags.c */
bool has_cpuflag(int flag);
#else
-static inline
-unsigned char *choose_random_location(unsigned long input_ptr,
- unsigned long input_size,
- unsigned long output_ptr,
- unsigned long output_size)
+static inline void choose_random_location(unsigned long input,
+ unsigned long input_size,
+ unsigned long *output,
+ unsigned long output_size,
+ unsigned long *virt_addr)
{
- return (unsigned char *)output_ptr;
+ /* No change from existing output location. */
+ *virt_addr = *output;
}
#endif
#ifdef CONFIG_X86_64
+void initialize_identity_maps(void);
void add_identity_map(unsigned long start, unsigned long size);
void finalize_identity_maps(void);
extern unsigned char _pgtable[];
#else
+static inline void initialize_identity_maps(void)
+{ }
static inline void add_identity_map(unsigned long start, unsigned long size)
{ }
static inline void finalize_identity_maps(void)
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 34b95df14e69..56589d0a804b 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -2,6 +2,9 @@
* This code is used on x86_64 to create page table identity mappings on
* demand by building up a new set of page tables (or appending to the
* existing ones), and then switching over to them when ready.
+ *
+ * Copyright (C) 2015-2016 Yinghai Lu
+ * Copyright (C) 2016 Kees Cook
*/
/*
@@ -17,6 +20,9 @@
/* These actually do the work of building the kernel identity maps. */
#include <asm/init.h>
#include <asm/pgtable.h>
+/* Use the static base for this part of the boot process */
+#undef __PAGE_OFFSET
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"
/* Used by pgtable.h asm code to force instruction serialization. */
@@ -59,9 +65,21 @@ static struct alloc_pgt_data pgt_data;
/* The top level page table entry pointer. */
static unsigned long level4p;
+/*
+ * Mapping information structure passed to kernel_ident_mapping_init().
+ * Due to relocation, pointers must be assigned at run time, not at build time.
+ */
+static struct x86_mapping_info mapping_info = {
+ .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+};
+
/* Locates and clears a region for a new top level page table. */
-static void prepare_level4(void)
+void initialize_identity_maps(void)
{
+ /* Init mapping_info with run-time function/buffer pointers. */
+ mapping_info.alloc_pgt_page = alloc_pgt_page;
+ mapping_info.context = &pgt_data;
+
/*
* It should be impossible for this not to already be true,
* but since calling this a second time would rewind the other
@@ -96,17 +114,8 @@ static void prepare_level4(void)
*/
void add_identity_map(unsigned long start, unsigned long size)
{
- struct x86_mapping_info mapping_info = {
- .alloc_pgt_page = alloc_pgt_page,
- .context = &pgt_data,
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
- };
unsigned long end = start + size;
- /* Make sure we have a top level page table ready to use. */
- if (!level4p)
- prepare_level4();
-
/* Align boundary to 2M. */
start = round_down(start, PMD_SIZE);
end = round_up(end, PMD_SIZE);
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 29207f69ae8c..26240dde081e 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -93,6 +93,8 @@ int validate_cpu(void)
show_cap_strs(err_flags);
putchar('\n');
return -1;
+ } else if (check_knl_erratum()) {
+ return -1;
} else {
return 0;
}
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 1fd7d575092e..4ad7d70e8739 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -24,6 +24,7 @@
# include "boot.h"
#endif
#include <linux/types.h>
+#include <asm/intel-family.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
@@ -175,6 +176,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
}
}
+ if (!err)
+ err = check_knl_erratum();
if (err_flags_ptr)
*err_flags_ptr = err ? err_flags : NULL;
@@ -185,3 +188,33 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
return (cpu.level < req_level || err) ? -1 : 0;
}
+
+int check_knl_erratum(void)
+{
+ /*
+ * First check for the affected model/family:
+ */
+ if (!is_intel() ||
+ cpu.family != 6 ||
+ cpu.model != INTEL_FAM6_XEON_PHI_KNL)
+ return 0;
+
+ /*
+ * This erratum affects the Accessed/Dirty bits, and can
+ * cause stray bits to be set in !Present PTEs. We have
+ * enough bits in our 64-bit PTEs (which we have on real
+ * 64-bit mode or PAE) to avoid using these troublesome
+ * bits. But, we do not have enough space in our 32-bit
+ * PTEs. So, refuse to run on 32-bit non-PAE kernels.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
+ return 0;
+
+ puts("This 32-bit kernel can not run on this Xeon Phi x200\n"
+ "processor due to a processor erratum. Use a 64-bit\n"
+ "kernel, or enable PAE in this 32-bit kernel.\n\n");
+
+ return -1;
+}
+
+
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
index 431fa5f84537..6687ab953257 100644
--- a/arch/x86/boot/cpuflags.c
+++ b/arch/x86/boot/cpuflags.c
@@ -102,6 +102,7 @@ void get_cpuflags(void)
cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
&cpu.flags[0]);
cpu.level = (tfms >> 8) & 15;
+ cpu.family = cpu.level;
cpu.model = (tfms >> 4) & 15;
if (cpu.level >= 6)
cpu.model += ((tfms >> 16) & 0xf) << 4;
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
index 4cb404fd45ce..15ad56a3f905 100644
--- a/arch/x86/boot/cpuflags.h
+++ b/arch/x86/boot/cpuflags.h
@@ -6,6 +6,7 @@
struct cpu_features {
int level; /* Family, or 64 for x86-64 */
+ int family; /* Family, always */
int model;
u32 flags[NCAPINTS];
};
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 318b8465d302..cc3bd583dce1 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -17,7 +17,7 @@
int memcmp(const void *s1, const void *s2, size_t len)
{
- u8 diff;
+ bool diff;
asm("repe; cmpsb; setnz %0"
: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index b9b912a44d61..34b3fa2889d1 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -49,7 +49,9 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
+ obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
+ obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
+ obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
endif
aes-i586-y := aes-i586-asm_32.o aes_glue.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 5b7fa1471007..0ab5ee1c26af 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -59,17 +59,6 @@ struct aesni_rfc4106_gcm_ctx {
u8 nonce[4];
};
-struct aesni_gcm_set_hash_subkey_result {
- int err;
- struct completion completion;
-};
-
-struct aesni_hash_subkey_req_data {
- u8 iv[16];
- struct aesni_gcm_set_hash_subkey_result result;
- struct scatterlist sg;
-};
-
struct aesni_lrw_ctx {
struct lrw_table_ctx lrw_table;
u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
@@ -809,71 +798,28 @@ static void rfc4106_exit(struct crypto_aead *aead)
cryptd_free_aead(*ctx);
}
-static void
-rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
-{
- struct aesni_gcm_set_hash_subkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
- result->err = err;
- complete(&result->completion);
-}
-
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
- struct crypto_ablkcipher *ctr_tfm;
- struct ablkcipher_request *req;
- int ret = -EINVAL;
- struct aesni_hash_subkey_req_data *req_data;
+ struct crypto_cipher *tfm;
+ int ret;
- ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
- if (IS_ERR(ctr_tfm))
- return PTR_ERR(ctr_tfm);
+ tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
- ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
+ ret = crypto_cipher_setkey(tfm, key, key_len);
if (ret)
- goto out_free_ablkcipher;
-
- ret = -ENOMEM;
- req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
- if (!req)
- goto out_free_ablkcipher;
-
- req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data)
- goto out_free_request;
-
- memset(req_data->iv, 0, sizeof(req_data->iv));
+ goto out_free_cipher;
/* Clear the data in the hash sub key container to zero.*/
/* We want to cipher all zeros to create the hash sub key. */
memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
- init_completion(&req_data->result.completion);
- sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
- ablkcipher_request_set_tfm(req, ctr_tfm);
- ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- rfc4106_set_hash_subkey_done,
- &req_data->result);
-
- ablkcipher_request_set_crypt(req, &req_data->sg,
- &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
-
- ret = crypto_ablkcipher_encrypt(req);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret = wait_for_completion_interruptible
- (&req_data->result.completion);
- if (!ret)
- ret = req_data->result.err;
- }
- kfree(req_data);
-out_free_request:
- ablkcipher_request_free(req);
-out_free_ablkcipher:
- crypto_free_ablkcipher(ctr_tfm);
+ crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
+
+out_free_cipher:
+ crypto_free_cipher(tfm);
return ret;
}
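Why the simplification above is safe: GCM's hash subkey is defined as H = E_K(0^128), the block cipher applied to an all-zero block. The old code obtained it by running ctr(aes) over a zero block with a zero IV, and CTR gives C = P XOR E_K(counter0) = 0 XOR E_K(0^128) = H, which is exactly the value the new crypto_cipher_encrypt_one() call computes synchronously, without the allocation, completion and wait machinery.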
@@ -1098,9 +1044,12 @@ static int rfc4106_encrypt(struct aead_request *req)
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
struct cryptd_aead *cryptd_tfm = *ctx;
- aead_request_set_tfm(req, irq_fpu_usable() ?
- cryptd_aead_child(cryptd_tfm) :
- &cryptd_tfm->base);
+ tfm = &cryptd_tfm->base;
+ if (irq_fpu_usable() && (!in_atomic() ||
+ !cryptd_aead_queued(cryptd_tfm)))
+ tfm = cryptd_aead_child(cryptd_tfm);
+
+ aead_request_set_tfm(req, tfm);
return crypto_aead_encrypt(req);
}
@@ -1111,9 +1060,12 @@ static int rfc4106_decrypt(struct aead_request *req)
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
struct cryptd_aead *cryptd_tfm = *ctx;
- aead_request_set_tfm(req, irq_fpu_usable() ?
- cryptd_aead_child(cryptd_tfm) :
- &cryptd_tfm->base);
+ tfm = &cryptd_tfm->base;
+ if (irq_fpu_usable() && (!in_atomic() ||
+ !cryptd_aead_queued(cryptd_tfm)))
+ tfm = cryptd_aead_child(cryptd_tfm);
+
+ aead_request_set_tfm(req, tfm);
return crypto_aead_decrypt(req);
}
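The encrypt/decrypt changes above (and the analogous ghash-clmulni changes below) share one dispatch rule: bypass cryptd only when the FPU is usable and doing so cannot overtake requests already sitting in the cryptd queue. A sketch of that rule as a helper; rfc4106_pick_tfm is a hypothetical name, not part of the patch:

	static struct crypto_aead *rfc4106_pick_tfm(struct cryptd_aead *cryptd_tfm)
	{
		/* Use the synchronous inner transform only if the FPU may be
		 * used here and either we may sleep (not atomic) or nothing is
		 * queued on cryptd that we could jump ahead of. */
		if (irq_fpu_usable() &&
		    (!in_atomic() || !cryptd_aead_queued(cryptd_tfm)))
			return cryptd_aead_child(cryptd_tfm);

		/* Otherwise defer to the cryptd workqueue. */
		return &cryptd_tfm->base;
	}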
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 2d5c2e0bd939..f910d1d449f0 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -70,7 +70,7 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
struct blkcipher_walk walk;
int err;
- if (!may_use_simd())
+ if (nbytes <= CHACHA20_BLOCK_SIZE || !may_use_simd())
return crypto_chacha20_crypt(desc, dst, src, nbytes);
state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index a69321a77783..0420bab19efb 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -168,30 +168,23 @@ static int ghash_async_init(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
- if (!irq_fpu_usable()) {
- memcpy(cryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
- return crypto_ahash_init(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
- desc->tfm = child;
- desc->flags = req->base.flags;
- return crypto_shash_init(desc);
- }
+ desc->tfm = child;
+ desc->flags = req->base.flags;
+ return crypto_shash_init(desc);
}
static int ghash_async_update(struct ahash_request *req)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable()) {
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+ if (!irq_fpu_usable() ||
+ (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_update(cryptd_req);
@@ -204,12 +197,12 @@ static int ghash_async_update(struct ahash_request *req)
static int ghash_async_final(struct ahash_request *req)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable()) {
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+ if (!irq_fpu_usable() ||
+ (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_final(cryptd_req);
@@ -249,7 +242,8 @@ static int ghash_async_digest(struct ahash_request *req)
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable()) {
+ if (!irq_fpu_usable() ||
+ (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_digest(cryptd_req);
diff --git a/arch/x86/crypto/sha-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
index 2f8756375df5..2f8756375df5 100644
--- a/arch/x86/crypto/sha-mb/Makefile
+++ b/arch/x86/crypto/sha1-mb/Makefile
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index 9c5af331a956..9e5b67127a09 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -67,7 +67,7 @@
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
-#include "sha_mb_ctx.h"
+#include "sha1_mb_ctx.h"
#define FLUSH_INTERVAL 1000 /* in usec */
@@ -77,30 +77,34 @@ struct sha1_mb_ctx {
struct mcryptd_ahash *mcryptd_tfm;
};
-static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
+static inline struct mcryptd_hash_request_ctx
+ *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
- struct shash_desc *desc;
+ struct ahash_request *areq;
- desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
- return container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+ return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}
-static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
+static inline struct ahash_request
+ *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
return container_of((void *) ctx, struct ahash_request, __ctx);
}
static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct shash_desc *desc)
+ struct ahash_request *areq)
{
rctx->flag = HASH_UPDATE;
}
static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state,
- struct job_sha1 *job);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
+ (struct sha1_mb_mgr *state, struct job_sha1 *job);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
+ (struct sha1_mb_mgr *state);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
+ (struct sha1_mb_mgr *state);
static inline void sha1_init_digest(uint32_t *digest)
{
@@ -131,7 +135,8 @@ static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
return i >> SHA1_LOG2_BLOCK_SIZE;
}
-static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx)
+static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
+ struct sha1_hash_ctx *ctx)
{
while (ctx) {
if (ctx->status & HASH_CTX_STS_COMPLETE) {
@@ -177,8 +182,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
ctx->job.buffer = (uint8_t *) buffer;
ctx->job.len = len;
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr,
- &ctx->job);
+ ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
+ &ctx->job);
continue;
}
}
@@ -191,13 +196,15 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
if (ctx->status & HASH_CTX_STS_LAST) {
uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);
+ uint32_t n_extra_blocks =
+ sha1_pad(buf, ctx->total_length);
ctx->status = (HASH_CTX_STS_PROCESSING |
HASH_CTX_STS_COMPLETE);
ctx->job.buffer = buf;
ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
+ ctx = (struct sha1_hash_ctx *)
+ sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
continue;
}
@@ -208,14 +215,17 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
return NULL;
}
-static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
+static struct sha1_hash_ctx
+ *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
/*
* If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to the user.
+ * If get_comp_job returns a job, verify that it is safe to return to
+ * the user.
* If it is not ready, resubmit the job to finish processing.
* If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing.
+ * Otherwise, all jobs currently being managed by the hash_ctx_mgr
+ * still need processing.
*/
struct sha1_hash_ctx *ctx;
@@ -235,7 +245,10 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
int flags)
{
if (flags & (~HASH_ENTIRE)) {
- /* User should not pass anything other than FIRST, UPDATE, or LAST */
+ /*
+ * User should not pass anything other than FIRST, UPDATE, or
+ * LAST
+ */
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
return ctx;
}
@@ -264,14 +277,20 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
ctx->partial_block_buffer_length = 0;
}
- /* If we made it here, there were no errors during this call to submit */
+ /*
+ * If we made it here, there were no errors during this call to
+ * submit
+ */
ctx->error = HASH_CTX_ERROR_NONE;
/* Store buffer ptr info from user */
ctx->incoming_buffer = buffer;
ctx->incoming_buffer_length = len;
- /* Store the user's request flags and mark this ctx as currently being processed. */
+ /*
+ * Store the user's request flags and mark this ctx as currently
+ * being processed.
+ */
ctx->status = (flags & HASH_LAST) ?
(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
HASH_CTX_STS_PROCESSING;
@@ -285,9 +304,13 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
* Or if the user's buffer contains less than a whole block,
* append as much as possible to the extra block.
*/
- if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
- /* Compute how many bytes to copy from user buffer into extra block */
- uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length;
+ if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
+ /*
+ * Compute how many bytes to copy from user buffer into
+ * extra block
+ */
+ uint32_t copy_len = SHA1_BLOCK_SIZE -
+ ctx->partial_block_buffer_length;
if (len < copy_len)
copy_len = len;
@@ -297,20 +320,28 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
buffer, copy_len);
ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
+ ctx->incoming_buffer = (const void *)
+ ((const char *)buffer + copy_len);
ctx->incoming_buffer_length = len - copy_len;
}
- /* The extra block should never contain more than 1 block here */
+ /*
+ * The extra block should never contain more than 1 block
+ * here
+ */
assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
- /* If the extra block buffer contains exactly 1 block, it can be hashed. */
+ /*
+ * If the extra block buffer contains exactly 1 block, it can
+ * be hashed.
+ */
if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
ctx->partial_block_buffer_length = 0;
ctx->job.buffer = ctx->partial_block_buffer;
ctx->job.len = 1;
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
+ ctx = (struct sha1_hash_ctx *)
+ sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
}
}
@@ -329,23 +360,24 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
return NULL;
/*
- * If flush returned a job, resubmit the job to finish processing.
+ * If flush returned a job, resubmit the job to finish
+ * processing.
*/
ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
/*
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
- * Otherwise, all jobs currently being managed by the sha1_ctx_mgr
- * still need processing. Loop.
+ * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
+ * returned. Otherwise, all jobs currently being managed by the
+ * sha1_ctx_mgr still need processing. Loop.
*/
if (ctx)
return ctx;
}
}
-static int sha1_mb_init(struct shash_desc *desc)
+static int sha1_mb_init(struct ahash_request *areq)
{
- struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
hash_ctx_init(sctx);
sctx->job.result_digest[0] = SHA1_H0;
@@ -363,7 +395,7 @@ static int sha1_mb_init(struct shash_desc *desc)
static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
int i;
- struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
__be32 *dst = (__be32 *) rctx->out;
for (i = 0; i < 5; ++i)
@@ -394,9 +426,11 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
flag |= HASH_LAST;
}
- sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
+ sha_ctx = (struct sha1_hash_ctx *)
+ ahash_request_ctx(&rctx->areq);
kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
+ sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
+ rctx->walk.data, nbytes, flag);
if (!sha_ctx) {
if (flush)
sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
@@ -485,11 +519,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
mcryptd_arm_flusher(cstate, delay);
}
-static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha1_mb_update(struct ahash_request *areq)
{
struct mcryptd_hash_request_ctx *rctx =
- container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
@@ -505,7 +538,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
}
/* need to init context */
- req_ctx_init(rctx, desc);
+ req_ctx_init(rctx, areq);
nbytes = crypto_ahash_walk_first(req, &rctx->walk);
@@ -518,10 +551,11 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
rctx->flag |= HASH_DONE;
/* submit */
- sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+ sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE);
+ sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ nbytes, HASH_UPDATE);
kernel_fpu_end();
/* check if anything is returned */
@@ -544,11 +578,10 @@ done:
return ret;
}
-static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int sha1_mb_finup(struct ahash_request *areq)
{
struct mcryptd_hash_request_ctx *rctx =
- container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
@@ -563,7 +596,7 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
}
/* need to init context */
- req_ctx_init(rctx, desc);
+ req_ctx_init(rctx, areq);
nbytes = crypto_ahash_walk_first(req, &rctx->walk);
@@ -576,15 +609,15 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
rctx->flag |= HASH_DONE;
flag = HASH_LAST;
}
- rctx->out = out;
/* submit */
rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+ sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
+ sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ nbytes, flag);
kernel_fpu_end();
/* check if anything is returned */
@@ -605,10 +638,10 @@ done:
return ret;
}
-static int sha1_mb_final(struct shash_desc *desc, u8 *out)
+static int sha1_mb_final(struct ahash_request *areq)
{
struct mcryptd_hash_request_ctx *rctx =
- container_of(desc, struct mcryptd_hash_request_ctx, desc);
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
struct mcryptd_alg_cstate *cstate =
this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
@@ -623,16 +656,16 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out)
}
/* need to init context */
- req_ctx_init(rctx, desc);
+ req_ctx_init(rctx, areq);
- rctx->out = out;
rctx->flag |= HASH_DONE | HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+ sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
/* flag HASH_FINAL and 0 data size */
sha1_mb_add_list(rctx, cstate);
kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST);
+ sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
+ HASH_LAST);
kernel_fpu_end();
/* check if anything is returned */
@@ -654,48 +687,98 @@ done:
return ret;
}
-static int sha1_mb_export(struct shash_desc *desc, void *out)
+static int sha1_mb_export(struct ahash_request *areq, void *out)
{
- struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
-static int sha1_mb_import(struct shash_desc *desc, const void *in)
+static int sha1_mb_import(struct ahash_request *areq, const void *in)
{
- struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
+static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
+{
+ struct mcryptd_ahash *mcryptd_tfm;
+ struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mcryptd_hash_ctx *mctx;
-static struct shash_alg sha1_mb_shash_alg = {
- .digestsize = SHA1_DIGEST_SIZE,
+ mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(mcryptd_tfm))
+ return PTR_ERR(mcryptd_tfm);
+ mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
+ mctx->alg_state = &sha1_mb_alg_state;
+ ctx->mcryptd_tfm = mcryptd_tfm;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(&mcryptd_tfm->base));
+
+ return 0;
+}
+
+static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ sizeof(struct sha1_hash_ctx));
+
+ return 0;
+}
+
+static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static struct ahash_alg sha1_mb_areq_alg = {
.init = sha1_mb_init,
.update = sha1_mb_update,
.final = sha1_mb_final,
.finup = sha1_mb_finup,
.export = sha1_mb_export,
.import = sha1_mb_import,
- .descsize = sizeof(struct sha1_hash_ctx),
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "__sha1-mb",
- .cra_driver_name = "__intel_sha1-mb",
- .cra_priority = 100,
- /*
- * use ASYNC flag as some buffers in multi-buffer
- * algo may not have completed before hashing thread sleep
- */
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_hash_ctx),
+ .base = {
+ .cra_name = "__sha1-mb",
+ .cra_driver_name = "__intel_sha1-mb",
+ .cra_priority = 100,
+ /*
+ * use the ASYNC flag as some buffers in the multi-buffer
+ * algo may not have completed before the hashing thread
+ * sleeps
+ */
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT
+ (sha1_mb_areq_alg.halg.base.cra_list),
+ .cra_init = sha1_mb_areq_init_tfm,
+ .cra_exit = sha1_mb_areq_exit_tfm,
+ .cra_ctxsize = sizeof(struct sha1_hash_ctx),
+ }
}
};
@@ -780,46 +863,20 @@ static int sha1_mb_async_import(struct ahash_request *req, const void *in)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
+ struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
struct mcryptd_hash_request_ctx *rctx;
- struct shash_desc *desc;
+ struct ahash_request *areq;
memcpy(mcryptd_req, req, sizeof(*req));
ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
rctx = ahash_request_ctx(mcryptd_req);
- desc = &rctx->desc;
- desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
+ areq = &rctx->areq;
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha1_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
+ ahash_request_set_tfm(areq, child);
+ ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ rctx->complete, req);
- return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
+ return crypto_ahash_import(mcryptd_req, in);
}
static struct ahash_alg sha1_mb_async_alg = {
@@ -866,7 +923,8 @@ static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
if (time_before(cur_time, rctx->tag.expire))
break;
kernel_fpu_begin();
- sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
+ sha_ctx = (struct sha1_hash_ctx *)
+ sha1_ctx_mgr_flush(cstate->mgr);
kernel_fpu_end();
if (!sha_ctx) {
pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
@@ -927,7 +985,7 @@ static int __init sha1_mb_mod_init(void)
}
sha1_mb_alg_state.flusher = &sha1_mb_flusher;
- err = crypto_register_shash(&sha1_mb_shash_alg);
+ err = crypto_register_ahash(&sha1_mb_areq_alg);
if (err)
goto err2;
err = crypto_register_ahash(&sha1_mb_async_alg);
@@ -937,7 +995,7 @@ static int __init sha1_mb_mod_init(void)
return 0;
err1:
- crypto_unregister_shash(&sha1_mb_shash_alg);
+ crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
for_each_possible_cpu(cpu) {
cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
@@ -953,7 +1011,7 @@ static void __exit sha1_mb_mod_fini(void)
struct mcryptd_alg_cstate *cpu_state;
crypto_unregister_ahash(&sha1_mb_async_alg);
- crypto_unregister_shash(&sha1_mb_shash_alg);
+ crypto_unregister_ahash(&sha1_mb_areq_alg);
for_each_possible_cpu(cpu) {
cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
kfree(cpu_state->mgr);
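The shash-to-ahash conversion above hinges on the multi-buffer hash state living in the ahash request context that mcryptd embeds as rctx->areq, so a sha1_hash_ctx pointer can be mapped back to the outer request context with two container_of() steps. A self-contained sketch with simplified stand-in types (hypothetical names, illustration only):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct hash_state { int status; };		/* plays sha1_hash_ctx */

	struct inner_request {				/* plays ahash_request */
		int base;
		struct hash_state __ctx;		/* request-private area */
	};

	struct outer_request_ctx {			/* plays mcryptd_hash_request_ctx */
		int flag;
		struct inner_request areq;
	};

	static struct outer_request_ctx *outer_from_hash(struct hash_state *st)
	{
		struct inner_request *areq;

		areq = container_of(st, struct inner_request, __ctx);
		return container_of(areq, struct outer_request_ctx, areq);
	}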
diff --git a/arch/x86/crypto/sha-mb/sha_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
index e36069d0c1bd..98a35bcc6f4a 100644
--- a/arch/x86/crypto/sha-mb/sha_mb_ctx.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
@@ -54,7 +54,7 @@
#ifndef _SHA_MB_CTX_INTERNAL_H
#define _SHA_MB_CTX_INTERNAL_H
-#include "sha_mb_mgr.h"
+#include "sha1_mb_mgr.h"
#define HASH_UPDATE 0x00
#define HASH_FIRST 0x01
diff --git a/arch/x86/crypto/sha-mb/sha_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
index 08ad1a9acfd7..08ad1a9acfd7 100644
--- a/arch/x86/crypto/sha-mb/sha_mb_mgr.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
index 86688c6e7a25..86688c6e7a25 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_datastruct.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 96df6a39d7e2..96df6a39d7e2 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
index 822acb5b464c..d2add0d35f43 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
@@ -51,7 +51,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "sha_mb_mgr.h"
+#include "sha1_mb_mgr.h"
void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
{
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
index 63a0d9c8e31f..63a0d9c8e31f 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
diff --git a/arch/x86/crypto/sha-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
index c9dae1cd2919..c9dae1cd2919 100644
--- a/arch/x86/crypto/sha-mb/sha1_x8_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 1024e378a358..fc61739150e7 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -374,3 +374,9 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-ssse3");
+MODULE_ALIAS_CRYPTO("sha1-avx");
+MODULE_ALIAS_CRYPTO("sha1-avx2");
+#ifdef CONFIG_AS_SHA1_NI
+MODULE_ALIAS_CRYPTO("sha1-ni");
+#endif
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
new file mode 100644
index 000000000000..41089e7c400c
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/Makefile
@@ -0,0 +1,11 @@
+#
+# Arch-specific CryptoAPI modules.
+#
+
+avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+ $(comma)4)$(comma)%ymm2,yes,no)
+ifeq ($(avx2_supported),yes)
+ obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
+ sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
+ sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
+endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
new file mode 100644
index 000000000000..89fa85e8b10c
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -0,0 +1,1030 @@
+/*
+ * Multi buffer SHA256 algorithm Glue Code
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/mcryptd.h>
+#include <crypto/crypto_wq.h>
+#include <asm/byteorder.h>
+#include <linux/hardirq.h>
+#include <asm/fpu/api.h>
+#include "sha256_mb_ctx.h"
+
+#define FLUSH_INTERVAL 1000 /* in usec */
+
+static struct mcryptd_alg_state sha256_mb_alg_state;
+
+struct sha256_mb_ctx {
+ struct mcryptd_ahash *mcryptd_tfm;
+};
+
+static inline struct mcryptd_hash_request_ctx
+ *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
+{
+ struct ahash_request *areq;
+
+ areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+ return container_of(areq, struct mcryptd_hash_request_ctx, areq);
+}
+
+static inline struct ahash_request
+ *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
+{
+ return container_of((void *) ctx, struct ahash_request, __ctx);
+}
+
+static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
+ struct ahash_request *areq)
+{
+ rctx->flag = HASH_UPDATE;
+}
+
+static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
+static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
+ (struct sha256_mb_mgr *state, struct job_sha256 *job);
+static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
+ (struct sha256_mb_mgr *state);
+static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
+ (struct sha256_mb_mgr *state);
+
+inline void sha256_init_digest(uint32_t *digest)
+{
+ static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
+ memcpy(digest, initial_digest, sizeof(initial_digest));
+}
+
+inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
+ uint32_t total_len)
+{
+ uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
+
+ memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
+ padblock[i] = 0x80;
+
+ i += ((SHA256_BLOCK_SIZE - 1) &
+ (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
+ + 1 + SHA256_PADLENGTHFIELD_SIZE;
+
+#if SHA256_PADLENGTHFIELD_SIZE == 16
+ *((uint64_t *) &padblock[i - 16]) = 0;
+#endif
+
+ *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
+
+ /* Number of extra blocks to hash */
+ return i >> SHA256_LOG2_BLOCK_SIZE;
+}
+
+static struct sha256_hash_ctx
+ *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
+ struct sha256_hash_ctx *ctx)
+{
+ while (ctx) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
+ /* Clear PROCESSING bit */
+ ctx->status = HASH_CTX_STS_COMPLETE;
+ return ctx;
+ }
+
+ /*
+ * If the extra blocks are empty, begin hashing what remains
+ * in the user's buffer.
+ */
+ if (ctx->partial_block_buffer_length == 0 &&
+ ctx->incoming_buffer_length) {
+
+ const void *buffer = ctx->incoming_buffer;
+ uint32_t len = ctx->incoming_buffer_length;
+ uint32_t copy_len;
+
+ /*
+ * Only entire blocks can be hashed.
+ * Copy remainder to extra blocks buffer.
+ */
+ copy_len = len & (SHA256_BLOCK_SIZE-1);
+
+ if (copy_len) {
+ len -= copy_len;
+ memcpy(ctx->partial_block_buffer,
+ ((const char *) buffer + len),
+ copy_len);
+ ctx->partial_block_buffer_length = copy_len;
+ }
+
+ ctx->incoming_buffer_length = 0;
+
+ /* len should be a multiple of the block size now */
+ assert((len % SHA256_BLOCK_SIZE) == 0);
+
+ /* Set len to the number of blocks to be hashed */
+ len >>= SHA256_LOG2_BLOCK_SIZE;
+
+ if (len) {
+
+ ctx->job.buffer = (uint8_t *) buffer;
+ ctx->job.len = len;
+ ctx = (struct sha256_hash_ctx *)
+ sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
+ continue;
+ }
+ }
+
+ /*
+ * If the extra blocks are not empty, then we are
+ * either on the last block(s) or we need more
+ * user input before continuing.
+ */
+ if (ctx->status & HASH_CTX_STS_LAST) {
+
+ uint8_t *buf = ctx->partial_block_buffer;
+ uint32_t n_extra_blocks =
+ sha256_pad(buf, ctx->total_length);
+
+ ctx->status = (HASH_CTX_STS_PROCESSING |
+ HASH_CTX_STS_COMPLETE);
+ ctx->job.buffer = buf;
+ ctx->job.len = (uint32_t) n_extra_blocks;
+ ctx = (struct sha256_hash_ctx *)
+ sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
+ continue;
+ }
+
+ ctx->status = HASH_CTX_STS_IDLE;
+ return ctx;
+ }
+
+ return NULL;
+}
+
+static struct sha256_hash_ctx
+ *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
+{
+ /*
+ * If get_comp_job returns NULL, there are no jobs complete.
+ * If get_comp_job returns a job, verify that it is safe to return to
+ * the user. If it is not ready, resubmit the job to finish processing.
+ * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
+ * returned. Otherwise, all jobs currently being managed by the
+ * hash_ctx_mgr still need processing.
+ */
+ struct sha256_hash_ctx *ctx;
+
+ ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
+ return sha256_ctx_mgr_resubmit(mgr, ctx);
+}
+
+static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
+{
+ sha256_job_mgr_init(&mgr->mgr);
+}
+
+static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
+ struct sha256_hash_ctx *ctx,
+ const void *buffer,
+ uint32_t len,
+ int flags)
+{
+ if (flags & (~HASH_ENTIRE)) {
+ /* User should not pass anything other than FIRST, UPDATE
+ * or LAST
+ */
+ ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
+ return ctx;
+ }
+
+ if (ctx->status & HASH_CTX_STS_PROCESSING) {
+ /* Cannot submit to a currently processing job. */
+ ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
+ return ctx;
+ }
+
+ if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ /* Cannot update a finished job. */
+ ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
+ return ctx;
+ }
+
+ if (flags & HASH_FIRST) {
+ /* Init digest */
+ sha256_init_digest(ctx->job.result_digest);
+
+ /* Reset byte counter */
+ ctx->total_length = 0;
+
+ /* Clear extra blocks */
+ ctx->partial_block_buffer_length = 0;
+ }
+
+ /* If we made it here, there was no error during this call to submit */
+ ctx->error = HASH_CTX_ERROR_NONE;
+
+ /* Store buffer ptr info from user */
+ ctx->incoming_buffer = buffer;
+ ctx->incoming_buffer_length = len;
+
+ /*
+ * Store the user's request flags and mark this ctx as currently
+ * being processed.
+ */
+ ctx->status = (flags & HASH_LAST) ?
+ (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
+ HASH_CTX_STS_PROCESSING;
+
+ /* Advance byte counter */
+ ctx->total_length += len;
+
+ /*
+ * If there is anything currently buffered in the extra blocks,
+ * append to it until it contains a whole block.
+ * Or if the user's buffer contains less than a whole block,
+ * append as much as possible to the extra block.
+ */
+ if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
+ /*
+ * Compute how many bytes to copy from user buffer into
+ * extra block
+ */
+ uint32_t copy_len = SHA256_BLOCK_SIZE -
+ ctx->partial_block_buffer_length;
+ if (len < copy_len)
+ copy_len = len;
+
+ if (copy_len) {
+ /* Copy and update relevant pointers and counters */
+ memcpy(
+ &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
+ buffer, copy_len);
+
+ ctx->partial_block_buffer_length += copy_len;
+ ctx->incoming_buffer = (const void *)
+ ((const char *)buffer + copy_len);
+ ctx->incoming_buffer_length = len - copy_len;
+ }
+
+ /* The extra block should never contain more than 1 block */
+ assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
+
+ /*
+ * If the extra block buffer contains exactly 1 block,
+ * it can be hashed.
+ */
+ if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
+ ctx->partial_block_buffer_length = 0;
+
+ ctx->job.buffer = ctx->partial_block_buffer;
+ ctx->job.len = 1;
+ ctx = (struct sha256_hash_ctx *)
+ sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
+ }
+ }
+
+ return sha256_ctx_mgr_resubmit(mgr, ctx);
+}
+
+static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
+{
+ struct sha256_hash_ctx *ctx;
+
+ while (1) {
+ ctx = (struct sha256_hash_ctx *)
+ sha256_job_mgr_flush(&mgr->mgr);
+
+ /* If flush returned 0, there are no more jobs in flight. */
+ if (!ctx)
+ return NULL;
+
+ /*
+ * If flush returned a job, resubmit the job to finish
+ * processing.
+ */
+ ctx = sha256_ctx_mgr_resubmit(mgr, ctx);
+
+ /*
+ * If sha256_ctx_mgr_resubmit returned a job, it is ready to
+ * be returned. Otherwise, all jobs currently being managed by
+ * the sha256_ctx_mgr still need processing. Loop.
+ */
+ if (ctx)
+ return ctx;
+ }
+}
+
+static int sha256_mb_init(struct ahash_request *areq)
+{
+ struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
+
+ hash_ctx_init(sctx);
+ sctx->job.result_digest[0] = SHA256_H0;
+ sctx->job.result_digest[1] = SHA256_H1;
+ sctx->job.result_digest[2] = SHA256_H2;
+ sctx->job.result_digest[3] = SHA256_H3;
+ sctx->job.result_digest[4] = SHA256_H4;
+ sctx->job.result_digest[5] = SHA256_H5;
+ sctx->job.result_digest[6] = SHA256_H6;
+ sctx->job.result_digest[7] = SHA256_H7;
+ sctx->total_length = 0;
+ sctx->partial_block_buffer_length = 0;
+ sctx->status = HASH_CTX_STS_IDLE;
+
+ return 0;
+}
+
+static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
+{
+ int i;
+ struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
+ __be32 *dst = (__be32 *) rctx->out;
+
+ for (i = 0; i < 8; ++i)
+ dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
+
+ return 0;
+}
+
+static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
+ struct mcryptd_alg_cstate *cstate, bool flush)
+{
+ int flag = HASH_UPDATE;
+ int nbytes, err = 0;
+ struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
+ struct sha256_hash_ctx *sha_ctx;
+
+ /* more work ? */
+ while (!(rctx->flag & HASH_DONE)) {
+ nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
+ if (nbytes < 0) {
+ err = nbytes;
+ goto out;
+ }
+ /* check if the walk is done */
+ if (crypto_ahash_walk_last(&rctx->walk)) {
+ rctx->flag |= HASH_DONE;
+ if (rctx->flag & HASH_FINAL)
+ flag |= HASH_LAST;
+
+ }
+ sha_ctx = (struct sha256_hash_ctx *)
+ ahash_request_ctx(&rctx->areq);
+ kernel_fpu_begin();
+ sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
+ rctx->walk.data, nbytes, flag);
+ if (!sha_ctx) {
+ if (flush)
+ sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
+ }
+ kernel_fpu_end();
+ if (sha_ctx)
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ else {
+ rctx = NULL;
+ goto out;
+ }
+ }
+
+ /* copy the results */
+ if (rctx->flag & HASH_FINAL)
+ sha256_mb_set_results(rctx);
+
+out:
+ *ret_rctx = rctx;
+ return err;
+}
+
+static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+ struct mcryptd_alg_cstate *cstate,
+ int err)
+{
+ struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
+ struct sha256_hash_ctx *sha_ctx;
+ struct mcryptd_hash_request_ctx *req_ctx;
+ int ret;
+
+ /* remove from work list */
+ spin_lock(&cstate->work_lock);
+ list_del(&rctx->waiter);
+ spin_unlock(&cstate->work_lock);
+
+ if (irqs_disabled())
+ rctx->complete(&req->base, err);
+ else {
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+ }
+
+ /* check to see if there are other jobs that are done */
+ sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
+ while (sha_ctx) {
+ req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&req_ctx, cstate, false);
+ if (req_ctx) {
+ spin_lock(&cstate->work_lock);
+ list_del(&req_ctx->waiter);
+ spin_unlock(&cstate->work_lock);
+
+ req = cast_mcryptd_ctx_to_req(req_ctx);
+ if (irqs_disabled())
+ rctx->complete(&req->base, ret);
+ else {
+ local_bh_disable();
+ rctx->complete(&req->base, ret);
+ local_bh_enable();
+ }
+ }
+ sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
+ }
+
+ return 0;
+}
+
+static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
+ struct mcryptd_alg_cstate *cstate)
+{
+ unsigned long next_flush;
+ unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
+
+ /* initialize tag */
+ rctx->tag.arrival = jiffies; /* tag the arrival time */
+ rctx->tag.seq_num = cstate->next_seq_num++;
+ next_flush = rctx->tag.arrival + delay;
+ rctx->tag.expire = next_flush;
+
+ spin_lock(&cstate->work_lock);
+ list_add_tail(&rctx->waiter, &cstate->work_list);
+ spin_unlock(&cstate->work_lock);
+
+ mcryptd_arm_flusher(cstate, delay);
+}
+
+static int sha256_mb_update(struct ahash_request *areq)
+{
+ struct mcryptd_hash_request_ctx *rctx =
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
+ struct mcryptd_alg_cstate *cstate =
+ this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
+
+ struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
+ struct sha256_hash_ctx *sha_ctx;
+ int ret = 0, nbytes;
+
+ /* sanity check */
+ if (rctx->tag.cpu != smp_processor_id()) {
+ pr_err("mcryptd error: cpu clash\n");
+ goto done;
+ }
+
+ /* need to init context */
+ req_ctx_init(rctx, areq);
+
+ nbytes = crypto_ahash_walk_first(req, &rctx->walk);
+
+ if (nbytes < 0) {
+ ret = nbytes;
+ goto done;
+ }
+
+ if (crypto_ahash_walk_last(&rctx->walk))
+ rctx->flag |= HASH_DONE;
+
+ /* submit */
+ sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
+ sha256_mb_add_list(rctx, cstate);
+ kernel_fpu_begin();
+ sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ nbytes, HASH_UPDATE);
+ kernel_fpu_end();
+
+ /* check if anything is returned */
+ if (!sha_ctx)
+ return -EINPROGRESS;
+
+ if (sha_ctx->error) {
+ ret = sha_ctx->error;
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ goto done;
+ }
+
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&rctx, cstate, false);
+
+ if (!rctx)
+ return -EINPROGRESS;
+done:
+ sha_complete_job(rctx, cstate, ret);
+ return ret;
+}
+
+static int sha256_mb_finup(struct ahash_request *areq)
+{
+ struct mcryptd_hash_request_ctx *rctx =
+ container_of(areq, struct mcryptd_hash_request_ctx, areq);
+ struct mcryptd_alg_cstate *cstate =
+ this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
+
+ struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
+ struct sha256_hash_ctx *sha_ctx;
+ int ret = 0, flag = HASH_UPDATE, nbytes;
+
+ /* sanity check */
+ if (rctx->tag.cpu != smp_processor_id()) {
+ pr_err("mcryptd error: cpu clash\n");
+ goto done;
+ }
+
+ /* need to init context */
+ req_ctx_init(rctx, areq);
+
+ nbytes = crypto_ahash_walk_first(req, &rctx->walk);
+
+ if (nbytes < 0) {
+ ret = nbytes;
+ goto done;
+ }
+
+ if (crypto_ahash_walk_last(&rctx->walk)) {
+ rctx->flag |= HASH_DONE;
+ flag = HASH_LAST;
+ }
+
+ /* submit */
+ rctx->flag |= HASH_FINAL;
+ sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
+ sha256_mb_add_list(rctx, cstate);
+
+ kernel_fpu_begin();
+ sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ nbytes, flag);
+ kernel_fpu_end();
+
+ /* check if anything is returned */
+ if (!sha_ctx)
+ return -EINPROGRESS;
+
+ if (sha_ctx->error) {
+ ret = sha_ctx->error;
+ goto done;
+ }
+
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&rctx, cstate, false);
+ if (!rctx)
+ return -EINPROGRESS;
+done:
+ sha_complete_job(rctx, cstate, ret);
+ return ret;
+}
+
+static int sha256_mb_final(struct ahash_request *areq)
+{
+ struct mcryptd_hash_request_ctx *rctx =
+ container_of(areq, struct mcryptd_hash_request_ctx,
+ areq);
+ struct mcryptd_alg_cstate *cstate =
+ this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
+
+ struct sha256_hash_ctx *sha_ctx;
+ int ret = 0;
+ u8 data;
+
+ /* sanity check */
+ if (rctx->tag.cpu != smp_processor_id()) {
+ pr_err("mcryptd error: cpu clash\n");
+ goto done;
+ }
+
+ /* need to init context */
+ req_ctx_init(rctx, areq);
+
+ rctx->flag |= HASH_DONE | HASH_FINAL;
+
+ sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
+ /* flag HASH_FINAL and 0 data size */
+ sha256_mb_add_list(rctx, cstate);
+ kernel_fpu_begin();
+ sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
+ HASH_LAST);
+ kernel_fpu_end();
+
+ /* check if anything is returned */
+ if (!sha_ctx)
+ return -EINPROGRESS;
+
+ if (sha_ctx->error) {
+ ret = sha_ctx->error;
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ goto done;
+ }
+
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&rctx, cstate, false);
+ if (!rctx)
+ return -EINPROGRESS;
+done:
+ sha_complete_job(rctx, cstate, ret);
+ return ret;
+}
+
+static int sha256_mb_export(struct ahash_request *areq, void *out)
+{
+ struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
+
+ memcpy(out, sctx, sizeof(*sctx));
+
+ return 0;
+}
+
+static int sha256_mb_import(struct ahash_request *areq, const void *in)
+{
+ struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
+
+ memcpy(sctx, in, sizeof(*sctx));
+
+ return 0;
+}
+
+static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
+{
+ struct mcryptd_ahash *mcryptd_tfm;
+ struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mcryptd_hash_ctx *mctx;
+
+ mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(mcryptd_tfm))
+ return PTR_ERR(mcryptd_tfm);
+ mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
+ mctx->alg_state = &sha256_mb_alg_state;
+ ctx->mcryptd_tfm = mcryptd_tfm;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(&mcryptd_tfm->base));
+
+ return 0;
+}
+
+static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ sizeof(struct sha256_hash_ctx));
+
+ return 0;
+}
+
+static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static struct ahash_alg sha256_mb_areq_alg = {
+ .init = sha256_mb_init,
+ .update = sha256_mb_update,
+ .final = sha256_mb_final,
+ .finup = sha256_mb_finup,
+ .export = sha256_mb_export,
+ .import = sha256_mb_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_hash_ctx),
+ .base = {
+ .cra_name = "__sha256-mb",
+ .cra_driver_name = "__intel_sha256-mb",
+ .cra_priority = 100,
+ /*
+ * use the ASYNC flag as some buffers in the multi-buffer
+ * algo may not have completed before the hashing thread
+ * sleeps
+ */
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT
+ (sha256_mb_areq_alg.halg.base.cra_list),
+ .cra_init = sha256_mb_areq_init_tfm,
+ .cra_exit = sha256_mb_areq_exit_tfm,
+ .cra_ctxsize = sizeof(struct sha256_hash_ctx),
+ }
+ }
+};
+
+static int sha256_mb_async_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_init(mcryptd_req);
+}
+
+static int sha256_mb_async_update(struct ahash_request *req)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_update(mcryptd_req);
+}
+
+static int sha256_mb_async_finup(struct ahash_request *req)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_finup(mcryptd_req);
+}
+
+static int sha256_mb_async_final(struct ahash_request *req)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_final(mcryptd_req);
+}
+
+static int sha256_mb_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_digest(mcryptd_req);
+}
+
+static int sha256_mb_async_export(struct ahash_request *req, void *out)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_export(mcryptd_req, out);
+}
+
+static int sha256_mb_async_import(struct ahash_request *req, const void *in)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+ struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
+ struct mcryptd_hash_request_ctx *rctx;
+ struct ahash_request *areq;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ rctx = ahash_request_ctx(mcryptd_req);
+ areq = &rctx->areq;
+
+ ahash_request_set_tfm(areq, child);
+ ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ rctx->complete, req);
+
+ return crypto_ahash_import(mcryptd_req, in);
+}
+
+static struct ahash_alg sha256_mb_async_alg = {
+ .init = sha256_mb_async_init,
+ .update = sha256_mb_async_update,
+ .final = sha256_mb_async_final,
+ .finup = sha256_mb_async_finup,
+ .export = sha256_mb_async_export,
+ .import = sha256_mb_async_import,
+ .digest = sha256_mb_async_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_hash_ctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256_mb",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT
+ (sha256_mb_async_alg.halg.base.cra_list),
+ .cra_init = sha256_mb_async_init_tfm,
+ .cra_exit = sha256_mb_async_exit_tfm,
+ .cra_ctxsize = sizeof(struct sha256_mb_ctx),
+ .cra_alignmask = 0,
+ },
+ },
+};
+
+static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
+{
+ struct mcryptd_hash_request_ctx *rctx;
+ unsigned long cur_time;
+ unsigned long next_flush = 0;
+ struct sha256_hash_ctx *sha_ctx;
+
+
+ cur_time = jiffies;
+
+ while (!list_empty(&cstate->work_list)) {
+ rctx = list_entry(cstate->work_list.next,
+ struct mcryptd_hash_request_ctx, waiter);
+ if (time_before(cur_time, rctx->tag.expire))
+ break;
+ kernel_fpu_begin();
+ sha_ctx = (struct sha256_hash_ctx *)
+ sha256_ctx_mgr_flush(cstate->mgr);
+ kernel_fpu_end();
+ if (!sha_ctx) {
+ pr_err("sha256_mb error: nothing got"
+ " flushed for non-empty list\n");
+ break;
+ }
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ sha_finish_walk(&rctx, cstate, true);
+ sha_complete_job(rctx, cstate, 0);
+ }
+
+ if (!list_empty(&cstate->work_list)) {
+ rctx = list_entry(cstate->work_list.next,
+ struct mcryptd_hash_request_ctx, waiter);
+ /* get the hash context and then flush time */
+ next_flush = rctx->tag.expire;
+ mcryptd_arm_flusher(cstate, get_delay(next_flush));
+ }
+ return next_flush;
+}
+
+static int __init sha256_mb_mod_init(void)
+{
+
+ int cpu;
+ int err;
+ struct mcryptd_alg_cstate *cpu_state;
+
+ /* check for dependent cpu features */
+ if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+ !boot_cpu_has(X86_FEATURE_BMI2))
+ return -ENODEV;
+
+ /* initialize multibuffer structures */
+ sha256_mb_alg_state.alg_cstate = alloc_percpu
+ (struct mcryptd_alg_cstate);
+
+ sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
+ sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
+ sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
+ sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;
+
+ if (!sha256_mb_alg_state.alg_cstate)
+ return -ENOMEM;
+ for_each_possible_cpu(cpu) {
+ cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
+ cpu_state->next_flush = 0;
+ cpu_state->next_seq_num = 0;
+ cpu_state->flusher_engaged = false;
+ INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
+ cpu_state->cpu = cpu;
+ cpu_state->alg_state = &sha256_mb_alg_state;
+ cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
+ GFP_KERNEL);
+ if (!cpu_state->mgr)
+ goto err2;
+ sha256_ctx_mgr_init(cpu_state->mgr);
+ INIT_LIST_HEAD(&cpu_state->work_list);
+ spin_lock_init(&cpu_state->work_lock);
+ }
+ sha256_mb_alg_state.flusher = &sha256_mb_flusher;
+
+ err = crypto_register_ahash(&sha256_mb_areq_alg);
+ if (err)
+ goto err2;
+ err = crypto_register_ahash(&sha256_mb_async_alg);
+ if (err)
+ goto err1;
+
+
+ return 0;
+err1:
+ crypto_unregister_ahash(&sha256_mb_areq_alg);
+err2:
+ for_each_possible_cpu(cpu) {
+ cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
+ kfree(cpu_state->mgr);
+ }
+ free_percpu(sha256_mb_alg_state.alg_cstate);
+ return -ENODEV;
+}
+
+static void __exit sha256_mb_mod_fini(void)
+{
+ int cpu;
+ struct mcryptd_alg_cstate *cpu_state;
+
+ crypto_unregister_ahash(&sha256_mb_async_alg);
+ crypto_unregister_ahash(&sha256_mb_areq_alg);
+ for_each_possible_cpu(cpu) {
+ cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
+ kfree(cpu_state->mgr);
+ }
+ free_percpu(sha256_mb_alg_state.alg_cstate);
+}
+
+module_init(sha256_mb_mod_init);
+module_exit(sha256_mb_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");
+
+MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
new file mode 100644
index 000000000000..edd252b73206
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
@@ -0,0 +1,136 @@
+/*
+ * Header file for multi buffer SHA256 context
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SHA_MB_CTX_INTERNAL_H
+#define _SHA_MB_CTX_INTERNAL_H
+
+#include "sha256_mb_mgr.h"
+
+#define HASH_UPDATE 0x00
+#define HASH_FIRST 0x01
+#define HASH_LAST 0x02
+#define HASH_ENTIRE 0x03
+#define HASH_DONE 0x04
+#define HASH_FINAL 0x08
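+/* HASH_ENTIRE (0x03) is HASH_FIRST | HASH_LAST: a whole message in one submit */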
+
+#define HASH_CTX_STS_IDLE 0x00
+#define HASH_CTX_STS_PROCESSING 0x01
+#define HASH_CTX_STS_LAST 0x02
+#define HASH_CTX_STS_COMPLETE 0x04
+
+enum hash_ctx_error {
+ HASH_CTX_ERROR_NONE = 0,
+ HASH_CTX_ERROR_INVALID_FLAGS = -1,
+ HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
+ HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
+
+#ifdef HASH_CTX_DEBUG
+ HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
+#endif
+};
+
+
+#define hash_ctx_user_data(ctx) ((ctx)->user_data)
+#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
+#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
+#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
+#define hash_ctx_status(ctx) ((ctx)->status)
+#define hash_ctx_error(ctx) ((ctx)->error)
+#define hash_ctx_init(ctx) \
+ do { \
+ (ctx)->error = HASH_CTX_ERROR_NONE; \
+ (ctx)->status = HASH_CTX_STS_COMPLETE; \
+ } while (0)
+
+
+/* Hash Constants and Typedefs */
+#define SHA256_DIGEST_LENGTH 8
+#define SHA256_LOG2_BLOCK_SIZE 6
+
+#define SHA256_PADLENGTHFIELD_SIZE 8
+
+#ifdef SHA_MB_DEBUG
+#define assert(expr) \
+do { \
+ if (unlikely(!(expr))) { \
+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ } \
+} while (0)
+#else
+#define assert(expr) do {} while (0)
+#endif
+
+struct sha256_ctx_mgr {
+ struct sha256_mb_mgr mgr;
+};
+
+/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
+
+struct sha256_hash_ctx {
+ /* Must be at struct offset 0 */
+ struct job_sha256 job;
+ /* status flag */
+ int status;
+ /* error flag */
+ int error;
+
+ uint32_t total_length;
+ const void *incoming_buffer;
+ uint32_t incoming_buffer_length;
+ uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
+ uint32_t partial_block_buffer_length;
+ void *user_data;
+};
+
+#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
new file mode 100644
index 000000000000..b01ae408c56d
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
@@ -0,0 +1,108 @@
+/*
+ * Header file for multi buffer SHA256 algorithm manager
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __SHA_MB_MGR_H
+#define __SHA_MB_MGR_H
+
+#include <linux/types.h>
+
+#define NUM_SHA256_DIGEST_WORDS 8
+
+enum job_sts { STS_UNKNOWN = 0,
+ STS_BEING_PROCESSED = 1,
+ STS_COMPLETED = 2,
+ STS_INTERNAL_ERROR = 3,
+ STS_ERROR = 4
+};
+
+struct job_sha256 {
+ u8 *buffer;
+ u32 len;
+ u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
+ enum job_sts status;
+ void *user_data;
+};
+
+/* SHA256 out-of-order scheduler */
+
+/* typedef uint32_t sha8_digest_array[8][8]; */
+
+struct sha256_args_x8 {
+ uint32_t digest[8][8];
+ uint8_t *data_ptr[8];
+};
+
+struct sha256_lane_data {
+ struct job_sha256 *job_in_lane;
+};
+
+struct sha256_mb_mgr {
+ struct sha256_args_x8 args;
+
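+	/*
+	 * Per-lane length words: the submit path stores (len << 4) | lane
+	 * here, and an idle lane is marked 0xFFFFFFFF so it never wins the
+	 * min-length search in the flush/submit assembly.
+	 */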
+ uint32_t lens[8];
+
+	/*
+	 * Stack of free lane indices, one nibble (0...7) per entry;
+	 * nibble 8 holds 0xF as a sentinel, so bit 35 being set means
+	 * all eight lanes are free.
+	 */
+	uint64_t unused_lanes;
+ struct sha256_lane_data ldata[8];
+};
+
+
+#define SHA256_MB_MGR_NUM_LANES_AVX2 8
+
+void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
+struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
+ struct job_sha256 *job);
+struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
+struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
+
+#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
new file mode 100644
index 000000000000..5c377bac21d0
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
@@ -0,0 +1,304 @@
+/*
+ * Header file for multi buffer SHA256 algorithm data structure
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+# Macros for defining data structures
+
+# Usage example
+
+#START_FIELDS # JOB_AES
+### name size align
+#FIELD _plaintext, 8, 8 # pointer to plaintext
+#FIELD _ciphertext, 8, 8 # pointer to ciphertext
+#FIELD _IV, 16, 8 # IV
+#FIELD _keys, 8, 8 # pointer to keys
+#FIELD _len, 4, 4 # length in bytes
+#FIELD _status, 4, 4 # status enumeration
+#FIELD _user_data, 8, 8 # pointer to user data
+#UNION _union, size1, align1, \
+# size2, align2, \
+# size3, align3, \
+# ...
+#END_FIELDS
+#%assign _JOB_AES_size _FIELD_OFFSET
+#%assign _JOB_AES_align _STRUCT_ALIGN
+
+#########################################################################
+
+# Alternate "struc-like" syntax:
+# STRUCT job_aes2
+# RES_Q .plaintext, 1
+# RES_Q .ciphertext, 1
+# RES_DQ .IV, 1
+# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
+# RES_U .union, size1, align1, \
+# size2, align2, \
+# ...
+# ENDSTRUCT
+# # Following only needed if nesting
+# %assign job_aes2_size _FIELD_OFFSET
+# %assign job_aes2_align _STRUCT_ALIGN
+#
+# RES_* macros take a name, a count and an optional alignment.
+# The count is in terms of the base size of the macro, and the
+# default alignment is the base size.
+# The macros are:
+# Macro Base size
+# RES_B 1
+# RES_W 2
+# RES_D 4
+# RES_Q 8
+# RES_DQ 16
+# RES_Y 32
+# RES_Z 64
+#
+# RES_U defines a union. Its arguments are a name and two or more
+# pairs of "size, alignment"
+#
+# The two assigns are only needed if this structure is being nested
+# within another. Even if the assigns are not done, one can still use
+# STRUCT_NAME_size as the size of the structure.
+#
+# Note that for nesting, you still need to assign to STRUCT_NAME_size.
+#
+# The differences between this and using "struc" directly are that each
+# type is implicitly aligned to its natural length (although this can be
+# overridden with an explicit third parameter), and that the structure
+# is padded at the end to its overall alignment.
+#
+
+#########################################################################
+
+#ifndef _DATASTRUCT_ASM_
+#define _DATASTRUCT_ASM_
+
+#define SZ8 8*SHA256_DIGEST_WORD_SIZE
+#define ROUNDS 64*SZ8
+#define PTR_SZ 8
+#define SHA256_DIGEST_WORD_SIZE 4
+#define MAX_SHA256_LANES 8
+#define SHA256_DIGEST_WORDS 8
+#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
+#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
+#define SHA256_BLK_SZ 64
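+
+# The digest rows are stored transposed: row i (SHA256_DIGEST_ROW_SIZE bytes)
+# holds digest word i for all eight lanes, which is why the manager code
+# addresses it as _args_digest + i*32(state, lane, 4).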
+
+# START_FIELDS
+.macro START_FIELDS
+ _FIELD_OFFSET = 0
+ _STRUCT_ALIGN = 0
+.endm
+
+# FIELD name size align
+.macro FIELD name size align
+ _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
+ \name = _FIELD_OFFSET
+ _FIELD_OFFSET = _FIELD_OFFSET + (\size)
+.if (\align > _STRUCT_ALIGN)
+ _STRUCT_ALIGN = \align
+.endif
+.endm
+
+# END_FIELDS
+.macro END_FIELDS
+ _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
+.endm
+
+########################################################################
+
+.macro STRUCT p1
+START_FIELDS
+.struc \p1
+.endm
+
+.macro ENDSTRUCT
+ tmp = _FIELD_OFFSET
+ END_FIELDS
+ tmp = (_FIELD_OFFSET - %%tmp)
+.if (tmp > 0)
+ .lcomm tmp
+.endif
+.endstruc
+.endm
+
+## RES_int name size align
+.macro RES_int p1 p2 p3
+ name = \p1
+ size = \p2
+ align = .\p3
+
+ _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
+.align align
+.lcomm name size
+ _FIELD_OFFSET = _FIELD_OFFSET + (size)
+.if (align > _STRUCT_ALIGN)
+ _STRUCT_ALIGN = align
+.endif
+.endm
+
+# macro RES_B name, size [, align]
+.macro RES_B _name, _size, _align=1
+RES_int _name _size _align
+.endm
+
+# macro RES_W name, size [, align]
+.macro RES_W _name, _size, _align=2
+RES_int _name 2*(_size) _align
+.endm
+
+# macro RES_D name, size [, align]
+.macro RES_D _name, _size, _align=4
+RES_int _name 4*(_size) _align
+.endm
+
+# macro RES_Q name, size [, align]
+.macro RES_Q _name, _size, _align=8
+RES_int _name 8*(_size) _align
+.endm
+
+# macro RES_DQ name, size [, align]
+.macro RES_DQ _name, _size, _align=16
+RES_int _name 16*(_size) _align
+.endm
+
+# macro RES_Y name, size [, align]
+.macro RES_Y _name, _size, _align=32
+RES_int _name 32*(_size) _align
+.endm
+
+# macro RES_Z name, size [, align]
+.macro RES_Z _name, _size, _align=64
+RES_int _name 64*(_size) _align
+.endm
+
+#endif
+
+
+########################################################################
+#### Define SHA256 Out Of Order Data Structures
+########################################################################
+
+START_FIELDS # LANE_DATA
+### name size align
+FIELD _job_in_lane, 8, 8 # pointer to job object
+END_FIELDS
+
+ _LANE_DATA_size = _FIELD_OFFSET
+ _LANE_DATA_align = _STRUCT_ALIGN
+
+########################################################################
+
+START_FIELDS # SHA256_ARGS_X4
+### name size align
+FIELD _digest, 4*8*8, 4 # transposed digest
+FIELD _data_ptr, 8*8, 8 # array of pointers to data
+END_FIELDS
+
+ _SHA256_ARGS_X4_size = _FIELD_OFFSET
+ _SHA256_ARGS_X4_align = _STRUCT_ALIGN
+ _SHA256_ARGS_X8_size = _FIELD_OFFSET
+ _SHA256_ARGS_X8_align = _STRUCT_ALIGN
+
+#######################################################################
+
+START_FIELDS # MB_MGR
+### name size align
+FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
+FIELD _lens, 4*8, 8
+FIELD _unused_lanes, 8, 8
+FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
+END_FIELDS
+
+ _MB_MGR_size = _FIELD_OFFSET
+ _MB_MGR_align = _STRUCT_ALIGN
+
+_args_digest = _args + _digest
+_args_data_ptr = _args + _data_ptr
+
+#######################################################################
+
+START_FIELDS #STACK_FRAME
+### name size align
+FIELD _data, 16*SZ8, 1 # transposed message schedule
+FIELD _digest, 8*SZ8, 1 # saved transposed digest
+FIELD _ytmp, 4*SZ8, 1
+FIELD _rsp, 8, 1
+END_FIELDS
+
+ _STACK_FRAME_size = _FIELD_OFFSET
+ _STACK_FRAME_align = _STRUCT_ALIGN
+
+#######################################################################
+
+########################################################################
+#### Define constants
+########################################################################
+
+#define STS_UNKNOWN 0
+#define STS_BEING_PROCESSED 1
+#define STS_COMPLETED 2
+
+########################################################################
+#### Define JOB_SHA256 structure
+########################################################################
+
+START_FIELDS # JOB_SHA256
+
+### name size align
+FIELD _buffer, 8, 8 # pointer to buffer
+FIELD _len, 8, 8 # length in bytes
+FIELD _result_digest, 8*4, 32 # Digest (output)
+FIELD _status, 4, 4
+FIELD _user_data, 8, 8
+END_FIELDS
+
+ _JOB_SHA256_size = _FIELD_OFFSET
+ _JOB_SHA256_align = _STRUCT_ALIGN
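+
+# For reference, the FIELD/END_FIELDS arithmetic above resolves to the
+# following offsets, which line up with struct job_sha256 in sha256_mb_mgr.h:
+#	_buffer        = 0
+#	_len           = 8
+#	_result_digest = 32	(32-byte aligned)
+#	_status        = 64
+#	_user_data     = 72
+#	_JOB_SHA256_size = 96, _JOB_SHA256_align = 32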
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
new file mode 100644
index 000000000000..b691da981cd9
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -0,0 +1,304 @@
+/*
+ * Flush routine for SHA256 multibuffer
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/linkage.h>
+#include <asm/frame.h>
+#include "sha256_mb_mgr_datastruct.S"
+
+.extern sha256_x8_avx2
+
+#LINUX register definitions
+#define arg1 %rdi
+#define arg2 %rsi
+
+# Common register definitions
+#define state arg1
+#define job arg2
+#define len2 arg2
+
+# idx must be a register not clobbered by sha256_x8_avx2
+#define idx %r8
+#define DWORD_idx %r8d
+
+#define unused_lanes %rbx
+#define lane_data %rbx
+#define tmp2 %rbx
+#define tmp2_w %ebx
+
+#define job_rax %rax
+#define tmp1 %rax
+#define size_offset %rax
+#define tmp %rax
+#define start_offset %rax
+
+#define tmp3 %arg1
+
+#define extra_blocks %arg2
+#define p %arg2
+
+.macro LABEL prefix n
+\prefix\n\():
+.endm
+
+.macro JNE_SKIP i
+jne skip_\i
+.endm
+
+.altmacro
+.macro SET_OFFSET _offset
+offset = \_offset
+.endm
+.noaltmacro
+
+# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
+# arg 1 : rcx : state
+ENTRY(sha256_mb_mgr_flush_avx2)
+ FRAME_BEGIN
+ push %rbx
+
+ # If bit (32+3) is set, then all lanes are empty
+ mov _unused_lanes(state), unused_lanes
+ bt $32+3, unused_lanes
+ jc return_null
+
+ # find a lane with a non-null job
+ xor idx, idx
+ offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne one(%rip), idx
+ offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne two(%rip), idx
+ offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne three(%rip), idx
+ offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne four(%rip), idx
+ offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne five(%rip), idx
+ offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne six(%rip), idx
+ offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne seven(%rip), idx
+
+ # copy idx to empty lanes
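+	# (empty lanes get the chosen lane's data pointer and a length of
+	# 0xFFFFFFFF, so they never win the min-length search below and
+	# sha256_x8_avx2 always reads from a valid buffer)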
+copy_lane_data:
+ offset = (_args + _data_ptr)
+ mov offset(state,idx,8), tmp
+
+ I = 0
+.rep 8
+ offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+.altmacro
+ JNE_SKIP %I
+ offset = (_args + _data_ptr + 8*I)
+ mov tmp, offset(state)
+ offset = (_lens + 4*I)
+ movl $0xFFFFFFFF, offset(state)
+LABEL skip_ %I
+ I = (I+1)
+.noaltmacro
+.endr
+
+ # Find min length
+ vmovdqa _lens+0*16(state), %xmm0
+ vmovdqa _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+ vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
+ vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
+ vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
+
+ vmovd %xmm2, DWORD_idx
+ mov idx, len2
+ and $0xF, idx
+ shr $4, len2
+ jz len_is_0
+
+ vpand clear_low_nibble(%rip), %xmm2, %xmm2
+ vpshufd $0, %xmm2, %xmm2
+
+ vpsubd %xmm2, %xmm0, %xmm0
+ vpsubd %xmm2, %xmm1, %xmm1
+
+ vmovdqa %xmm0, _lens+0*16(state)
+ vmovdqa %xmm1, _lens+1*16(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+ call sha256_x8_avx2
+ # state and idx are intact
+
+len_is_0:
+ # process completed job "idx"
+ imul $_LANE_DATA_size, idx, lane_data
+ lea _ldata(state, lane_data), lane_data
+
+ mov _job_in_lane(lane_data), job_rax
+ movq $0, _job_in_lane(lane_data)
+ movl $STS_COMPLETED, _status(job_rax)
+ mov _unused_lanes(state), unused_lanes
+ shl $4, unused_lanes
+ or idx, unused_lanes
+
+ mov unused_lanes, _unused_lanes(state)
+ movl $0xFFFFFFFF, _lens(state,idx,4)
+
+ vmovd _args_digest(state , idx, 4) , %xmm0
+ vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
+ vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
+ vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
+ vmovd _args_digest+4*32(state, idx, 4), %xmm1
+ vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
+ vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
+ vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
+
+ vmovdqu %xmm0, _result_digest(job_rax)
+ offset = (_result_digest + 1*16)
+ vmovdqu %xmm1, offset(job_rax)
+
+return:
+ pop %rbx
+ FRAME_END
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+ENDPROC(sha256_mb_mgr_flush_avx2)
+
+##############################################################################
+
+.align 16
+ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+ push %rbx
+
+ ## if bit 32+3 is set, then all lanes are empty
+ mov _unused_lanes(state), unused_lanes
+ bt $(32+3), unused_lanes
+ jc .return_null
+
+ # Find min length
+ vmovdqa _lens(state), %xmm0
+ vmovdqa _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+ vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
+ vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
+ vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
+
+ vmovd %xmm2, DWORD_idx
+ test $~0xF, idx
+ jnz .return_null
+
+ # process completed job "idx"
+ imul $_LANE_DATA_size, idx, lane_data
+ lea _ldata(state, lane_data), lane_data
+
+ mov _job_in_lane(lane_data), job_rax
+ movq $0, _job_in_lane(lane_data)
+ movl $STS_COMPLETED, _status(job_rax)
+ mov _unused_lanes(state), unused_lanes
+ shl $4, unused_lanes
+ or idx, unused_lanes
+ mov unused_lanes, _unused_lanes(state)
+
+ movl $0xFFFFFFFF, _lens(state, idx, 4)
+
+ vmovd _args_digest(state, idx, 4), %xmm0
+ vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
+ vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
+ vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
+	vmovd	_args_digest+4*32(state, idx, 4), %xmm1
+ vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
+ vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
+ vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
+
+ vmovdqu %xmm0, _result_digest(job_rax)
+	vmovdqu	%xmm1, _result_digest+1*16(job_rax)
+
+ pop %rbx
+
+ ret
+
+.return_null:
+ xor job_rax, job_rax
+ pop %rbx
+ ret
+ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
+
+.data
+
+.align 16
+clear_low_nibble:
+.octa 0x000000000000000000000000FFFFFFF0
+one:
+.quad 1
+two:
+.quad 2
+three:
+.quad 3
+four:
+.quad 4
+five:
+.quad 5
+six:
+.quad 6
+seven:
+.quad 7
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
new file mode 100644
index 000000000000..b0c498371e67
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
@@ -0,0 +1,65 @@
+/*
+ * Initialization code for multi buffer SHA256 algorithm for AVX2
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sha256_mb_mgr.h"
+
+void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
+{
+ unsigned int j;
+
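+	/*
+	 * unused_lanes is a stack of free lane indices, one nibble per
+	 * entry (lanes 0-7), with 0xF as a sentinel above them; the
+	 * submit/flush assembly pops and pushes lanes by nibble shifts.
+	 */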
+ state->unused_lanes = 0xF76543210ULL;
+ for (j = 0; j < 8; j++) {
+ state->lens[j] = 0xFFFFFFFF;
+ state->ldata[j].job_in_lane = NULL;
+ }
+}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
new file mode 100644
index 000000000000..7ea670e25acc
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
@@ -0,0 +1,215 @@
+/*
+ * Buffer submit code for multi buffer SHA256 algorithm
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+#include "sha256_mb_mgr_datastruct.S"
+
+.extern sha256_x8_avx2
+
+# LINUX register definitions
+arg1 = %rdi
+arg2 = %rsi
+size_offset = %rcx
+tmp2 = %rcx
+extra_blocks = %rdx
+
+# Common definitions
+#define state arg1
+#define job %rsi
+#define len2 arg2
+#define p2 arg2
+
+# idx must be a register not clobbered by sha256_x8_avx2
+idx = %r8
+DWORD_idx = %r8d
+last_len = %r8
+
+p = %r11
+start_offset = %r11
+
+unused_lanes = %rbx
+BYTE_unused_lanes = %bl
+
+job_rax = %rax
+len = %rax
+DWORD_len = %eax
+
+lane = %r12
+tmp3 = %r12
+
+tmp = %r9
+DWORD_tmp = %r9d
+
+lane_data = %r10
+
+# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
+# arg 1 : rcx : state
+# arg 2 : rdx : job
+ENTRY(sha256_mb_mgr_submit_avx2)
+ FRAME_BEGIN
+ push %rbx
+ push %r12
+
+ mov _unused_lanes(state), unused_lanes
+ mov unused_lanes, lane
+ and $0xF, lane
+ shr $4, unused_lanes
+ imul $_LANE_DATA_size, lane, lane_data
+ movl $STS_BEING_PROCESSED, _status(job)
+ lea _ldata(state, lane_data), lane_data
+ mov unused_lanes, _unused_lanes(state)
+ movl _len(job), DWORD_len
+
+ mov job, _job_in_lane(lane_data)
+ shl $4, len
+ or lane, len
+
+ movl DWORD_len, _lens(state , lane, 4)
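+	# lens[lane] now holds (len << 4) | lane: the low nibble identifies
+	# the lane after the min-length search, the upper bits are the
+	# length handed to sha256_x8_avx2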
+
+ # Load digest words from result_digest
+ vmovdqu _result_digest(job), %xmm0
+ vmovdqu _result_digest+1*16(job), %xmm1
+ vmovd %xmm0, _args_digest(state, lane, 4)
+ vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
+ vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
+ vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
+ vmovd %xmm1, _args_digest+4*32(state , lane, 4)
+
+ vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4)
+ vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4)
+ vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4)
+
+ mov _buffer(job), p
+ mov p, _args_data_ptr(state, lane, 8)
+
+ cmp $0xF, unused_lanes
+ jne return_null
+
+start_loop:
+ # Find min length
+ vmovdqa _lens(state), %xmm0
+ vmovdqa _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+ vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
+ vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
+ vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
+
+ vmovd %xmm2, DWORD_idx
+ mov idx, len2
+ and $0xF, idx
+ shr $4, len2
+ jz len_is_0
+
+ vpand clear_low_nibble(%rip), %xmm2, %xmm2
+ vpshufd $0, %xmm2, %xmm2
+
+ vpsubd %xmm2, %xmm0, %xmm0
+ vpsubd %xmm2, %xmm1, %xmm1
+
+ vmovdqa %xmm0, _lens + 0*16(state)
+ vmovdqa %xmm1, _lens + 1*16(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+ call sha256_x8_avx2
+
+ # state and idx are intact
+
+len_is_0:
+ # process completed job "idx"
+ imul $_LANE_DATA_size, idx, lane_data
+ lea _ldata(state, lane_data), lane_data
+
+ mov _job_in_lane(lane_data), job_rax
+ mov _unused_lanes(state), unused_lanes
+ movq $0, _job_in_lane(lane_data)
+ movl $STS_COMPLETED, _status(job_rax)
+ shl $4, unused_lanes
+ or idx, unused_lanes
+ mov unused_lanes, _unused_lanes(state)
+
+ movl $0xFFFFFFFF, _lens(state,idx,4)
+
+ vmovd _args_digest(state, idx, 4), %xmm0
+ vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
+ vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
+ vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
+ vmovd _args_digest+4*32(state, idx, 4), %xmm1
+
+ vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1
+ vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1
+ vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1
+
+ vmovdqu %xmm0, _result_digest(job_rax)
+ vmovdqu %xmm1, _result_digest+1*16(job_rax)
+
+return:
+ pop %r12
+ pop %rbx
+ FRAME_END
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ENDPROC(sha256_mb_mgr_submit_avx2)
+
+.data
+
+.align 16
+clear_low_nibble:
+ .octa 0x000000000000000000000000FFFFFFF0
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
new file mode 100644
index 000000000000..aa21aea4c722
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
@@ -0,0 +1,593 @@
+/*
+ * Multi-buffer SHA256 algorithm hash compute routine
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/linkage.h>
+#include "sha256_mb_mgr_datastruct.S"
+
+## code to compute oct SHA256 using AVX2
+## outer calling routine takes care of save and restore of XMM registers
+## Logic designed/laid out by JDG
+
+## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15
+## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
+## Linux preserves: rdi rbp r8
+##
+## clobbers %ymm0-15
+
+arg1 = %rdi
+arg2 = %rsi
+reg3 = %rcx
+reg4 = %rdx
+
+# Common definitions
+STATE = arg1
+INP_SIZE = arg2
+
+IDX = %rax
+ROUND = %rbx
+TBL = reg3
+
+inp0 = %r9
+inp1 = %r10
+inp2 = %r11
+inp3 = %r12
+inp4 = %r13
+inp5 = %r14
+inp6 = %r15
+inp7 = reg4
+
+a = %ymm0
+b = %ymm1
+c = %ymm2
+d = %ymm3
+e = %ymm4
+f = %ymm5
+g = %ymm6
+h = %ymm7
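+
+# Each of a..h holds one SHA-256 working variable with one 32-bit lane per
+# ymm dword, so every round below advances all eight hashes in parallel.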
+
+T1 = %ymm8
+
+a0 = %ymm12
+a1 = %ymm13
+a2 = %ymm14
+TMP = %ymm15
+TMP0 = %ymm6
+TMP1 = %ymm7
+
+TT0 = %ymm8
+TT1 = %ymm9
+TT2 = %ymm10
+TT3 = %ymm11
+TT4 = %ymm12
+TT5 = %ymm13
+TT6 = %ymm14
+TT7 = %ymm15
+
+# Define stack usage
+
+# FRAMESZ (0x388 = 904 bytes) covers the STACK_FRAME layout defined in
+# sha256_mb_mgr_datastruct.S (_data + _digest + _ytmp + _rsp); the stack
+# pointer is realigned to 32 bytes explicitly at function entry.
+
+#define FRAMESZ 0x388
+
+#define VMOVPS vmovups
+
+# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
+# "transpose" data in {r0...r7} using temps {t0...t1}
+# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
+# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
+# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
+# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
+# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
+# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
+# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
+# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
+# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
+#
+# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
+# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
+# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
+# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
+# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
+# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
+# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
+# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
+# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
+#
+
+.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
+ # process top half (r0..r3) {a...d}
+ vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
+ vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
+ vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
+ vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
+ vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
+ vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
+ vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
+ vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
+
+ # use r2 in place of t0
+ # process bottom half (r4..r7) {e...h}
+ vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
+ vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
+ vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
+ vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
+ vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
+ vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
+ vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
+ vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
+
+ vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
+ vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
+ vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
+ vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
+ vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
+ vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
+ vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
+ vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
+
+.endm
+
+.macro ROTATE_ARGS
+TMP_ = h
+h = g
+g = f
+f = e
+e = d
+d = c
+c = b
+b = a
+a = TMP_
+.endm
+
+.macro _PRORD reg imm tmp
+ vpslld $(32-\imm),\reg,\tmp
+ vpsrld $\imm,\reg, \reg
+ vpor \tmp,\reg, \reg
+.endm
+
+# PRORD_nd reg, imm, tmp, src
+.macro _PRORD_nd reg imm tmp src
+ vpslld $(32-\imm), \src, \tmp
+ vpsrld $\imm, \src, \reg
+ vpor \tmp, \reg, \reg
+.endm
+
+# PRORD dst/src, amt
+.macro PRORD reg imm
+ _PRORD \reg,\imm,TMP
+.endm
+
+# PRORD_nd dst, src, amt
+.macro PRORD_nd reg tmp imm
+ _PRORD_nd \reg, \imm, TMP, \tmp
+.endm
+
+# arguments passed implicitly in preprocessor symbols i, a...h
+.macro ROUND_00_15 _T1 i
+ PRORD_nd a0,e,5 # sig1: a0 = (e >> 5)
+
+ vpxor g, f, a2 # ch: a2 = f^g
+ vpand e,a2, a2 # ch: a2 = (f^g)&e
+ vpxor g, a2, a2 # a2 = ch
+
+ PRORD_nd a1,e,25 # sig1: a1 = (e >> 25)
+
+ vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp)
+ vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
+ vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
+ PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11)
+ vpaddd a2, h, h # h = h + ch
+ PRORD_nd a2,a,11 # sig0: a2 = (a >> 11)
+ vpaddd \_T1,h, h # h = h + ch + W + K
+ vpxor a1, a0, a0 # a0 = sigma1
+ PRORD_nd a1,a,22 # sig0: a1 = (a >> 22)
+ vpxor c, a, \_T1 # maj: T1 = a^c
+ add $SZ8, ROUND # ROUND++
+ vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
+ vpaddd a0, h, h
+ vpaddd h, d, d
+ vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
+ PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13)
+ vpxor a1, a2, a2 # a2 = sig0
+ vpand c, a, a1 # maj: a1 = a&c
+ vpor \_T1, a1, a1 # a1 = maj
+ vpaddd a1, h, h # h = h + ch + W + K + maj
+ vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
+ ROTATE_ARGS
+.endm
+
+# arguments passed implicitly in preprocessor symbols i, a...h
+.macro ROUND_16_XX _T1 i
+ vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1
+ vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1
+ vmovdqu \_T1, a0
+ PRORD \_T1,11
+ vmovdqu a1, a2
+ PRORD a1,2
+ vpxor a0, \_T1, \_T1
+ PRORD \_T1, 7
+ vpxor a2, a1, a1
+ PRORD a1, 17
+ vpsrld $3, a0, a0
+ vpxor a0, \_T1, \_T1
+ vpsrld $10, a2, a2
+ vpxor a2, a1, a1
+ vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
+ vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
+ vpaddd a1, \_T1, \_T1
+
+ ROUND_00_15 \_T1,\i
+.endm
+
+# SHA256_ARGS:
+# UINT128 digest[8]; // transposed digests
+# UINT8 *data_ptr[4];
+
+# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes);
+# arg 1 : STATE : pointer to array of pointers to input data
+# arg 2 : INP_SIZE : size of input in blocks
+ # general registers preserved in outer calling routine
+ # outer calling routine saves all the XMM registers
+ # save rsp, allocate 32-byte aligned for local variables
+ENTRY(sha256_x8_avx2)
+
+ # save callee-saved clobbered registers to comply with C function ABI
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov %rsp, IDX
+ sub $FRAMESZ, %rsp
+ and $~0x1F, %rsp
+ mov IDX, _rsp(%rsp)
+
+ # Load the pre-transposed incoming digest.
+ vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a
+ vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b
+ vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c
+ vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d
+ vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e
+ vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f
+ vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g
+ vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h
+
+ lea K256_8(%rip),TBL
+
+	# load the address of each of the 8 message lanes
+ # getting ready to transpose input onto stack
+ mov _args_data_ptr+0*PTR_SZ(STATE),inp0
+ mov _args_data_ptr+1*PTR_SZ(STATE),inp1
+ mov _args_data_ptr+2*PTR_SZ(STATE),inp2
+ mov _args_data_ptr+3*PTR_SZ(STATE),inp3
+ mov _args_data_ptr+4*PTR_SZ(STATE),inp4
+ mov _args_data_ptr+5*PTR_SZ(STATE),inp5
+ mov _args_data_ptr+6*PTR_SZ(STATE),inp6
+ mov _args_data_ptr+7*PTR_SZ(STATE),inp7
+
+ xor IDX, IDX
+lloop:
+ xor ROUND, ROUND
+
+ # save old digest
+ vmovdqu a, _digest(%rsp)
+ vmovdqu b, _digest+1*SZ8(%rsp)
+ vmovdqu c, _digest+2*SZ8(%rsp)
+ vmovdqu d, _digest+3*SZ8(%rsp)
+ vmovdqu e, _digest+4*SZ8(%rsp)
+ vmovdqu f, _digest+5*SZ8(%rsp)
+ vmovdqu g, _digest+6*SZ8(%rsp)
+ vmovdqu h, _digest+7*SZ8(%rsp)
+ i = 0
+.rep 2
+ VMOVPS i*32(inp0, IDX), TT0
+ VMOVPS i*32(inp1, IDX), TT1
+ VMOVPS i*32(inp2, IDX), TT2
+ VMOVPS i*32(inp3, IDX), TT3
+ VMOVPS i*32(inp4, IDX), TT4
+ VMOVPS i*32(inp5, IDX), TT5
+ VMOVPS i*32(inp6, IDX), TT6
+ VMOVPS i*32(inp7, IDX), TT7
+ vmovdqu g, _ytmp(%rsp)
+ vmovdqu h, _ytmp+1*SZ8(%rsp)
+ TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
+ vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
+ vmovdqu _ytmp(%rsp), g
+ vpshufb TMP1, TT0, TT0
+ vpshufb TMP1, TT1, TT1
+ vpshufb TMP1, TT2, TT2
+ vpshufb TMP1, TT3, TT3
+ vpshufb TMP1, TT4, TT4
+ vpshufb TMP1, TT5, TT5
+ vpshufb TMP1, TT6, TT6
+ vpshufb TMP1, TT7, TT7
+ vmovdqu _ytmp+1*SZ8(%rsp), h
+ vmovdqu TT4, _ytmp(%rsp)
+ vmovdqu TT5, _ytmp+1*SZ8(%rsp)
+ vmovdqu TT6, _ytmp+2*SZ8(%rsp)
+ vmovdqu TT7, _ytmp+3*SZ8(%rsp)
+ ROUND_00_15 TT0,(i*8+0)
+ vmovdqu _ytmp(%rsp), TT0
+ ROUND_00_15 TT1,(i*8+1)
+ vmovdqu _ytmp+1*SZ8(%rsp), TT1
+ ROUND_00_15 TT2,(i*8+2)
+ vmovdqu _ytmp+2*SZ8(%rsp), TT2
+ ROUND_00_15 TT3,(i*8+3)
+ vmovdqu _ytmp+3*SZ8(%rsp), TT3
+ ROUND_00_15 TT0,(i*8+4)
+ ROUND_00_15 TT1,(i*8+5)
+ ROUND_00_15 TT2,(i*8+6)
+ ROUND_00_15 TT3,(i*8+7)
+ i = (i+1)
+.endr
+ add $64, IDX
+ i = (i*8)
+
+ jmp Lrounds_16_xx
+.align 16
+Lrounds_16_xx:
+.rep 16
+ ROUND_16_XX T1, i
+ i = (i+1)
+.endr
+
+ cmp $ROUNDS,ROUND
+ jb Lrounds_16_xx
+
+ # add old digest
+ vpaddd _digest+0*SZ8(%rsp), a, a
+ vpaddd _digest+1*SZ8(%rsp), b, b
+ vpaddd _digest+2*SZ8(%rsp), c, c
+ vpaddd _digest+3*SZ8(%rsp), d, d
+ vpaddd _digest+4*SZ8(%rsp), e, e
+ vpaddd _digest+5*SZ8(%rsp), f, f
+ vpaddd _digest+6*SZ8(%rsp), g, g
+ vpaddd _digest+7*SZ8(%rsp), h, h
+
+ sub $1, INP_SIZE # unit is blocks
+ jne lloop
+
+ # write back to memory (state object) the transposed digest
+ vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
+ vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
+ vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
+ vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
+ vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
+ vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
+ vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
+ vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE)
+
+ # update input pointers
+ add IDX, inp0
+ mov inp0, _args_data_ptr+0*8(STATE)
+ add IDX, inp1
+ mov inp1, _args_data_ptr+1*8(STATE)
+ add IDX, inp2
+ mov inp2, _args_data_ptr+2*8(STATE)
+ add IDX, inp3
+ mov inp3, _args_data_ptr+3*8(STATE)
+ add IDX, inp4
+ mov inp4, _args_data_ptr+4*8(STATE)
+ add IDX, inp5
+ mov inp5, _args_data_ptr+5*8(STATE)
+ add IDX, inp6
+ mov inp6, _args_data_ptr+6*8(STATE)
+ add IDX, inp7
+ mov inp7, _args_data_ptr+7*8(STATE)
+
+ # Postamble
+ mov _rsp(%rsp), %rsp
+
+ # restore callee-saved clobbered registers
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+
+ ret
+ENDPROC(sha256_x8_avx2)
+.data
+.align 64
+K256_8:
+ .octa 0x428a2f98428a2f98428a2f98428a2f98
+ .octa 0x428a2f98428a2f98428a2f98428a2f98
+ .octa 0x71374491713744917137449171374491
+ .octa 0x71374491713744917137449171374491
+ .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
+ .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
+ .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
+ .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
+ .octa 0x3956c25b3956c25b3956c25b3956c25b
+ .octa 0x3956c25b3956c25b3956c25b3956c25b
+ .octa 0x59f111f159f111f159f111f159f111f1
+ .octa 0x59f111f159f111f159f111f159f111f1
+ .octa 0x923f82a4923f82a4923f82a4923f82a4
+ .octa 0x923f82a4923f82a4923f82a4923f82a4
+ .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
+ .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
+ .octa 0xd807aa98d807aa98d807aa98d807aa98
+ .octa 0xd807aa98d807aa98d807aa98d807aa98
+ .octa 0x12835b0112835b0112835b0112835b01
+ .octa 0x12835b0112835b0112835b0112835b01
+ .octa 0x243185be243185be243185be243185be
+ .octa 0x243185be243185be243185be243185be
+ .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
+ .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
+ .octa 0x72be5d7472be5d7472be5d7472be5d74
+ .octa 0x72be5d7472be5d7472be5d7472be5d74
+ .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
+ .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
+ .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
+ .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
+ .octa 0xc19bf174c19bf174c19bf174c19bf174
+ .octa 0xc19bf174c19bf174c19bf174c19bf174
+ .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
+ .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
+ .octa 0xefbe4786efbe4786efbe4786efbe4786
+ .octa 0xefbe4786efbe4786efbe4786efbe4786
+ .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
+ .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
+ .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
+ .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
+ .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
+ .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
+ .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
+ .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
+ .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
+ .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
+ .octa 0x76f988da76f988da76f988da76f988da
+ .octa 0x76f988da76f988da76f988da76f988da
+ .octa 0x983e5152983e5152983e5152983e5152
+ .octa 0x983e5152983e5152983e5152983e5152
+ .octa 0xa831c66da831c66da831c66da831c66d
+ .octa 0xa831c66da831c66da831c66da831c66d
+ .octa 0xb00327c8b00327c8b00327c8b00327c8
+ .octa 0xb00327c8b00327c8b00327c8b00327c8
+ .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
+ .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
+ .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
+ .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
+ .octa 0xd5a79147d5a79147d5a79147d5a79147
+ .octa 0xd5a79147d5a79147d5a79147d5a79147
+ .octa 0x06ca635106ca635106ca635106ca6351
+ .octa 0x06ca635106ca635106ca635106ca6351
+ .octa 0x14292967142929671429296714292967
+ .octa 0x14292967142929671429296714292967
+ .octa 0x27b70a8527b70a8527b70a8527b70a85
+ .octa 0x27b70a8527b70a8527b70a8527b70a85
+ .octa 0x2e1b21382e1b21382e1b21382e1b2138
+ .octa 0x2e1b21382e1b21382e1b21382e1b2138
+ .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
+ .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
+ .octa 0x53380d1353380d1353380d1353380d13
+ .octa 0x53380d1353380d1353380d1353380d13
+ .octa 0x650a7354650a7354650a7354650a7354
+ .octa 0x650a7354650a7354650a7354650a7354
+ .octa 0x766a0abb766a0abb766a0abb766a0abb
+ .octa 0x766a0abb766a0abb766a0abb766a0abb
+ .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
+ .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
+ .octa 0x92722c8592722c8592722c8592722c85
+ .octa 0x92722c8592722c8592722c8592722c85
+ .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
+ .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
+ .octa 0xa81a664ba81a664ba81a664ba81a664b
+ .octa 0xa81a664ba81a664ba81a664ba81a664b
+ .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
+ .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
+ .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
+ .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
+ .octa 0xd192e819d192e819d192e819d192e819
+ .octa 0xd192e819d192e819d192e819d192e819
+ .octa 0xd6990624d6990624d6990624d6990624
+ .octa 0xd6990624d6990624d6990624d6990624
+ .octa 0xf40e3585f40e3585f40e3585f40e3585
+ .octa 0xf40e3585f40e3585f40e3585f40e3585
+ .octa 0x106aa070106aa070106aa070106aa070
+ .octa 0x106aa070106aa070106aa070106aa070
+ .octa 0x19a4c11619a4c11619a4c11619a4c116
+ .octa 0x19a4c11619a4c11619a4c11619a4c116
+ .octa 0x1e376c081e376c081e376c081e376c08
+ .octa 0x1e376c081e376c081e376c081e376c08
+ .octa 0x2748774c2748774c2748774c2748774c
+ .octa 0x2748774c2748774c2748774c2748774c
+ .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
+ .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
+ .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
+ .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
+ .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
+ .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
+ .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
+ .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
+ .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
+ .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
+ .octa 0x748f82ee748f82ee748f82ee748f82ee
+ .octa 0x748f82ee748f82ee748f82ee748f82ee
+ .octa 0x78a5636f78a5636f78a5636f78a5636f
+ .octa 0x78a5636f78a5636f78a5636f78a5636f
+ .octa 0x84c8781484c8781484c8781484c87814
+ .octa 0x84c8781484c8781484c8781484c87814
+ .octa 0x8cc702088cc702088cc702088cc70208
+ .octa 0x8cc702088cc702088cc702088cc70208
+ .octa 0x90befffa90befffa90befffa90befffa
+ .octa 0x90befffa90befffa90befffa90befffa
+ .octa 0xa4506ceba4506ceba4506ceba4506ceb
+ .octa 0xa4506ceba4506ceba4506ceba4506ceb
+ .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
+ .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
+ .octa 0xc67178f2c67178f2c67178f2c67178f2
+ .octa 0xc67178f2c67178f2c67178f2c67178f2
+PSHUFFLE_BYTE_FLIP_MASK:
+.octa 0x0c0d0e0f08090a0b0405060700010203
+.octa 0x0c0d0e0f08090a0b0405060700010203
+
+.align 64
+.global K256
+K256:
+ .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 3ae0f43ebd37..9e79baf03a4b 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -427,4 +427,14 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-ssse3");
+MODULE_ALIAS_CRYPTO("sha256-avx");
+MODULE_ALIAS_CRYPTO("sha256-avx2");
MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-ssse3");
+MODULE_ALIAS_CRYPTO("sha224-avx");
+MODULE_ALIAS_CRYPTO("sha224-avx2");
+#ifdef CONFIG_AS_SHA256_NI
+MODULE_ALIAS_CRYPTO("sha256-ni");
+MODULE_ALIAS_CRYPTO("sha224-ni");
+#endif
diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile
new file mode 100644
index 000000000000..0a57e2103980
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/Makefile
@@ -0,0 +1,11 @@
+#
+# Arch-specific CryptoAPI modules.
+#
+
+avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+ $(comma)4)$(comma)%ymm2,yes,no)
+ifeq ($(avx2_supported),yes)
+ obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
+ sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
+ sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
+endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
new file mode 100644
index 000000000000..f4cf5b78fd36
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -0,0 +1,1046 @@
+/*
+ * Multi buffer SHA512 algorithm Glue Code
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/mcryptd.h>
+#include <crypto/crypto_wq.h>
+#include <asm/byteorder.h>
+#include <linux/hardirq.h>
+#include <asm/fpu/api.h>
+#include "sha512_mb_ctx.h"
+
+#define FLUSH_INTERVAL 1000 /* in usec */
+
+static struct mcryptd_alg_state sha512_mb_alg_state;
+
+struct sha512_mb_ctx {
+ struct mcryptd_ahash *mcryptd_tfm;
+};
+
+static inline struct mcryptd_hash_request_ctx
+ *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
+{
+ struct ahash_request *areq;
+
+ areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+ return container_of(areq, struct mcryptd_hash_request_ctx, areq);
+}
+
+static inline struct ahash_request
+ *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
+{
+ return container_of((void *) ctx, struct ahash_request, __ctx);
+}
+
+static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
+ struct ahash_request *areq)
+{
+ rctx->flag = HASH_UPDATE;
+}
+
+static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
+static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
+ (struct sha512_mb_mgr *state,
+ struct job_sha512 *job);
+static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
+ (struct sha512_mb_mgr *state);
+static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
+ (struct sha512_mb_mgr *state);
+
+inline void sha512_init_digest(uint64_t *digest)
+{
+ static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
+ SHA512_H0, SHA512_H1, SHA512_H2,
+ SHA512_H3, SHA512_H4, SHA512_H5,
+ SHA512_H6, SHA512_H7 };
+ memcpy(digest, initial_digest, sizeof(initial_digest));
+}
+
+inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
+ uint32_t total_len)
+{
+ uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
+
+ memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
+ padblock[i] = 0x80;
+
+ i += ((SHA512_BLOCK_SIZE - 1) &
+ (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
+ + 1 + SHA512_PADLENGTHFIELD_SIZE;
+
+#if SHA512_PADLENGTHFIELD_SIZE == 16
+ *((uint64_t *) &padblock[i - 16]) = 0;
+#endif
+
+ *((uint64_t *) &padblock[i - 8]) = cpu_to_be64((uint64_t)total_len << 3);
+
+ /* Number of extra blocks to hash */
+ return i >> SHA512_LOG2_BLOCK_SIZE;
+}
+
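+/*
+ * Worked example of the padding math above: for total_len = 100, 0x80 is
+ * written at offset 100 and the 16-byte length field ends the pad at
+ * offset 128, so one extra block is hashed; for total_len = 120 the
+ * length field no longer fits in the first block, the pad runs to offset
+ * 256, and two extra blocks are hashed.
+ */
+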
+static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
+ (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
+{
+ while (ctx) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
+ /* Clear PROCESSING bit */
+ ctx->status = HASH_CTX_STS_COMPLETE;
+ return ctx;
+ }
+
+ /*
+ * If the extra blocks are empty, begin hashing what remains
+ * in the user's buffer.
+ */
+ if (ctx->partial_block_buffer_length == 0 &&
+ ctx->incoming_buffer_length) {
+
+ const void *buffer = ctx->incoming_buffer;
+ uint32_t len = ctx->incoming_buffer_length;
+ uint32_t copy_len;
+
+ /*
+ * Only entire blocks can be hashed.
+ * Copy remainder to extra blocks buffer.
+ */
+ copy_len = len & (SHA512_BLOCK_SIZE-1);
+
+ if (copy_len) {
+ len -= copy_len;
+ memcpy(ctx->partial_block_buffer,
+ ((const char *) buffer + len),
+ copy_len);
+ ctx->partial_block_buffer_length = copy_len;
+ }
+
+ ctx->incoming_buffer_length = 0;
+
+ /* len should be a multiple of the block size now */
+ assert((len % SHA512_BLOCK_SIZE) == 0);
+
+ /* Set len to the number of blocks to be hashed */
+ len >>= SHA512_LOG2_BLOCK_SIZE;
+
+ if (len) {
+
+ ctx->job.buffer = (uint8_t *) buffer;
+ ctx->job.len = len;
+ ctx = (struct sha512_hash_ctx *)
+ sha512_job_mgr_submit(&mgr->mgr,
+ &ctx->job);
+ continue;
+ }
+ }
+
+ /*
+ * If the extra blocks are not empty, then we are
+ * either on the last block(s) or we need more
+ * user input before continuing.
+ */
+ if (ctx->status & HASH_CTX_STS_LAST) {
+
+ uint8_t *buf = ctx->partial_block_buffer;
+ uint32_t n_extra_blocks =
+ sha512_pad(buf, ctx->total_length);
+
+ ctx->status = (HASH_CTX_STS_PROCESSING |
+ HASH_CTX_STS_COMPLETE);
+ ctx->job.buffer = buf;
+ ctx->job.len = (uint32_t) n_extra_blocks;
+ ctx = (struct sha512_hash_ctx *)
+ sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
+ continue;
+ }
+
+ if (ctx)
+ ctx->status = HASH_CTX_STS_IDLE;
+ return ctx;
+ }
+
+ return NULL;
+}
+
+static struct sha512_hash_ctx
+ *sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
+{
+ /*
+ * If get_comp_job returns NULL, there are no jobs complete.
+ * If get_comp_job returns a job, verify that it is safe to return to
+ * the user.
+ * If it is not ready, resubmit the job to finish processing.
+ * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
+ * returned.
+ * Otherwise, all jobs currently being managed by the hash_ctx_mgr
+ * still need processing.
+ */
+ struct sha512_hash_ctx *ctx;
+
+ ctx = (struct sha512_hash_ctx *)
+ sha512_job_mgr_get_comp_job(&mgr->mgr);
+ return sha512_ctx_mgr_resubmit(mgr, ctx);
+}
+
+static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
+{
+ sha512_job_mgr_init(&mgr->mgr);
+}
+
+static struct sha512_hash_ctx
+ *sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
+ struct sha512_hash_ctx *ctx,
+ const void *buffer,
+ uint32_t len,
+ int flags)
+{
+ if (flags & (~HASH_ENTIRE)) {
+ /*
+ * User should not pass anything other than FIRST, UPDATE, or
+ * LAST
+ */
+ ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
+ return ctx;
+ }
+
+ if (ctx->status & HASH_CTX_STS_PROCESSING) {
+ /* Cannot submit to a currently processing job. */
+ ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
+ return ctx;
+ }
+
+ if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ /* Cannot update a finished job. */
+ ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
+ return ctx;
+ }
+
+
+ if (flags & HASH_FIRST) {
+ /* Init digest */
+ sha512_init_digest(ctx->job.result_digest);
+
+ /* Reset byte counter */
+ ctx->total_length = 0;
+
+ /* Clear extra blocks */
+ ctx->partial_block_buffer_length = 0;
+ }
+
+ /*
+ * If we made it here, there were no errors during this call to
+ * submit
+ */
+ ctx->error = HASH_CTX_ERROR_NONE;
+
+ /* Store buffer ptr info from user */
+ ctx->incoming_buffer = buffer;
+ ctx->incoming_buffer_length = len;
+
+ /*
+ * Store the user's request flags and mark this ctx as currently being
+ * processed.
+ */
+ ctx->status = (flags & HASH_LAST) ?
+ (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
+ HASH_CTX_STS_PROCESSING;
+
+ /* Advance byte counter */
+ ctx->total_length += len;
+
+ /*
+ * If there is anything currently buffered in the extra blocks,
+ * append to it until it contains a whole block.
+ * Or if the user's buffer contains less than a whole block,
+ * append as much as possible to the extra block.
+ */
+ if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
+ /* Compute how many bytes to copy from user buffer into extra
+ * block
+ */
+ uint32_t copy_len = SHA512_BLOCK_SIZE -
+ ctx->partial_block_buffer_length;
+ if (len < copy_len)
+ copy_len = len;
+
+ if (copy_len) {
+ /* Copy and update relevant pointers and counters */
+ memcpy
+ (&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
+ buffer, copy_len);
+
+ ctx->partial_block_buffer_length += copy_len;
+ ctx->incoming_buffer = (const void *)
+ ((const char *)buffer + copy_len);
+ ctx->incoming_buffer_length = len - copy_len;
+ }
+
+ /* The extra block should never contain more than 1 block
+ * here
+ */
+ assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);
+
+ /* If the extra block buffer contains exactly 1 block, it can
+ * be hashed.
+ */
+ if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
+ ctx->partial_block_buffer_length = 0;
+
+ ctx->job.buffer = ctx->partial_block_buffer;
+ ctx->job.len = 1;
+ ctx = (struct sha512_hash_ctx *)
+ sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
+ }
+ }
+
+ return sha512_ctx_mgr_resubmit(mgr, ctx);
+}
+
+static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
+{
+ struct sha512_hash_ctx *ctx;
+
+ while (1) {
+ ctx = (struct sha512_hash_ctx *)
+ sha512_job_mgr_flush(&mgr->mgr);
+
+ /* If flush returned 0, there are no more jobs in flight. */
+ if (!ctx)
+ return NULL;
+
+ /*
+ * If flush returned a job, resubmit the job to finish
+ * processing.
+ */
+ ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
+
+ /*
+ * If sha512_ctx_mgr_resubmit returned a job, it is ready to
+ * be returned. Otherwise, all jobs currently being managed by
+ * the sha512_ctx_mgr still need processing. Loop.
+ */
+ if (ctx)
+ return ctx;
+ }
+}
+
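+/*
+ * Illustrative use of the context manager, matching the ahash entry points
+ * below: sha512_mb_init() seeds the digest and leaves the ctx IDLE, each
+ * update/finup/final wraps sha512_ctx_mgr_submit() in kernel_fpu_begin()/
+ * kernel_fpu_end() with HASH_UPDATE or HASH_LAST, a NULL return means the
+ * job is still queued in a lane, and the periodic flusher drains stalled
+ * lanes through sha512_ctx_mgr_flush() until it too returns NULL.
+ */
+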
+static int sha512_mb_init(struct ahash_request *areq)
+{
+ struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
+
+ hash_ctx_init(sctx);
+ sctx->job.result_digest[0] = SHA512_H0;
+ sctx->job.result_digest[1] = SHA512_H1;
+ sctx->job.result_digest[2] = SHA512_H2;
+ sctx->job.result_digest[3] = SHA512_H3;
+ sctx->job.result_digest[4] = SHA512_H4;
+ sctx->job.result_digest[5] = SHA512_H5;
+ sctx->job.result_digest[6] = SHA512_H6;
+ sctx->job.result_digest[7] = SHA512_H7;
+ sctx->total_length = 0;
+ sctx->partial_block_buffer_length = 0;
+ sctx->status = HASH_CTX_STS_IDLE;
+
+ return 0;
+}
+
+static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
+{
+ int i;
+ struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
+ __be64 *dst = (__be64 *) rctx->out;
+
+ for (i = 0; i < 8; ++i)
+ dst[i] = cpu_to_be64(sctx->job.result_digest[i]);
+
+ return 0;
+}
+
+static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
+ struct mcryptd_alg_cstate *cstate, bool flush)
+{
+ int flag = HASH_UPDATE;
+ int nbytes, err = 0;
+ struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
+ struct sha512_hash_ctx *sha_ctx;
+
+ /* more work ? */
+ while (!(rctx->flag & HASH_DONE)) {
+ nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
+ if (nbytes < 0) {
+ err = nbytes;
+ goto out;
+ }
+ /* check if the walk is done */
+ if (crypto_ahash_walk_last(&rctx->walk)) {
+ rctx->flag |= HASH_DONE;
+ if (rctx->flag & HASH_FINAL)
+ flag |= HASH_LAST;
+
+ }
+ sha_ctx = (struct sha512_hash_ctx *)
+ ahash_request_ctx(&rctx->areq);
+ kernel_fpu_begin();
+ sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
+ rctx->walk.data, nbytes, flag);
+ if (!sha_ctx) {
+ if (flush)
+ sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
+ }
+ kernel_fpu_end();
+ if (sha_ctx)
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ else {
+ rctx = NULL;
+ goto out;
+ }
+ }
+
+ /* copy the results */
+ if (rctx->flag & HASH_FINAL)
+ sha512_mb_set_results(rctx);
+
+out:
+ *ret_rctx = rctx;
+ return err;
+}
+
+static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+ struct mcryptd_alg_cstate *cstate,
+ int err)
+{
+ struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
+ struct sha512_hash_ctx *sha_ctx;
+ struct mcryptd_hash_request_ctx *req_ctx;
+ int ret;
+
+ /* remove from work list */
+ spin_lock(&cstate->work_lock);
+ list_del(&rctx->waiter);
+ spin_unlock(&cstate->work_lock);
+
+ if (irqs_disabled())
+ rctx->complete(&req->base, err);
+ else {
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+ }
+
+ /* check to see if there are other jobs that are done */
+ sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
+ while (sha_ctx) {
+ req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&req_ctx, cstate, false);
+ if (req_ctx) {
+ spin_lock(&cstate->work_lock);
+ list_del(&req_ctx->waiter);
+ spin_unlock(&cstate->work_lock);
+
+ req = cast_mcryptd_ctx_to_req(req_ctx);
+ if (irqs_disabled())
+ req_ctx->complete(&req->base, ret);
+ else {
+ local_bh_disable();
+ req_ctx->complete(&req->base, ret);
+ local_bh_enable();
+ }
+ }
+ sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
+ }
+
+ return 0;
+}
+
+static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
+ struct mcryptd_alg_cstate *cstate)
+{
+ unsigned long next_flush;
+ unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
+
+ /* initialize tag */
+ rctx->tag.arrival = jiffies; /* tag the arrival time */
+ rctx->tag.seq_num = cstate->next_seq_num++;
+ next_flush = rctx->tag.arrival + delay;
+ rctx->tag.expire = next_flush;
+
+ spin_lock(&cstate->work_lock);
+ list_add_tail(&rctx->waiter, &cstate->work_list);
+ spin_unlock(&cstate->work_lock);
+
+ mcryptd_arm_flusher(cstate, delay);
+}
+
+static int sha512_mb_update(struct ahash_request *areq)
+{
+ struct mcryptd_hash_request_ctx *rctx =
+ container_of(areq, struct mcryptd_hash_request_ctx,
+ areq);
+ struct mcryptd_alg_cstate *cstate =
+ this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
+
+ struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
+ struct sha512_hash_ctx *sha_ctx;
+ int ret = 0, nbytes;
+
+
+ /* sanity check */
+ if (rctx->tag.cpu != smp_processor_id()) {
+ pr_err("mcryptd error: cpu clash\n");
+ goto done;
+ }
+
+ /* need to init context */
+ req_ctx_init(rctx, areq);
+
+ nbytes = crypto_ahash_walk_first(req, &rctx->walk);
+
+ if (nbytes < 0) {
+ ret = nbytes;
+ goto done;
+ }
+
+ if (crypto_ahash_walk_last(&rctx->walk))
+ rctx->flag |= HASH_DONE;
+
+ /* submit */
+ sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
+ sha512_mb_add_list(rctx, cstate);
+ kernel_fpu_begin();
+ sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ nbytes, HASH_UPDATE);
+ kernel_fpu_end();
+
+ /* check if anything is returned */
+ if (!sha_ctx)
+ return -EINPROGRESS;
+
+ if (sha_ctx->error) {
+ ret = sha_ctx->error;
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ goto done;
+ }
+
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&rctx, cstate, false);
+
+ if (!rctx)
+ return -EINPROGRESS;
+done:
+ sha_complete_job(rctx, cstate, ret);
+ return ret;
+}
+
+static int sha512_mb_finup(struct ahash_request *areq)
+{
+ struct mcryptd_hash_request_ctx *rctx =
+ container_of(areq, struct mcryptd_hash_request_ctx,
+ areq);
+ struct mcryptd_alg_cstate *cstate =
+ this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
+
+ struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
+ struct sha512_hash_ctx *sha_ctx;
+ int ret = 0, flag = HASH_UPDATE, nbytes;
+
+ /* sanity check */
+ if (rctx->tag.cpu != smp_processor_id()) {
+ pr_err("mcryptd error: cpu clash\n");
+ goto done;
+ }
+
+ /* need to init context */
+ req_ctx_init(rctx, areq);
+
+ nbytes = crypto_ahash_walk_first(req, &rctx->walk);
+
+ if (nbytes < 0) {
+ ret = nbytes;
+ goto done;
+ }
+
+ if (crypto_ahash_walk_last(&rctx->walk)) {
+ rctx->flag |= HASH_DONE;
+ flag = HASH_LAST;
+ }
+
+ /* submit */
+ rctx->flag |= HASH_FINAL;
+ sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
+ sha512_mb_add_list(rctx, cstate);
+
+ kernel_fpu_begin();
+ sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ nbytes, flag);
+ kernel_fpu_end();
+
+ /* check if anything is returned */
+ if (!sha_ctx)
+ return -EINPROGRESS;
+
+ if (sha_ctx->error) {
+ ret = sha_ctx->error;
+ goto done;
+ }
+
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&rctx, cstate, false);
+ if (!rctx)
+ return -EINPROGRESS;
+done:
+ sha_complete_job(rctx, cstate, ret);
+ return ret;
+}
+
+static int sha512_mb_final(struct ahash_request *areq)
+{
+ struct mcryptd_hash_request_ctx *rctx =
+ container_of(areq, struct mcryptd_hash_request_ctx,
+ areq);
+ struct mcryptd_alg_cstate *cstate =
+ this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
+
+ struct sha512_hash_ctx *sha_ctx;
+ int ret = 0;
+ u8 data;
+
+ /* sanity check */
+ if (rctx->tag.cpu != smp_processor_id()) {
+ pr_err("mcryptd error: cpu clash\n");
+ goto done;
+ }
+
+ /* need to init context */
+ req_ctx_init(rctx, areq);
+
+ rctx->flag |= HASH_DONE | HASH_FINAL;
+
+ sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
+ /* flag HASH_FINAL and 0 data size */
+ sha512_mb_add_list(rctx, cstate);
+ kernel_fpu_begin();
+ sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
+ HASH_LAST);
+ kernel_fpu_end();
+
+ /* check if anything is returned */
+ if (!sha_ctx)
+ return -EINPROGRESS;
+
+ if (sha_ctx->error) {
+ ret = sha_ctx->error;
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ goto done;
+ }
+
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ ret = sha_finish_walk(&rctx, cstate, false);
+ if (!rctx)
+ return -EINPROGRESS;
+done:
+ sha_complete_job(rctx, cstate, ret);
+ return ret;
+}
+
+static int sha512_mb_export(struct ahash_request *areq, void *out)
+{
+ struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
+
+ memcpy(out, sctx, sizeof(*sctx));
+
+ return 0;
+}
+
+static int sha512_mb_import(struct ahash_request *areq, const void *in)
+{
+ struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
+
+ memcpy(sctx, in, sizeof(*sctx));
+
+ return 0;
+}
+
+static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
+{
+ struct mcryptd_ahash *mcryptd_tfm;
+ struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mcryptd_hash_ctx *mctx;
+
+ mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(mcryptd_tfm))
+ return PTR_ERR(mcryptd_tfm);
+ mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
+ mctx->alg_state = &sha512_mb_alg_state;
+ ctx->mcryptd_tfm = mcryptd_tfm;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(&mcryptd_tfm->base));
+
+ return 0;
+}
+
+static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ sizeof(struct sha512_hash_ctx));
+
+ return 0;
+}
+
+static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static struct ahash_alg sha512_mb_areq_alg = {
+ .init = sha512_mb_init,
+ .update = sha512_mb_update,
+ .final = sha512_mb_final,
+ .finup = sha512_mb_finup,
+ .export = sha512_mb_export,
+ .import = sha512_mb_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_hash_ctx),
+ .base = {
+ .cra_name = "__sha512-mb",
+ .cra_driver_name = "__intel_sha512-mb",
+ .cra_priority = 100,
+ /*
+ * Use the ASYNC flag, as some buffers in the multi-buffer
+ * algorithm may not have completed before the hashing
+ * thread sleeps.
+ */
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT
+ (sha512_mb_areq_alg.halg.base.cra_list),
+ .cra_init = sha512_mb_areq_init_tfm,
+ .cra_exit = sha512_mb_areq_exit_tfm,
+ .cra_ctxsize = sizeof(struct sha512_hash_ctx),
+ }
+ }
+};
+
+static int sha512_mb_async_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_init(mcryptd_req);
+}
+
+static int sha512_mb_async_update(struct ahash_request *req)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_update(mcryptd_req);
+}
+
+static int sha512_mb_async_finup(struct ahash_request *req)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_finup(mcryptd_req);
+}
+
+static int sha512_mb_async_final(struct ahash_request *req)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_final(mcryptd_req);
+}
+
+static int sha512_mb_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_digest(mcryptd_req);
+}
+
+static int sha512_mb_async_export(struct ahash_request *req, void *out)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ return crypto_ahash_export(mcryptd_req, out);
+}
+
+static int sha512_mb_async_import(struct ahash_request *req, const void *in)
+{
+ struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+ struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
+ struct mcryptd_hash_request_ctx *rctx;
+ struct ahash_request *areq;
+
+ memcpy(mcryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+ rctx = ahash_request_ctx(mcryptd_req);
+
+ areq = &rctx->areq;
+
+ ahash_request_set_tfm(areq, child);
+ ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ rctx->complete, req);
+
+ return crypto_ahash_import(mcryptd_req, in);
+}
+
+static struct ahash_alg sha512_mb_async_alg = {
+ .init = sha512_mb_async_init,
+ .update = sha512_mb_async_update,
+ .final = sha512_mb_async_final,
+ .finup = sha512_mb_async_finup,
+ .digest = sha512_mb_async_digest,
+ .export = sha512_mb_async_export,
+ .import = sha512_mb_async_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_hash_ctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512_mb",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT
+ (sha512_mb_async_alg.halg.base.cra_list),
+ .cra_init = sha512_mb_async_init_tfm,
+ .cra_exit = sha512_mb_async_exit_tfm,
+ .cra_ctxsize = sizeof(struct sha512_mb_ctx),
+ .cra_alignmask = 0,
+ },
+ },
+};
+
+static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
+{
+ struct mcryptd_hash_request_ctx *rctx;
+ unsigned long cur_time;
+ unsigned long next_flush = 0;
+ struct sha512_hash_ctx *sha_ctx;
+
+
+ cur_time = jiffies;
+
+ while (!list_empty(&cstate->work_list)) {
+ rctx = list_entry(cstate->work_list.next,
+ struct mcryptd_hash_request_ctx, waiter);
+ if (time_before(cur_time, rctx->tag.expire))
+ break;
+ kernel_fpu_begin();
+ sha_ctx = (struct sha512_hash_ctx *)
+ sha512_ctx_mgr_flush(cstate->mgr);
+ kernel_fpu_end();
+ if (!sha_ctx) {
+ pr_err("sha512_mb error: nothing got flushed for"
+ " non-empty list\n");
+ break;
+ }
+ rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
+ sha_finish_walk(&rctx, cstate, true);
+ sha_complete_job(rctx, cstate, 0);
+ }
+
+ if (!list_empty(&cstate->work_list)) {
+ rctx = list_entry(cstate->work_list.next,
+ struct mcryptd_hash_request_ctx, waiter);
+ /* re-arm the flusher for the earliest remaining expiry time */
+ next_flush = rctx->tag.expire;
+ mcryptd_arm_flusher(cstate, get_delay(next_flush));
+ }
+ return next_flush;
+}
+
+static int __init sha512_mb_mod_init(void)
+{
+
+ int cpu;
+ int err;
+ struct mcryptd_alg_cstate *cpu_state;
+
+ /* check for dependent cpu features */
+ if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+ !boot_cpu_has(X86_FEATURE_BMI2))
+ return -ENODEV;
+
+ /* initialize multibuffer structures */
+ sha512_mb_alg_state.alg_cstate =
+ alloc_percpu(struct mcryptd_alg_cstate);
+
+ sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
+ sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
+ sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
+ sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;
+
+ if (!sha512_mb_alg_state.alg_cstate)
+ return -ENOMEM;
+ for_each_possible_cpu(cpu) {
+ cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
+ cpu_state->next_flush = 0;
+ cpu_state->next_seq_num = 0;
+ cpu_state->flusher_engaged = false;
+ INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
+ cpu_state->cpu = cpu;
+ cpu_state->alg_state = &sha512_mb_alg_state;
+ cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
+ GFP_KERNEL);
+ if (!cpu_state->mgr)
+ goto err2;
+ sha512_ctx_mgr_init(cpu_state->mgr);
+ INIT_LIST_HEAD(&cpu_state->work_list);
+ spin_lock_init(&cpu_state->work_lock);
+ }
+ sha512_mb_alg_state.flusher = &sha512_mb_flusher;
+
+ err = crypto_register_ahash(&sha512_mb_areq_alg);
+ if (err)
+ goto err2;
+ err = crypto_register_ahash(&sha512_mb_async_alg);
+ if (err)
+ goto err1;
+
+
+ return 0;
+err1:
+ crypto_unregister_ahash(&sha512_mb_areq_alg);
+err2:
+ for_each_possible_cpu(cpu) {
+ cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
+ kfree(cpu_state->mgr);
+ }
+ free_percpu(sha512_mb_alg_state.alg_cstate);
+ return -ENODEV;
+}
+
+static void __exit sha512_mb_mod_fini(void)
+{
+ int cpu;
+ struct mcryptd_alg_cstate *cpu_state;
+
+ crypto_unregister_ahash(&sha512_mb_async_alg);
+ crypto_unregister_ahash(&sha512_mb_areq_alg);
+ for_each_possible_cpu(cpu) {
+ cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
+ kfree(cpu_state->mgr);
+ }
+ free_percpu(sha512_mb_alg_state.alg_cstate);
+}
+
+module_init(sha512_mb_mod_init);
+module_exit(sha512_mb_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");
+
+MODULE_ALIAS("sha512");
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
new file mode 100644
index 000000000000..9d4b2c8208d5
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
@@ -0,0 +1,130 @@
+/*
+ * Header file for multi buffer SHA512 context
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SHA_MB_CTX_INTERNAL_H
+#define _SHA_MB_CTX_INTERNAL_H
+
+#include "sha512_mb_mgr.h"
+
+#define HASH_UPDATE 0x00
+#define HASH_FIRST 0x01
+#define HASH_LAST 0x02
+#define HASH_ENTIRE 0x03
+#define HASH_DONE 0x04
+#define HASH_FINAL 0x08
+
+#define HASH_CTX_STS_IDLE 0x00
+#define HASH_CTX_STS_PROCESSING 0x01
+#define HASH_CTX_STS_LAST 0x02
+#define HASH_CTX_STS_COMPLETE 0x04
+
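+/*
+ * Typical status transitions, as driven by the context manager in
+ * sha512_mb.c: IDLE -> PROCESSING (or PROCESSING | LAST for the closing
+ * submit) -> PROCESSING | COMPLETE while the padding blocks are still in
+ * flight -> COMPLETE once the job manager reports the job done.
+ */
+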
+enum hash_ctx_error {
+ HASH_CTX_ERROR_NONE = 0,
+ HASH_CTX_ERROR_INVALID_FLAGS = -1,
+ HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
+ HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
+};
+
+#define hash_ctx_user_data(ctx) ((ctx)->user_data)
+#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
+#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
+#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
+#define hash_ctx_status(ctx) ((ctx)->status)
+#define hash_ctx_error(ctx) ((ctx)->error)
+#define hash_ctx_init(ctx) \
+ do { \
+ (ctx)->error = HASH_CTX_ERROR_NONE; \
+ (ctx)->status = HASH_CTX_STS_COMPLETE; \
+ } while (0)
+
+/* Hash Constants and Typedefs */
+#define SHA512_DIGEST_LENGTH 8
+#define SHA512_LOG2_BLOCK_SIZE 7
+
+#define SHA512_PADLENGTHFIELD_SIZE 16
+
+#ifdef SHA_MB_DEBUG
+#define assert(expr) \
+do { \
+ if (unlikely(!(expr))) { \
+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ } \
+} while (0)
+#else
+#define assert(expr) do {} while (0)
+#endif
+
+struct sha512_ctx_mgr {
+ struct sha512_mb_mgr mgr;
+};
+
+/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */
+
+struct sha512_hash_ctx {
+ /* Must be at struct offset 0 */
+ struct job_sha512 job;
+ /* status flag */
+ int status;
+ /* error flag */
+ int error;
+
+ uint32_t total_length;
+ const void *incoming_buffer;
+ uint32_t incoming_buffer_length;
+ uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
+ uint32_t partial_block_buffer_length;
+ void *user_data;
+};
+
+#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
new file mode 100644
index 000000000000..178f17eef382
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
@@ -0,0 +1,104 @@
+/*
+ * Header file for multi buffer SHA512 algorithm manager
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SHA_MB_MGR_H
+#define __SHA_MB_MGR_H
+
+#include <linux/types.h>
+
+#define NUM_SHA512_DIGEST_WORDS 8
+
+enum job_sts {STS_UNKNOWN = 0,
+ STS_BEING_PROCESSED = 1,
+ STS_COMPLETED = 2,
+ STS_INTERNAL_ERROR = 3,
+ STS_ERROR = 4
+};
+
+struct job_sha512 {
+ u8 *buffer;
+ u64 len;
+ u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
+ enum job_sts status;
+ void *user_data;
+};
+
+struct sha512_args_x4 {
+ uint64_t digest[8][4];
+ uint8_t *data_ptr[4];
+};
+
+struct sha512_lane_data {
+ struct job_sha512 *job_in_lane;
+};
+
+struct sha512_mb_mgr {
+ struct sha512_args_x4 args;
+
+ uint64_t lens[4];
+
+ /*
+ * Each byte is the index (0...3) of an unused lane;
+ * byte 4 is set to FF as a flag.
+ */
+ uint64_t unused_lanes;
+ struct sha512_lane_data ldata[4];
+};
+
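+/*
+ * Lane bookkeeping example: after sha512_mb_mgr_init_avx2() unused_lanes
+ * is 0xFF03020100, i.e. lanes 0-3 are free and the 0xFF flag sits in
+ * byte 4. A submit is expected to pop the next free lane from the low
+ * byte and shift right by 8, while flush/completion shifts left by 8 and
+ * ORs the freed lane index back in; only when every lane is free is
+ * bit 39 set, which the flush routine tests to bail out early.
+ */
+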
+#define SHA512_MB_MGR_NUM_LANES_AVX2 4
+
+void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
+struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
+ struct job_sha512 *job);
+struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
+struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
+
+#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
new file mode 100644
index 000000000000..cf2636d4c9ba
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
@@ -0,0 +1,281 @@
+/*
+ * Header file for multi buffer SHA512 algorithm data structure
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+# Macros for defining data structures
+
+# Usage example
+
+#START_FIELDS # JOB_AES
+### name size align
+#FIELD _plaintext, 8, 8 # pointer to plaintext
+#FIELD _ciphertext, 8, 8 # pointer to ciphertext
+#FIELD _IV, 16, 8 # IV
+#FIELD _keys, 8, 8 # pointer to keys
+#FIELD _len, 4, 4 # length in bytes
+#FIELD _status, 4, 4 # status enumeration
+#FIELD _user_data, 8, 8 # pointer to user data
+#UNION _union, size1, align1, \
+# size2, align2, \
+# size3, align3, \
+# ...
+#END_FIELDS
+#%assign _JOB_AES_size _FIELD_OFFSET
+#%assign _JOB_AES_align _STRUCT_ALIGN
+
+#########################################################################
+
+# Alternate "struc-like" syntax:
+# STRUCT job_aes2
+# RES_Q .plaintext, 1
+# RES_Q .ciphertext, 1
+# RES_DQ .IV, 1
+# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
+# RES_U .union, size1, align1, \
+# size2, align2, \
+# ...
+# ENDSTRUCT
+# # Following only needed if nesting
+# %assign job_aes2_size _FIELD_OFFSET
+# %assign job_aes2_align _STRUCT_ALIGN
+#
+# RES_* macros take a name, a count and an optional alignment.
+# The count is in terms of the base size of the macro, and the
+# default alignment is the base size.
+# The macros are:
+# Macro Base size
+# RES_B 1
+# RES_W 2
+# RES_D 4
+# RES_Q 8
+# RES_DQ 16
+# RES_Y 32
+# RES_Z 64
+#
+# RES_U defines a union. Its arguments are a name and two or more
+# pairs of "size, alignment"
+#
+# The two assigns are only needed if this structure is being nested
+# within another. Even if the assigns are not done, one can still use
+# STRUCT_NAME_size as the size of the structure.
+#
+# Note that for nesting, you still need to assign to STRUCT_NAME_size.
+#
+# The differences between this and using "struc" directly are that each
+# type is implicitly aligned to its natural length (although this can be
+# over-ridden with an explicit third parameter), and that the structure
+# is padded at the end to its overall alignment.
+#
+
+#########################################################################
+
+#ifndef _DATASTRUCT_ASM_
+#define _DATASTRUCT_ASM_
+
+#define PTR_SZ 8
+#define SHA512_DIGEST_WORD_SIZE 8
+#define SHA512_MB_MGR_NUM_LANES_AVX2 4
+#define NUM_SHA512_DIGEST_WORDS 8
+#define SZ4 4*SHA512_DIGEST_WORD_SIZE
+#define ROUNDS 80*SZ4
+#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)
+
+# START_FIELDS
+.macro START_FIELDS
+ _FIELD_OFFSET = 0
+ _STRUCT_ALIGN = 0
+.endm
+
+# FIELD name size align
+.macro FIELD name size align
+ _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
+ \name = _FIELD_OFFSET
+ _FIELD_OFFSET = _FIELD_OFFSET + (\size)
+.if (\align > _STRUCT_ALIGN)
+ _STRUCT_ALIGN = \align
+.endif
+.endm
+
+# END_FIELDS
+.macro END_FIELDS
+ _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
+.endm
+
+.macro STRUCT p1
+START_FIELDS
+.struc \p1
+.endm
+
+.macro ENDSTRUCT
+ tmp = _FIELD_OFFSET
+ END_FIELDS
+ tmp = (_FIELD_OFFSET - ##tmp)
+.if (tmp > 0)
+ .lcomm tmp
+.endif
+.endstruc
+.endm
+
+## RES_int name size align
+.macro RES_int p1 p2 p3
+ name = \p1
+ size = \p2
+ align = .\p3
+
+ _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
+.align align
+.lcomm name size
+ _FIELD_OFFSET = _FIELD_OFFSET + (size)
+.if (align > _STRUCT_ALIGN)
+ _STRUCT_ALIGN = align
+.endif
+.endm
+
+# macro RES_B name, size [, align]
+.macro RES_B _name, _size, _align=1
+RES_int _name _size _align
+.endm
+
+# macro RES_W name, size [, align]
+.macro RES_W _name, _size, _align=2
+RES_int _name 2*(_size) _align
+.endm
+
+# macro RES_D name, size [, align]
+.macro RES_D _name, _size, _align=4
+RES_int _name 4*(_size) _align
+.endm
+
+# macro RES_Q name, size [, align]
+.macro RES_Q _name, _size, _align=8
+RES_int _name 8*(_size) _align
+.endm
+
+# macro RES_DQ name, size [, align]
+.macro RES_DQ _name, _size, _align=16
+RES_int _name 16*(_size) _align
+.endm
+
+# macro RES_Y name, size [, align]
+.macro RES_Y _name, _size, _align=32
+RES_int _name 32*(_size) _align
+.endm
+
+# macro RES_Z name, size [, align]
+.macro RES_Z _name, _size, _align=64
+RES_int _name 64*(_size) _align
+.endm
+
+#endif
+
+###################################################################
+### Define SHA512 Out Of Order Data Structures
+###################################################################
+
+START_FIELDS # LANE_DATA
+### name size align
+FIELD _job_in_lane, 8, 8 # pointer to job object
+END_FIELDS
+
+ _LANE_DATA_size = _FIELD_OFFSET
+ _LANE_DATA_align = _STRUCT_ALIGN
+
+####################################################################
+
+START_FIELDS # SHA512_ARGS_X4
+### name size align
+FIELD _digest, 8*8*4, 4 # transposed digest
+FIELD _data_ptr, 8*4, 8 # array of pointers to data
+END_FIELDS
+
+ _SHA512_ARGS_X4_size = _FIELD_OFFSET
+ _SHA512_ARGS_X4_align = _STRUCT_ALIGN
+
+#####################################################################
+
+START_FIELDS # MB_MGR
+### name size align
+FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
+FIELD _lens, 8*4, 8
+FIELD _unused_lanes, 8, 8
+FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align
+END_FIELDS
+
+ _MB_MGR_size = _FIELD_OFFSET
+ _MB_MGR_align = _STRUCT_ALIGN
+
+_args_digest = _args + _digest
+_args_data_ptr = _args + _data_ptr
+
+#######################################################################
+
+#######################################################################
+#### Define constants
+#######################################################################
+
+#define STS_UNKNOWN 0
+#define STS_BEING_PROCESSED 1
+#define STS_COMPLETED 2
+
+#######################################################################
+#### Define JOB_SHA512 structure
+#######################################################################
+
+START_FIELDS # JOB_SHA512
+### name size align
+FIELD _buffer, 8, 8 # pointer to buffer
+FIELD _len, 8, 8 # length in bytes
+FIELD _result_digest, 8*8, 32 # Digest (output)
+FIELD _status, 4, 4
+FIELD _user_data, 8, 8
+END_FIELDS
+
+ _JOB_SHA512_size = _FIELD_OFFSET
+ _JOB_SHA512_align = _STRUCT_ALIGN
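+
+# As a worked example of the FIELD macro, the JOB_SHA512 layout above
+# resolves to _buffer = 0, _len = 8, _result_digest = 32 (rounded up to
+# its 32-byte alignment), _status = 96, _user_data = 104, and
+# _JOB_SHA512_size = 128 once END_FIELDS pads the structure to
+# _STRUCT_ALIGN, mirroring struct job_sha512 in sha512_mb_mgr.h.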
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
new file mode 100644
index 000000000000..3ddba19a0db6
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
@@ -0,0 +1,291 @@
+/*
+ * Flush routine for SHA512 multibuffer
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+#include "sha512_mb_mgr_datastruct.S"
+
+.extern sha512_x4_avx2
+
+# LINUX register definitions
+#define arg1 %rdi
+#define arg2 %rsi
+
+# idx needs to be other than arg1, arg2, rbx, r12
+#define idx %rdx
+
+# Common definitions
+#define state arg1
+#define job arg2
+#define len2 arg2
+
+#define unused_lanes %rbx
+#define lane_data %rbx
+#define tmp2 %rbx
+
+#define job_rax %rax
+#define tmp1 %rax
+#define size_offset %rax
+#define tmp %rax
+#define start_offset %rax
+
+#define tmp3 arg1
+
+#define extra_blocks arg2
+#define p arg2
+
+#define tmp4 %r8
+#define lens0 %r8
+
+#define lens1 %r9
+#define lens2 %r10
+#define lens3 %r11
+
+.macro LABEL prefix n
+\prefix\n\():
+.endm
+
+.macro JNE_SKIP i
+jne skip_\i
+.endm
+
+.altmacro
+.macro SET_OFFSET _offset
+offset = \_offset
+.endm
+.noaltmacro
+
+# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
+# arg 1 : rcx : state
+ENTRY(sha512_mb_mgr_flush_avx2)
+ FRAME_BEGIN
+ push %rbx
+
+ # If bit (32+7) is set, then all lanes are empty
+ mov _unused_lanes(state), unused_lanes
+ bt $32+7, unused_lanes
+ jc return_null
+
+ # find a lane with a non-null job
+ xor idx, idx
+ offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne one(%rip), idx
+ offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne two(%rip), idx
+ offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+ cmovne three(%rip), idx
+
+ # copy idx to empty lanes
+copy_lane_data:
+ offset = (_args + _data_ptr)
+ mov offset(state,idx,8), tmp
+
+ I = 0
+.rep 4
+ offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
+ cmpq $0, offset(state)
+.altmacro
+ JNE_SKIP %I
+ offset = (_args + _data_ptr + 8*I)
+ mov tmp, offset(state)
+ offset = (_lens + 8*I +4)
+ movl $0xFFFFFFFF, offset(state)
+LABEL skip_ %I
+ I = (I+1)
+.noaltmacro
+.endr
+
+ # Find min length
+ mov _lens + 0*8(state),lens0
+ mov lens0,idx
+ mov _lens + 1*8(state),lens1
+ cmp idx,lens1
+ cmovb lens1,idx
+ mov _lens + 2*8(state),lens2
+ cmp idx,lens2
+ cmovb lens2,idx
+ mov _lens + 3*8(state),lens3
+ cmp idx,lens3
+ cmovb lens3,idx
+ mov idx,len2
+ and $0xF,idx
+ and $~0xFF,len2
+ jz len_is_0
+
+ sub len2, lens0
+ sub len2, lens1
+ sub len2, lens2
+ sub len2, lens3
+ shr $32,len2
+ mov lens0, _lens + 0*8(state)
+ mov lens1, _lens + 1*8(state)
+ mov lens2, _lens + 2*8(state)
+ mov lens3, _lens + 3*8(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+ call sha512_x4_avx2
+ # state and idx are intact
+
+len_is_0:
+ # process completed job "idx"
+ imul $_LANE_DATA_size, idx, lane_data
+ lea _ldata(state, lane_data), lane_data
+
+ mov _job_in_lane(lane_data), job_rax
+ movq $0, _job_in_lane(lane_data)
+ movl $STS_COMPLETED, _status(job_rax)
+ mov _unused_lanes(state), unused_lanes
+ shl $8, unused_lanes
+ or idx, unused_lanes
+ mov unused_lanes, _unused_lanes(state)
+
+ movl $0xFFFFFFFF, _lens+4(state, idx, 8)
+
+ vmovq _args_digest+0*32(state, idx, 8), %xmm0
+ vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
+ vmovq _args_digest+2*32(state, idx, 8), %xmm1
+ vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
+ vmovq _args_digest+4*32(state, idx, 8), %xmm2
+ vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
+ vmovq _args_digest+6*32(state, idx, 8), %xmm3
+ vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
+
+ vmovdqu %xmm0, _result_digest(job_rax)
+ vmovdqu %xmm1, _result_digest+1*16(job_rax)
+ vmovdqu %xmm2, _result_digest+2*16(job_rax)
+ vmovdqu %xmm3, _result_digest+3*16(job_rax)
+
+return:
+ pop %rbx
+ FRAME_END
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+ENDPROC(sha512_mb_mgr_flush_avx2)
+.align 16
+
+ENTRY(sha512_mb_mgr_get_comp_job_avx2)
+ push %rbx
+
+ mov _unused_lanes(state), unused_lanes
+ bt $(32+7), unused_lanes
+ jc .return_null
+
+ # Find min length
+ mov _lens(state),lens0
+ mov lens0,idx
+ mov _lens+1*8(state),lens1
+ cmp idx,lens1
+ cmovb lens1,idx
+ mov _lens+2*8(state),lens2
+ cmp idx,lens2
+ cmovb lens2,idx
+ mov _lens+3*8(state),lens3
+ cmp idx,lens3
+ cmovb lens3,idx
+ test $~0xF,idx
+ jnz .return_null
+ and $0xF,idx
+
+ #process completed job "idx"
+ imul $_LANE_DATA_size, idx, lane_data
+ lea _ldata(state, lane_data), lane_data
+
+ mov _job_in_lane(lane_data), job_rax
+ movq $0, _job_in_lane(lane_data)
+ movl $STS_COMPLETED, _status(job_rax)
+ mov _unused_lanes(state), unused_lanes
+ shl $8, unused_lanes
+ or idx, unused_lanes
+ mov unused_lanes, _unused_lanes(state)
+
+ movl $0xFFFFFFFF, _lens+4(state, idx, 8)
+
+ vmovq _args_digest(state, idx, 8), %xmm0
+ vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
+ vmovq _args_digest+2*32(state, idx, 8), %xmm1
+ vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
+ vmovq _args_digest+4*32(state, idx, 8), %xmm2
+ vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
+ vmovq _args_digest+6*32(state, idx, 8), %xmm3
+ vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
+
+ vmovdqu %xmm0, _result_digest+0*16(job_rax)
+ vmovdqu %xmm1, _result_digest+1*16(job_rax)
+ vmovdqu %xmm2, _result_digest+2*16(job_rax)
+ vmovdqu %xmm3, _result_digest+3*16(job_rax)
+
+ pop %rbx
+
+ ret
+
+.return_null:
+ xor job_rax, job_rax
+ pop %rbx
+ ret
+ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
+.data
+
+.align 16
+one:
+.quad 1
+two:
+.quad 2
+three:
+.quad 3
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
new file mode 100644
index 000000000000..36870b26067a
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
@@ -0,0 +1,67 @@
+/*
+ * Initialization code for multi buffer SHA512 algorithm for AVX2
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sha512_mb_mgr.h"
+
+void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
+{
+ unsigned int j;
+
+ state->lens[0] = 0;
+ state->lens[1] = 1;
+ state->lens[2] = 2;
+ state->lens[3] = 3;
+ state->unused_lanes = 0xFF03020100;
+ for (j = 0; j < 4; j++)
+ state->ldata[j].job_in_lane = NULL;
+}
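
A minimal C sketch (not part of the patch) of the bookkeeping these initial
values set up, as the submit/flush assembly uses it: unused_lanes is a byte
stack of free lane ids terminated by 0xFF, and each 64-bit lens entry packs
(blocks << 32) | lane, so a plain minimum search over lens[] also returns the
winning lane in its low nibble. Helper names below are illustrative.

	#include <stdint.h>

	/* pop the next free lane id (low byte) off the unused_lanes stack */
	static inline unsigned int lane_pop(uint64_t *unused_lanes)
	{
		unsigned int lane = *unused_lanes & 0xFF;

		*unused_lanes >>= 8;
		return lane;
	}

	/* push a completed lane back onto the stack */
	static inline void lane_push(uint64_t *unused_lanes, unsigned int lane)
	{
		*unused_lanes = (*unused_lanes << 8) | lane;
	}

	/* lens[] entry: outstanding blocks in the high 32 bits, lane id below */
	static inline uint64_t pack_lens(uint32_t blocks, unsigned int lane)
	{
		return ((uint64_t)blocks << 32) | lane;
	}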
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
new file mode 100644
index 000000000000..815f07bdd1f8
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
@@ -0,0 +1,222 @@
+/*
+ * Buffer submit code for multi buffer SHA512 algorithm
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+#include "sha512_mb_mgr_datastruct.S"
+
+.extern sha512_x4_avx2
+
+#define arg1 %rdi
+#define arg2 %rsi
+
+#define idx %rdx
+#define last_len %rdx
+
+#define size_offset %rcx
+#define tmp2 %rcx
+
+# Common definitions
+#define state arg1
+#define job arg2
+#define len2 arg2
+#define p2 arg2
+
+#define p %r11
+#define start_offset %r11
+
+#define unused_lanes %rbx
+
+#define job_rax %rax
+#define len %rax
+
+#define lane %r12
+#define tmp3 %r12
+#define lens3 %r12
+
+#define extra_blocks %r8
+#define lens0 %r8
+
+#define tmp %r9
+#define lens1 %r9
+
+#define lane_data %r10
+#define lens2 %r10
+
+#define DWORD_len %eax
+
+# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
+# arg 1 : rdi : state
+# arg 2 : rsi : job
+ENTRY(sha512_mb_mgr_submit_avx2)
+ FRAME_BEGIN
+ push %rbx
+ push %r12
+
+ mov _unused_lanes(state), unused_lanes
+ movzb %bl,lane
+ shr $8, unused_lanes
+ imul $_LANE_DATA_size, lane,lane_data
+ movl $STS_BEING_PROCESSED, _status(job)
+ lea _ldata(state, lane_data), lane_data
+ mov unused_lanes, _unused_lanes(state)
+ movl _len(job), DWORD_len
+
+ mov job, _job_in_lane(lane_data)
+ movl DWORD_len,_lens+4(state , lane, 8)
+
+ # Load digest words from result_digest
+ vmovdqu _result_digest+0*16(job), %xmm0
+ vmovdqu _result_digest+1*16(job), %xmm1
+ vmovdqu _result_digest+2*16(job), %xmm2
+ vmovdqu _result_digest+3*16(job), %xmm3
+
+ vmovq %xmm0, _args_digest(state, lane, 8)
+ vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8)
+ vmovq %xmm1, _args_digest+2*32(state , lane, 8)
+ vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8)
+ vmovq %xmm2, _args_digest+4*32(state , lane, 8)
+ vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8)
+ vmovq %xmm3, _args_digest+6*32(state , lane, 8)
+ vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8)
+
+ mov _buffer(job), p
+ mov p, _args_data_ptr(state, lane, 8)
+
+ cmp $0xFF, unused_lanes
+ jne return_null
+
+start_loop:
+
+ # Find min length
+ mov _lens+0*8(state),lens0
+ mov lens0,idx
+ mov _lens+1*8(state),lens1
+ cmp idx,lens1
+ cmovb lens1, idx
+ mov _lens+2*8(state),lens2
+ cmp idx,lens2
+ cmovb lens2,idx
+ mov _lens+3*8(state),lens3
+ cmp idx,lens3
+ cmovb lens3,idx
+ mov idx,len2
+ and $0xF,idx
+ and $~0xFF,len2
+ jz len_is_0
+
+ sub len2,lens0
+ sub len2,lens1
+ sub len2,lens2
+ sub len2,lens3
+ shr $32,len2
+ mov lens0, _lens + 0*8(state)
+ mov lens1, _lens + 1*8(state)
+ mov lens2, _lens + 2*8(state)
+ mov lens3, _lens + 3*8(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+ call sha512_x4_avx2
+ # state and idx are intact
+
+len_is_0:
+
+ # process completed job "idx"
+ imul $_LANE_DATA_size, idx, lane_data
+ lea _ldata(state, lane_data), lane_data
+
+ mov _job_in_lane(lane_data), job_rax
+ mov _unused_lanes(state), unused_lanes
+ movq $0, _job_in_lane(lane_data)
+ movl $STS_COMPLETED, _status(job_rax)
+ shl $8, unused_lanes
+ or idx, unused_lanes
+ mov unused_lanes, _unused_lanes(state)
+
+ movl $0xFFFFFFFF,_lens+4(state,idx,8)
+ vmovq _args_digest+0*32(state , idx, 8), %xmm0
+ vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
+ vmovq _args_digest+2*32(state , idx, 8), %xmm1
+ vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
+ vmovq _args_digest+4*32(state , idx, 8), %xmm2
+ vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
+ vmovq _args_digest+6*32(state , idx, 8), %xmm3
+ vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3
+
+ vmovdqu %xmm0, _result_digest + 0*16(job_rax)
+ vmovdqu %xmm1, _result_digest + 1*16(job_rax)
+ vmovdqu %xmm2, _result_digest + 2*16(job_rax)
+ vmovdqu %xmm3, _result_digest + 3*16(job_rax)
+
+return:
+ pop %r12
+ pop %rbx
+ FRAME_END
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+ENDPROC(sha512_mb_mgr_submit_avx2)
+.data
+
+.align 16
+H0: .int 0x6a09e667
+H1: .int 0xbb67ae85
+H2: .int 0x3c6ef372
+H3: .int 0xa54ff53a
+H4: .int 0x510e527f
+H5: .int 0x9b05688c
+H6: .int 0x1f83d9ab
+H7: .int 0x5be0cd19
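
Loose C-level pseudocode for the control flow of sha512_mb_mgr_submit_avx2
above (every helper and type name here is hypothetical, made up for
illustration): the job is parked in a free lane, nothing runs until all four
lanes are occupied, and then the lane with the least outstanding work is driven
to completion and its job returned.

	/* sketch only, assuming the lane_pop()/pack_lens() helpers sketched earlier */
	struct sha512_job *sketch_submit(struct sha512_mb_mgr *state,
					 struct sha512_job *job)
	{
		unsigned int lane = lane_pop(&state->unused_lanes);
		uint32_t blocks;

		install_job(state, lane, job);		/* digest, data ptr, length */
		if (state->unused_lanes != 0xFF)	/* a lane is still free */
			return NULL;			/* job completes on a later call */

		blocks = min_outstanding_blocks(state);	/* lane with the least work */
		if (blocks) {
			subtract_from_all_lanes(state, blocks);
			sha512_x4_avx2(state, blocks);	/* advance all four lanes */
		}
		return complete_min_lane(state);	/* that lane's job is now done */
	}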
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
new file mode 100644
index 000000000000..31ab1eff6413
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
@@ -0,0 +1,529 @@
+/*
+ * Multi-buffer SHA512 algorithm hash compute routine
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+# code to compute quad SHA512 using AVX2
+# use YMMs to tackle the larger digest size
+# outer calling routine takes care of save and restore of XMM registers
+# Logic designed/laid out by JDG
+
+# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
+# Stack must be aligned to 32 bytes before call
+# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12
+# Linux preserves: rcx rdx rdi rbp r13 r14 r15
+# clobbers ymm0-15
+
+#include <linux/linkage.h>
+#include "sha512_mb_mgr_datastruct.S"
+
+arg1 = %rdi
+arg2 = %rsi
+
+# Common definitions
+STATE = arg1
+INP_SIZE = arg2
+
+IDX = %rax
+ROUND = %rbx
+TBL = %r8
+
+inp0 = %r9
+inp1 = %r10
+inp2 = %r11
+inp3 = %r12
+
+a = %ymm0
+b = %ymm1
+c = %ymm2
+d = %ymm3
+e = %ymm4
+f = %ymm5
+g = %ymm6
+h = %ymm7
+
+a0 = %ymm8
+a1 = %ymm9
+a2 = %ymm10
+
+TT0 = %ymm14
+TT1 = %ymm13
+TT2 = %ymm12
+TT3 = %ymm11
+TT4 = %ymm10
+TT5 = %ymm9
+
+T1 = %ymm14
+TMP = %ymm15
+
+# Define stack usage
+STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24
+
+#define VMOVPD vmovupd
+_digest = SZ4*16
+
+# transpose r0, r1, r2, r3, t0, t1
+# "transpose" data in {r0..r3} using temps {t0..t3}
+# Input looks like: {r0 r1 r2 r3}
+# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
+# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
+# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
+# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
+#
+# output looks like: {t0 r1 r0 r3}
+# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
+# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
+# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
+# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
+
+.macro TRANSPOSE r0 r1 r2 r3 t0 t1
+ vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
+ vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
+ vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
+ vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
+
+ vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6
+ vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2
+ vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5
+ vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1
+.endm
+
+.macro ROTATE_ARGS
+TMP_ = h
+h = g
+g = f
+f = e
+e = d
+d = c
+c = b
+b = a
+a = TMP_
+.endm
+
+# PRORQ reg, imm, tmp
+# packed-rotate-right-double
+# does a rotate by doing two shifts and an or
+.macro _PRORQ reg imm tmp
+ vpsllq $(64-\imm),\reg,\tmp
+ vpsrlq $\imm,\reg, \reg
+ vpor \tmp,\reg, \reg
+.endm
+
+# non-destructive
+# PRORQ_nd reg, imm, tmp, src
+.macro _PRORQ_nd reg imm tmp src
+ vpsllq $(64-\imm), \src, \tmp
+ vpsrlq $\imm, \src, \reg
+ vpor \tmp, \reg, \reg
+.endm
+
+# PRORQ dst/src, amt
+.macro PRORQ reg imm
+ _PRORQ \reg, \imm, TMP
+.endm
+
+# PRORQ_nd dst, src, amt
+.macro PRORQ_nd reg tmp imm
+ _PRORQ_nd \reg, \imm, TMP, \tmp
+.endm
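+
+# Each PRORQ above is a plain 64-bit rotate right, applied independently to
+# the four qword lanes of a ymm register; in C terms:
+#   ror64(x, n) = (x >> n) | (x << (64 - n))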
+
+#; arguments passed implicitly in preprocessor symbols i, a...h
+.macro ROUND_00_15 _T1 i
+ PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4)
+
+ vpxor g, f, a2 # ch: a2 = f^g
+ vpand e,a2, a2 # ch: a2 = (f^g)&e
+ vpxor g, a2, a2 # a2 = ch
+
+ PRORQ_nd a1,e,41 # sig1: a1 = (e >> 41)
+
+ offset = SZ4*(\i & 0xf)
+ vmovdqu \_T1,offset(%rsp)
+ vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
+ vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 4)
+ PRORQ a0, 14 # sig1: a0 = (e >> 14) ^ (e >> 18)
+ vpaddq a2, h, h # h = h + ch
+ PRORQ_nd a2,a,6 # sig0: a2 = (a >> 6)
+ vpaddq \_T1,h, h # h = h + ch + W + K
+ vpxor a1, a0, a0 # a0 = sigma1
+ vmovdqu a,\_T1
+ PRORQ_nd a1,a,39 # sig0: a1 = (a >> 39)
+ vpxor c, \_T1, \_T1 # maj: T1 = a^c
+ add $SZ4, ROUND # ROUND++
+ vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
+ vpaddq a0, h, h
+ vpaddq h, d, d
+ vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 6)
+ PRORQ a2,28 # sig0: a2 = (a >> 28) ^ (a >> 34)
+ vpxor a1, a2, a2 # a2 = sig0
+ vpand c, a, a1 # maj: a1 = a&c
+ vpor \_T1, a1, a1 # a1 = maj
+ vpaddq a1, h, h # h = h + ch + W + K + maj
+ vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0
+ ROTATE_ARGS
+.endm
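+
+# In the usual SHA-512 notation, one ROUND_00_15 step computes
+#   T1 = h + Sigma1(e) + Ch(e,f,g) + K[t] + W[t];   d += T1;
+#   h  = T1 + Sigma0(a) + Maj(a,b,c)
+# and then ROTATE_ARGS renames the working variables for the next round.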
+
+
+#; arguments passed implicitly in preprocessor symbols i, a...h
+.macro ROUND_16_XX _T1 i
+ vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1
+ vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1
+ vmovdqu \_T1, a0
+ PRORQ \_T1,7
+ vmovdqu a1, a2
+ PRORQ a1,42
+ vpxor a0, \_T1, \_T1
+ PRORQ \_T1, 1
+ vpxor a2, a1, a1
+ PRORQ a1, 19
+ vpsrlq $7, a0, a0
+ vpxor a0, \_T1, \_T1
+ vpsrlq $6, a2, a2
+ vpxor a2, a1, a1
+ vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
+ vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1
+ vpaddq a1, \_T1, \_T1
+
+ ROUND_00_15 \_T1,\i
+.endm
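+
+# The loads, rotates and shifts above implement the SHA-512 message schedule
+#   W[i] = W[i-16] + s0(W[i-15]) + W[i-7] + s1(W[i-2])
+# with s0(x) = ror64(x,1) ^ ror64(x,8) ^ (x >> 7)
+# and  s1(x) = ror64(x,19) ^ ror64(x,61) ^ (x >> 6), four lanes at a time.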
+
+
+# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
+# arg 1 : STATE : pointer to args (digest and input data pointer arrays)
+# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
+ENTRY(sha512_x4_avx2)
+ # general registers preserved in outer calling routine
+ # outer calling routine saves all the XMM registers
+ # save callee-saved clobbered registers to comply with C function ABI
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ sub $STACK_SPACE1, %rsp
+
+ # Load the pre-transposed incoming digest.
+ vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a
+ vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b
+ vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c
+ vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d
+ vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e
+ vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f
+ vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g
+ vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h
+
+ lea K512_4(%rip),TBL
+
+ # load the address of each of the 4 message lanes
+ # getting ready to transpose input onto stack
+ mov _data_ptr+0*PTR_SZ(STATE),inp0
+ mov _data_ptr+1*PTR_SZ(STATE),inp1
+ mov _data_ptr+2*PTR_SZ(STATE),inp2
+ mov _data_ptr+3*PTR_SZ(STATE),inp3
+
+ xor IDX, IDX
+lloop:
+ xor ROUND, ROUND
+
+ # save old digest
+ vmovdqu a, _digest(%rsp)
+ vmovdqu b, _digest+1*SZ4(%rsp)
+ vmovdqu c, _digest+2*SZ4(%rsp)
+ vmovdqu d, _digest+3*SZ4(%rsp)
+ vmovdqu e, _digest+4*SZ4(%rsp)
+ vmovdqu f, _digest+5*SZ4(%rsp)
+ vmovdqu g, _digest+6*SZ4(%rsp)
+ vmovdqu h, _digest+7*SZ4(%rsp)
+ i = 0
+.rep 4
+ vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
+ VMOVPD i*32(inp0, IDX), TT2
+ VMOVPD i*32(inp1, IDX), TT1
+ VMOVPD i*32(inp2, IDX), TT4
+ VMOVPD i*32(inp3, IDX), TT3
+ TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
+ vpshufb TMP, TT0, TT0
+ vpshufb TMP, TT1, TT1
+ vpshufb TMP, TT2, TT2
+ vpshufb TMP, TT3, TT3
+ ROUND_00_15 TT0,(i*4+0)
+ ROUND_00_15 TT1,(i*4+1)
+ ROUND_00_15 TT2,(i*4+2)
+ ROUND_00_15 TT3,(i*4+3)
+ i = (i+1)
+.endr
+ add $128, IDX
+
+ i = (i*4)
+
+ jmp Lrounds_16_xx
+.align 16
+Lrounds_16_xx:
+.rep 16
+ ROUND_16_XX T1, i
+ i = (i+1)
+.endr
+ cmp $0xa00,ROUND
+ jb Lrounds_16_xx
+
+ # add old digest
+ vpaddq _digest(%rsp), a, a
+ vpaddq _digest+1*SZ4(%rsp), b, b
+ vpaddq _digest+2*SZ4(%rsp), c, c
+ vpaddq _digest+3*SZ4(%rsp), d, d
+ vpaddq _digest+4*SZ4(%rsp), e, e
+ vpaddq _digest+5*SZ4(%rsp), f, f
+ vpaddq _digest+6*SZ4(%rsp), g, g
+ vpaddq _digest+7*SZ4(%rsp), h, h
+
+ sub $1, INP_SIZE # unit is blocks
+ jne lloop
+
+ # write back to memory (state object) the transposed digest
+ vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
+ vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
+ vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
+ vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
+ vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
+ vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
+ vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
+ vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)
+
+ # update input data pointers
+ add IDX, inp0
+ mov inp0, _data_ptr+0*PTR_SZ(STATE)
+ add IDX, inp1
+ mov inp1, _data_ptr+1*PTR_SZ(STATE)
+ add IDX, inp2
+ mov inp2, _data_ptr+2*PTR_SZ(STATE)
+ add IDX, inp3
+ mov inp3, _data_ptr+3*PTR_SZ(STATE)
+
+ #;;;;;;;;;;;;;;;
+ #; Postamble
+ add $STACK_SPACE1, %rsp
+ # restore callee-saved clobbered registers
+
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+
+ # outer calling routine restores XMM and other GP registers
+ ret
+ENDPROC(sha512_x4_avx2)
+
+.data
+.align 64
+K512_4:
+ .octa 0x428a2f98d728ae22428a2f98d728ae22,\
+ 0x428a2f98d728ae22428a2f98d728ae22
+ .octa 0x7137449123ef65cd7137449123ef65cd,\
+ 0x7137449123ef65cd7137449123ef65cd
+ .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
+ 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
+ .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
+ 0xe9b5dba58189dbbce9b5dba58189dbbc
+ .octa 0x3956c25bf348b5383956c25bf348b538,\
+ 0x3956c25bf348b5383956c25bf348b538
+ .octa 0x59f111f1b605d01959f111f1b605d019,\
+ 0x59f111f1b605d01959f111f1b605d019
+ .octa 0x923f82a4af194f9b923f82a4af194f9b,\
+ 0x923f82a4af194f9b923f82a4af194f9b
+ .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
+ 0xab1c5ed5da6d8118ab1c5ed5da6d8118
+ .octa 0xd807aa98a3030242d807aa98a3030242,\
+ 0xd807aa98a3030242d807aa98a3030242
+ .octa 0x12835b0145706fbe12835b0145706fbe,\
+ 0x12835b0145706fbe12835b0145706fbe
+ .octa 0x243185be4ee4b28c243185be4ee4b28c,\
+ 0x243185be4ee4b28c243185be4ee4b28c
+ .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
+ 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
+ .octa 0x72be5d74f27b896f72be5d74f27b896f,\
+ 0x72be5d74f27b896f72be5d74f27b896f
+ .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
+ 0x80deb1fe3b1696b180deb1fe3b1696b1
+ .octa 0x9bdc06a725c712359bdc06a725c71235,\
+ 0x9bdc06a725c712359bdc06a725c71235
+ .octa 0xc19bf174cf692694c19bf174cf692694,\
+ 0xc19bf174cf692694c19bf174cf692694
+ .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
+ 0xe49b69c19ef14ad2e49b69c19ef14ad2
+ .octa 0xefbe4786384f25e3efbe4786384f25e3,\
+ 0xefbe4786384f25e3efbe4786384f25e3
+ .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
+ 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
+ .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
+ 0x240ca1cc77ac9c65240ca1cc77ac9c65
+ .octa 0x2de92c6f592b02752de92c6f592b0275,\
+ 0x2de92c6f592b02752de92c6f592b0275
+ .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
+ 0x4a7484aa6ea6e4834a7484aa6ea6e483
+ .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
+ 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
+ .octa 0x76f988da831153b576f988da831153b5,\
+ 0x76f988da831153b576f988da831153b5
+ .octa 0x983e5152ee66dfab983e5152ee66dfab,\
+ 0x983e5152ee66dfab983e5152ee66dfab
+ .octa 0xa831c66d2db43210a831c66d2db43210,\
+ 0xa831c66d2db43210a831c66d2db43210
+ .octa 0xb00327c898fb213fb00327c898fb213f,\
+ 0xb00327c898fb213fb00327c898fb213f
+ .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
+ 0xbf597fc7beef0ee4bf597fc7beef0ee4
+ .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
+ 0xc6e00bf33da88fc2c6e00bf33da88fc2
+ .octa 0xd5a79147930aa725d5a79147930aa725,\
+ 0xd5a79147930aa725d5a79147930aa725
+ .octa 0x06ca6351e003826f06ca6351e003826f,\
+ 0x06ca6351e003826f06ca6351e003826f
+ .octa 0x142929670a0e6e70142929670a0e6e70,\
+ 0x142929670a0e6e70142929670a0e6e70
+ .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
+ 0x27b70a8546d22ffc27b70a8546d22ffc
+ .octa 0x2e1b21385c26c9262e1b21385c26c926,\
+ 0x2e1b21385c26c9262e1b21385c26c926
+ .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
+ 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
+ .octa 0x53380d139d95b3df53380d139d95b3df,\
+ 0x53380d139d95b3df53380d139d95b3df
+ .octa 0x650a73548baf63de650a73548baf63de,\
+ 0x650a73548baf63de650a73548baf63de
+ .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
+ 0x766a0abb3c77b2a8766a0abb3c77b2a8
+ .octa 0x81c2c92e47edaee681c2c92e47edaee6,\
+ 0x81c2c92e47edaee681c2c92e47edaee6
+ .octa 0x92722c851482353b92722c851482353b,\
+ 0x92722c851482353b92722c851482353b
+ .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
+ 0xa2bfe8a14cf10364a2bfe8a14cf10364
+ .octa 0xa81a664bbc423001a81a664bbc423001,\
+ 0xa81a664bbc423001a81a664bbc423001
+ .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
+ 0xc24b8b70d0f89791c24b8b70d0f89791
+ .octa 0xc76c51a30654be30c76c51a30654be30,\
+ 0xc76c51a30654be30c76c51a30654be30
+ .octa 0xd192e819d6ef5218d192e819d6ef5218,\
+ 0xd192e819d6ef5218d192e819d6ef5218
+ .octa 0xd69906245565a910d69906245565a910,\
+ 0xd69906245565a910d69906245565a910
+ .octa 0xf40e35855771202af40e35855771202a,\
+ 0xf40e35855771202af40e35855771202a
+ .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
+ 0x106aa07032bbd1b8106aa07032bbd1b8
+ .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
+ 0x19a4c116b8d2d0c819a4c116b8d2d0c8
+ .octa 0x1e376c085141ab531e376c085141ab53,\
+ 0x1e376c085141ab531e376c085141ab53
+ .octa 0x2748774cdf8eeb992748774cdf8eeb99,\
+ 0x2748774cdf8eeb992748774cdf8eeb99
+ .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
+ 0x34b0bcb5e19b48a834b0bcb5e19b48a8
+ .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
+ 0x391c0cb3c5c95a63391c0cb3c5c95a63
+ .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
+ 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
+ .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
+ 0x5b9cca4f7763e3735b9cca4f7763e373
+ .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
+ 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
+ .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
+ 0x748f82ee5defb2fc748f82ee5defb2fc
+ .octa 0x78a5636f43172f6078a5636f43172f60,\
+ 0x78a5636f43172f6078a5636f43172f60
+ .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
+ 0x84c87814a1f0ab7284c87814a1f0ab72
+ .octa 0x8cc702081a6439ec8cc702081a6439ec,\
+ 0x8cc702081a6439ec8cc702081a6439ec
+ .octa 0x90befffa23631e2890befffa23631e28,\
+ 0x90befffa23631e2890befffa23631e28
+ .octa 0xa4506cebde82bde9a4506cebde82bde9,\
+ 0xa4506cebde82bde9a4506cebde82bde9
+ .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
+ 0xbef9a3f7b2c67915bef9a3f7b2c67915
+ .octa 0xc67178f2e372532bc67178f2e372532b,\
+ 0xc67178f2e372532bc67178f2e372532b
+ .octa 0xca273eceea26619cca273eceea26619c,\
+ 0xca273eceea26619cca273eceea26619c
+ .octa 0xd186b8c721c0c207d186b8c721c0c207,\
+ 0xd186b8c721c0c207d186b8c721c0c207
+ .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
+ 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
+ .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
+ 0xf57d4f7fee6ed178f57d4f7fee6ed178
+ .octa 0x06f067aa72176fba06f067aa72176fba,\
+ 0x06f067aa72176fba06f067aa72176fba
+ .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
+ 0x0a637dc5a2c898a60a637dc5a2c898a6
+ .octa 0x113f9804bef90dae113f9804bef90dae,\
+ 0x113f9804bef90dae113f9804bef90dae
+ .octa 0x1b710b35131c471b1b710b35131c471b,\
+ 0x1b710b35131c471b1b710b35131c471b
+ .octa 0x28db77f523047d8428db77f523047d84,\
+ 0x28db77f523047d8428db77f523047d84
+ .octa 0x32caab7b40c7249332caab7b40c72493,\
+ 0x32caab7b40c7249332caab7b40c72493
+ .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
+ 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
+ .octa 0x431d67c49c100d4c431d67c49c100d4c,\
+ 0x431d67c49c100d4c431d67c49c100d4c
+ .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
+ 0x4cc5d4becb3e42b64cc5d4becb3e42b6
+ .octa 0x597f299cfc657e2a597f299cfc657e2a,\
+ 0x597f299cfc657e2a597f299cfc657e2a
+ .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
+ 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
+ .octa 0x6c44198c4a4758176c44198c4a475817,\
+ 0x6c44198c4a4758176c44198c4a475817
+
+PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
+ .octa 0x18191a1b1c1d1e1f1011121314151617
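
K512_4 above is each of the 80 standard SHA-512 round constants repeated once
per lane (each pair of .octa lines yields four identical qwords), so a single
vpaddq adds K[t] to all four lanes at round t; PSHUFFLE_BYTE_FLIP_MASK
byte-swaps each qword when the big-endian message words are loaded. A hedged
sketch of how such a table could be generated (sha512_round_constants is a
stand-in name, not a symbol from the patch):

	#include <stdint.h>

	extern const uint64_t sha512_round_constants[80];

	static void build_k512_4(uint64_t k512_4[80 * 4])
	{
		int t, lane;

		for (t = 0; t < 80; t++)
			for (lane = 0; lane < 4; lane++)
				k512_4[t * 4 + lane] = sha512_round_constants[t];
	}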
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 0b17c83d027d..2b0e2a6825f3 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -346,4 +346,10 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha512-ssse3");
+MODULE_ALIAS_CRYPTO("sha512-avx");
+MODULE_ALIAS_CRYPTO("sha512-avx2");
MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha384-ssse3");
+MODULE_ALIAS_CRYPTO("sha384-avx");
+MODULE_ALIAS_CRYPTO("sha384-avx2");
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index ec138e538c44..a1e71d431fed 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -40,10 +40,10 @@ static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
-__visible void enter_from_user_mode(void)
+__visible inline void enter_from_user_mode(void)
{
CT_WARN_ON(ct_state() != CONTEXT_USER);
- user_exit();
+ user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
@@ -64,22 +64,16 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
}
/*
- * We can return 0 to resume the syscall or anything else to go to phase
- * 2. If we resume the syscall, we need to put something appropriate in
- * regs->orig_ax.
- *
- * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
- * are fully functional.
- *
- * For phase 2's benefit, our return value is:
- * 0: resume the syscall
- * 1: go to phase 2; no seccomp phase 2 needed
- * anything else: go to phase 2; pass return value to seccomp
+ * Returns the syscall nr to run (which should match regs->orig_ax) or -1
+ * to skip the syscall.
*/
-unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
+static long syscall_trace_enter(struct pt_regs *regs)
{
+ u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
+
struct thread_info *ti = pt_regs_to_thread_info(regs);
unsigned long ret = 0;
+ bool emulated = false;
u32 work;
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
@@ -87,11 +81,19 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+ if (unlikely(work & _TIF_SYSCALL_EMU))
+ emulated = true;
+
+ if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
+ tracehook_report_syscall_entry(regs))
+ return -1L;
+
+ if (emulated)
+ return -1L;
+
#ifdef CONFIG_SECCOMP
/*
- * Do seccomp first -- it should minimize exposure of other
- * code, and keeping seccomp fast is probably more valuable
- * than the rest of this.
+ * Do seccomp after ptrace, to catch any tracer changes.
*/
if (work & _TIF_SECCOMP) {
struct seccomp_data sd;
@@ -118,69 +120,12 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
sd.args[5] = regs->bp;
}
- BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
- BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);
-
- ret = seccomp_phase1(&sd);
- if (ret == SECCOMP_PHASE1_SKIP) {
- regs->orig_ax = -1;
- ret = 0;
- } else if (ret != SECCOMP_PHASE1_OK) {
- return ret; /* Go directly to phase 2 */
- }
-
- work &= ~_TIF_SECCOMP;
- }
-#endif
-
- /* Do our best to finish without phase 2. */
- if (work == 0)
- return ret; /* seccomp and/or nohz only (ret == 0 here) */
-
-#ifdef CONFIG_AUDITSYSCALL
- if (work == _TIF_SYSCALL_AUDIT) {
- /*
- * If there is no more work to be done except auditing,
- * then audit in phase 1. Phase 2 always audits, so, if
- * we audit here, then we can't go on to phase 2.
- */
- do_audit_syscall_entry(regs, arch);
- return 0;
+ ret = __secure_computing(&sd);
+ if (ret == -1)
+ return ret;
}
#endif
- return 1; /* Something is enabled that we can't handle in phase 1 */
-}
-
-/* Returns the syscall nr to run (which should match regs->orig_ax). */
-long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
- unsigned long phase1_result)
-{
- struct thread_info *ti = pt_regs_to_thread_info(regs);
- long ret = 0;
- u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
-
- if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
- BUG_ON(regs != task_pt_regs(current));
-
-#ifdef CONFIG_SECCOMP
- /*
- * Call seccomp_phase2 before running the other hooks so that
- * they can see any changes made by a seccomp tracer.
- */
- if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
- /* seccomp failures shouldn't expose any additional code. */
- return -1;
- }
-#endif
-
- if (unlikely(work & _TIF_SYSCALL_EMU))
- ret = -1L;
-
- if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
- tracehook_report_syscall_entry(regs))
- ret = -1L;
-
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->orig_ax);
@@ -189,17 +134,6 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
return ret ?: regs->orig_ax;
}
-long syscall_trace_enter(struct pt_regs *regs)
-{
- u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
- unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);
-
- if (phase1_result == 0)
- return regs->orig_ax;
- else
- return syscall_trace_enter_phase2(regs, arch, phase1_result);
-}
-
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
@@ -274,7 +208,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
ti->status &= ~TS_COMPAT;
#endif
- user_enter();
+ user_enter_irqoff();
}
#define SYSCALL_EXIT_WORK_FLAGS \
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 983e5d3a0d27..0b56666e6039 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1153,3 +1153,14 @@ ENTRY(async_page_fault)
jmp error_code
END(async_page_fault)
#endif
+
+ENTRY(rewind_stack_do_exit)
+ /* Prevent any naive code from trying to unwind to our caller. */
+ xorl %ebp, %ebp
+
+ movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
+ leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
+
+ call do_exit
+1: jmp 1b
+END(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 9ee0da1807ed..b846875aeea6 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1423,3 +1423,14 @@ ENTRY(ignore_sysret)
mov $-ENOSYS, %eax
sysret
END(ignore_sysret)
+
+ENTRY(rewind_stack_do_exit)
+ /* Prevent any naive code from trying to unwind to our caller. */
+ xorl %ebp, %ebp
+
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
+ leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+
+ call do_exit
+1: jmp 1b
+END(rewind_stack_do_exit)
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 555263e385c9..e9ce9c7c39b4 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -374,5 +374,5 @@
543 x32 io_setup compat_sys_io_setup
544 x32 io_submit compat_sys_io_submit
545 x32 execveat compat_sys_execveat/ptregs
-534 x32 preadv2 compat_sys_preadv2
-535 x32 pwritev2 compat_sys_pwritev2
+546 x32 preadv2 compat_sys_preadv64v2
+547 x32 pwritev2 compat_sys_pwritev64v2
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 027aec4a74df..627ecbcb2e62 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -33,7 +33,7 @@
.endif
call \func
- jmp restore
+ jmp .L_restore
_ASM_NOKPROBE(\name)
.endm
@@ -54,7 +54,7 @@
#if defined(CONFIG_TRACE_IRQFLAGS) \
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
|| defined(CONFIG_PREEMPT)
-restore:
+.L_restore:
popq %r11
popq %r10
popq %r9
@@ -66,5 +66,5 @@ restore:
popq %rdi
popq %rbp
ret
- _ASM_NOKPROBE(restore)
+ _ASM_NOKPROBE(.L_restore)
#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 253b72eaade6..6ba89a1ab0e5 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -55,7 +55,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
-HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/$(SUBARCH)/include/uapi
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
@@ -134,7 +134,7 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
targets += vdso32/vdso32.lds
-targets += vdso32/note.o vdso32/vclock_gettime.o vdso32/system_call.o
+targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
targets += vdso32/vclock_gettime.o
KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
@@ -156,7 +156,8 @@ $(obj)/vdso32.so.dbg: FORCE \
$(obj)/vdso32/vdso32.lds \
$(obj)/vdso32/vclock_gettime.o \
$(obj)/vdso32/note.o \
- $(obj)/vdso32/system_call.o
+ $(obj)/vdso32/system_call.o \
+ $(obj)/vdso32/sigreturn.o
$(call if_changed,vdso)
#
diff --git a/arch/x86/entry/vdso/vdso32/sigreturn.S b/arch/x86/entry/vdso/vdso32/sigreturn.S
index d7ec4e251c0a..20633e026e82 100644
--- a/arch/x86/entry/vdso/vdso32/sigreturn.S
+++ b/arch/x86/entry/vdso/vdso32/sigreturn.S
@@ -1,11 +1,3 @@
-/*
- * Common code for the sigreturn entry points in vDSO images.
- * So far this code is the same for both int80 and sysenter versions.
- * This file is #include'd by int80.S et al to define them first thing.
- * The kernel assumes that the addresses of these routines are constant
- * for all vDSO implementations.
- */
-
#include <linux/linkage.h>
#include <asm/unistd_32.h>
#include <asm/asm-offsets.h>
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 0109ac6cb79c..ed4bc9731cbb 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -2,16 +2,11 @@
* AT_SYSINFO entry point
*/
+#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
-/*
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#include "sigreturn.S"
-
.text
.globl __kernel_vsyscall
.type __kernel_vsyscall,@function
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index ab220ac9b3b9..f840766659a8 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -12,6 +12,7 @@
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
+#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
@@ -97,10 +98,40 @@ static int vdso_fault(const struct vm_special_mapping *sm,
return 0;
}
-static const struct vm_special_mapping text_mapping = {
- .name = "[vdso]",
- .fault = vdso_fault,
-};
+static void vdso_fix_landing(const struct vdso_image *image,
+ struct vm_area_struct *new_vma)
+{
+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+ if (in_ia32_syscall() && image == &vdso_image_32) {
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long vdso_land = image->sym_int80_landing_pad;
+ unsigned long old_land_addr = vdso_land +
+ (unsigned long)current->mm->context.vdso;
+
+ /* Fixing userspace landing - look at do_fast_syscall_32 */
+ if (regs->ip == old_land_addr)
+ regs->ip = new_vma->vm_start + vdso_land;
+ }
+#endif
+}
+
+static int vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+ const struct vdso_image *image = current->mm->context.vdso_image;
+
+ if (image->size != new_size)
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+ return -EFAULT;
+
+ vdso_fix_landing(image, new_vma);
+ current->mm->context.vdso = (void __user *)new_vma->vm_start;
+
+ return 0;
+}
static int vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -151,6 +182,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
struct vm_area_struct *vma;
unsigned long addr, text_start;
int ret = 0;
+
+ static const struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ .fault = vdso_fault,
+ .mremap = vdso_mremap,
+ };
static const struct vm_special_mapping vvar_mapping = {
.name = "[vvar]",
.fault = vvar_fault,
@@ -185,7 +222,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
image->size,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &text_mapping);
+ &vdso_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
@@ -294,15 +331,9 @@ static void vgetcpu_cpu_init(void *arg)
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
-static int
-vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
+static int vgetcpu_online(unsigned int cpu)
{
- long cpu = (long)arg;
-
- if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
- smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
-
- return NOTIFY_DONE;
+ return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}
static int __init init_vdso(void)
@@ -313,15 +344,9 @@ static int __init init_vdso(void)
init_vdso_image(&vdso_image_x32);
#endif
- cpu_notifier_register_begin();
-
- on_each_cpu(vgetcpu_cpu_init, NULL, 1);
/* notifier priority > KVM */
- __hotcpu_notifier(vgetcpu_cpu_notifier, 30);
-
- cpu_notifier_register_done();
-
- return 0;
+ return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
+ "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 174c2549939d..636c4b341f36 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -96,7 +96,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
/*
* XXX: if access_ok, get_user, and put_user handled
- * sig_on_uaccess_error, this could go away.
+ * sig_on_uaccess_err, this could go away.
*/
if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
@@ -125,7 +125,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
struct task_struct *tsk;
unsigned long caller;
int vsyscall_nr, syscall_nr, tmp;
- int prev_sig_on_uaccess_error;
+ int prev_sig_on_uaccess_err;
long ret;
/*
@@ -207,7 +207,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
*/
regs->orig_ax = syscall_nr;
regs->ax = -ENOSYS;
- tmp = secure_computing();
+ tmp = secure_computing(NULL);
if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
warn_bad_vsyscall(KERN_DEBUG, regs,
"seccomp tried to change syscall nr or ip");
@@ -221,8 +221,8 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
* With a real vsyscall, page faults cause SIGSEGV. We want to
* preserve that behavior to make writing exploits harder.
*/
- prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
- current_thread_info()->sig_on_uaccess_error = 1;
+ prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
+ current->thread.sig_on_uaccess_err = 1;
ret = -EFAULT;
switch (vsyscall_nr) {
@@ -243,7 +243,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
break;
}
- current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+ current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
check_fault:
if (ret == -EFAULT) {
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index bd3e8421b57c..e07a22bb9308 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -370,13 +370,13 @@ static int amd_pmu_cpu_prepare(int cpu)
WARN_ON_ONCE(cpuc->amd_nb);
if (!x86_pmu.amd_nb_constraints)
- return NOTIFY_OK;
+ return 0;
cpuc->amd_nb = amd_alloc_nb(cpu);
if (!cpuc->amd_nb)
- return NOTIFY_BAD;
+ return -ENOMEM;
- return NOTIFY_OK;
+ return 0;
}
static void amd_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index feb90f6730e8..b26ee32f73e8 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -7,7 +7,8 @@
*/
#include <linux/perf_event.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
@@ -655,8 +656,12 @@ fail:
}
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
- raw.size = sizeof(u32) + ibs_data.size;
- raw.data = ibs_data.data;
+ raw = (struct perf_raw_record){
+ .frag = {
+ .size = sizeof(u32) + ibs_data.size,
+ .data = ibs_data.data,
+ },
+ };
data.raw = &raw;
}
@@ -721,13 +726,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
return ret;
}
-static __init int perf_event_ibs_init(void)
+static __init void perf_event_ibs_init(void)
{
struct attribute **attr = ibs_op_format_attrs;
- if (!ibs_caps)
- return -ENODEV; /* ibs not supported by the cpu */
-
perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
if (ibs_caps & IBS_CAPS_OPCNT) {
@@ -738,13 +740,11 @@ static __init int perf_event_ibs_init(void)
register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
-
- return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
-static __init int perf_event_ibs_init(void) { return 0; }
+static __init void perf_event_ibs_init(void) { }
#endif
@@ -921,7 +921,7 @@ static inline int get_ibs_lvt_offset(void)
return val & IBSCTL_LVT_OFFSET_MASK;
}
-static void setup_APIC_ibs(void *dummy)
+static void setup_APIC_ibs(void)
{
int offset;
@@ -936,7 +936,7 @@ failed:
smp_processor_id());
}
-static void clear_APIC_ibs(void *dummy)
+static void clear_APIC_ibs(void)
{
int offset;
@@ -945,18 +945,24 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
+static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
+{
+ setup_APIC_ibs();
+ return 0;
+}
+
#ifdef CONFIG_PM
static int perf_ibs_suspend(void)
{
- clear_APIC_ibs(NULL);
+ clear_APIC_ibs();
return 0;
}
static void perf_ibs_resume(void)
{
ibs_eilvt_setup();
- setup_APIC_ibs(NULL);
+ setup_APIC_ibs();
}
static struct syscore_ops perf_ibs_syscore_ops = {
@@ -975,27 +981,15 @@ static inline void perf_ibs_pm_init(void) { }
#endif
-static int
-perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- setup_APIC_ibs(NULL);
- break;
- case CPU_DYING:
- clear_APIC_ibs(NULL);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
+ clear_APIC_ibs();
+ return 0;
}
static __init int amd_ibs_init(void)
{
u32 caps;
- int ret = -EINVAL;
caps = __get_ibs_caps();
if (!caps)
@@ -1004,22 +998,25 @@ static __init int amd_ibs_init(void)
ibs_eilvt_setup();
if (!ibs_eilvt_valid())
- goto out;
+ return -EINVAL;
perf_ibs_pm_init();
- cpu_notifier_register_begin();
+
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
smp_mb();
- smp_call_function(setup_APIC_ibs, NULL, 1);
- __perf_cpu_notifier(perf_ibs_cpu_notifier);
- cpu_notifier_register_done();
+ /*
+ * x86_pmu_amd_ibs_starting_cpu will be called from core on
+ * all online cpus.
+ */
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+ "AP_PERF_X86_AMD_IBS_STARTING",
+ x86_pmu_amd_ibs_starting_cpu,
+ x86_pmu_amd_ibs_dying_cpu);
- ret = perf_event_ibs_init();
-out:
- if (ret)
- pr_err("Failed to setup IBS, %d\n", ret);
- return ret;
+ perf_event_ibs_init();
+
+ return 0;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 6011a573dd64..b28200dea715 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -12,7 +12,7 @@
*/
#include <linux/perf_event.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 55a3529dbf12..9842270ed2f2 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -228,12 +228,12 @@ static struct pmu pmu_class = {
.read = pmu_event_read,
};
-static void power_cpu_exit(int cpu)
+static int power_cpu_exit(unsigned int cpu)
{
int target;
if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
- return;
+ return 0;
/*
* Find a new CPU on the same compute unit, if was set in cpumask
@@ -245,9 +245,10 @@ static void power_cpu_exit(int cpu)
cpumask_set_cpu(target, &cpu_mask);
perf_pmu_migrate_context(&pmu_class, cpu, target);
}
+ return 0;
}
-static void power_cpu_init(int cpu)
+static int power_cpu_init(unsigned int cpu)
{
int target;
@@ -255,7 +256,7 @@ static void power_cpu_init(int cpu)
* 1) If any CPU is set at cpu_mask in the same compute unit, do
* nothing.
* 2) If no CPU is set at cpu_mask in the same compute unit,
- * set current STARTING CPU.
+ * set current ONLINE CPU.
*
* Note: if there is a CPU aside of the new one already in the
* sibling mask, then it is also in cpu_mask.
@@ -263,33 +264,9 @@ static void power_cpu_init(int cpu)
target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
if (target >= nr_cpumask_bits)
cpumask_set_cpu(cpu, &cpu_mask);
+ return 0;
}
-static int
-power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_FAILED:
- case CPU_STARTING:
- power_cpu_init(cpu);
- break;
- case CPU_DOWN_PREPARE:
- power_cpu_exit(cpu);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block power_cpu_notifier_nb = {
- .notifier_call = power_cpu_notifier,
- .priority = CPU_PRI_PERF,
-};
-
static const struct x86_cpu_id cpu_match[] = {
{ .vendor = X86_VENDOR_AMD, .family = 0x15 },
{},
@@ -297,7 +274,7 @@ static const struct x86_cpu_id cpu_match[] = {
static int __init amd_power_pmu_init(void)
{
- int cpu, target, ret;
+ int ret;
if (!x86_match_cpu(cpu_match))
return 0;
@@ -312,38 +289,25 @@ static int __init amd_power_pmu_init(void)
return -ENODEV;
}
- cpu_notifier_register_begin();
- /* Choose one online core of each compute unit. */
- for_each_online_cpu(cpu) {
- target = cpumask_first(topology_sibling_cpumask(cpu));
- if (!cpumask_test_cpu(target, &cpu_mask))
- cpumask_set_cpu(target, &cpu_mask);
- }
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
+ "AP_PERF_X86_AMD_POWER_ONLINE",
+ power_cpu_init, power_cpu_exit);
ret = perf_pmu_register(&pmu_class, "power", -1);
if (WARN_ON(ret)) {
pr_warn("AMD Power PMU registration failed\n");
- goto out;
+ return ret;
}
- __register_cpu_notifier(&power_cpu_notifier_nb);
-
pr_info("AMD Power PMU detected\n");
-
-out:
- cpu_notifier_register_done();
-
return ret;
}
module_init(amd_power_pmu_init);
static void __exit amd_power_pmu_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&power_cpu_notifier_nb);
- cpu_notifier_register_done();
-
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
perf_pmu_unregister(&pmu_class);
}
module_exit(amd_power_pmu_exit);
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 98ac57381bf9..e6131d4454e6 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -358,7 +358,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
return this;
}
-static void amd_uncore_cpu_starting(unsigned int cpu)
+static int amd_uncore_cpu_starting(unsigned int cpu)
{
unsigned int eax, ebx, ecx, edx;
struct amd_uncore *uncore;
@@ -384,6 +384,8 @@ static void amd_uncore_cpu_starting(unsigned int cpu)
uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
}
+
+ return 0;
}
static void uncore_online(unsigned int cpu,
@@ -398,13 +400,15 @@ static void uncore_online(unsigned int cpu,
cpumask_set_cpu(cpu, uncore->active_mask);
}
-static void amd_uncore_cpu_online(unsigned int cpu)
+static int amd_uncore_cpu_online(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_online(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_online(cpu, amd_uncore_l2);
+
+ return 0;
}
static void uncore_down_prepare(unsigned int cpu,
@@ -433,13 +437,15 @@ static void uncore_down_prepare(unsigned int cpu,
}
}
-static void amd_uncore_cpu_down_prepare(unsigned int cpu)
+static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_down_prepare(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_down_prepare(cpu, amd_uncore_l2);
+
+ return 0;
}
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
@@ -454,74 +460,19 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
*per_cpu_ptr(uncores, cpu) = NULL;
}
-static void amd_uncore_cpu_dead(unsigned int cpu)
+static int amd_uncore_cpu_dead(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_dead(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_dead(cpu, amd_uncore_l2);
-}
-
-static int
-amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- if (amd_uncore_cpu_up_prepare(cpu))
- return notifier_from_errno(-ENOMEM);
- break;
-
- case CPU_STARTING:
- amd_uncore_cpu_starting(cpu);
- break;
-
- case CPU_ONLINE:
- amd_uncore_cpu_online(cpu);
- break;
-
- case CPU_DOWN_PREPARE:
- amd_uncore_cpu_down_prepare(cpu);
- break;
-
- case CPU_UP_CANCELED:
- case CPU_DEAD:
- amd_uncore_cpu_dead(cpu);
- break;
-
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block amd_uncore_cpu_notifier_block = {
- .notifier_call = amd_uncore_cpu_notifier,
- .priority = CPU_PRI_PERF + 1,
-};
-
-static void __init init_cpu_already_online(void *dummy)
-{
- unsigned int cpu = smp_processor_id();
-
- amd_uncore_cpu_starting(cpu);
- amd_uncore_cpu_online(cpu);
-}
-static void cleanup_cpu_online(void *dummy)
-{
- unsigned int cpu = smp_processor_id();
-
- amd_uncore_cpu_dead(cpu);
+ return 0;
}
static int __init amd_uncore_init(void)
{
- unsigned int cpu, cpu2;
int ret = -ENODEV;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
@@ -558,38 +509,29 @@ static int __init amd_uncore_init(void)
ret = 0;
}
- if (ret)
- goto fail_nodev;
-
- cpu_notifier_register_begin();
-
- /* init cpus already online before registering for hotplug notifier */
- for_each_online_cpu(cpu) {
- ret = amd_uncore_cpu_up_prepare(cpu);
- if (ret)
- goto fail_online;
- smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
- }
-
- __register_cpu_notifier(&amd_uncore_cpu_notifier_block);
- cpu_notifier_register_done();
-
+ /*
+ * Install callbacks. Core will call them for each online cpu.
+ */
+ if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
+ "PERF_X86_AMD_UNCORE_PREP",
+ amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
+ goto fail_l2;
+
+ if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+ "AP_PERF_X86_AMD_UNCORE_STARTING",
+ amd_uncore_cpu_starting, NULL))
+ goto fail_prep;
+ if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+ "AP_PERF_X86_AMD_UNCORE_ONLINE",
+ amd_uncore_cpu_online,
+ amd_uncore_cpu_down_prepare))
+ goto fail_start;
return 0;
-
-fail_online:
- for_each_online_cpu(cpu2) {
- if (cpu2 == cpu)
- break;
- smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1);
- }
- cpu_notifier_register_done();
-
- /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
- amd_uncore_nb = amd_uncore_l2 = NULL;
-
- if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
- perf_pmu_unregister(&amd_l2_pmu);
+fail_start:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
+fail_prep:
+ cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_l2:
if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
perf_pmu_unregister(&amd_nb_pmu);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 33787ee817f0..fad97886d8b1 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -17,7 +17,8 @@
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -263,7 +264,7 @@ static bool check_hw_exists(void)
msr_fail:
pr_cont("Broken PMU hardware detected, using software events only.\n");
- pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+ printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
reg, val_new);
@@ -1477,49 +1478,49 @@ NOKPROBE_SYMBOL(perf_event_nmi_handler);
struct event_constraint emptyconstraint;
struct event_constraint unconstrained;
-static int
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_prepare_cpu(unsigned int cpu)
{
- unsigned int cpu = (long)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int i, ret = NOTIFY_OK;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
- cpuc->kfree_on_online[i] = NULL;
- if (x86_pmu.cpu_prepare)
- ret = x86_pmu.cpu_prepare(cpu);
- break;
-
- case CPU_STARTING:
- if (x86_pmu.cpu_starting)
- x86_pmu.cpu_starting(cpu);
- break;
+ int i;
- case CPU_ONLINE:
- for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
- kfree(cpuc->kfree_on_online[i]);
- cpuc->kfree_on_online[i] = NULL;
- }
- break;
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+ cpuc->kfree_on_online[i] = NULL;
+ if (x86_pmu.cpu_prepare)
+ return x86_pmu.cpu_prepare(cpu);
+ return 0;
+}
- case CPU_DYING:
- if (x86_pmu.cpu_dying)
- x86_pmu.cpu_dying(cpu);
- break;
+static int x86_pmu_dead_cpu(unsigned int cpu)
+{
+ if (x86_pmu.cpu_dead)
+ x86_pmu.cpu_dead(cpu);
+ return 0;
+}
- case CPU_UP_CANCELED:
- case CPU_DEAD:
- if (x86_pmu.cpu_dead)
- x86_pmu.cpu_dead(cpu);
- break;
+static int x86_pmu_online_cpu(unsigned int cpu)
+{
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ int i;
- default:
- break;
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+ kfree(cpuc->kfree_on_online[i]);
+ cpuc->kfree_on_online[i] = NULL;
}
+ return 0;
+}
- return ret;
+static int x86_pmu_starting_cpu(unsigned int cpu)
+{
+ if (x86_pmu.cpu_starting)
+ x86_pmu.cpu_starting(cpu);
+ return 0;
+}
+
+static int x86_pmu_dying_cpu(unsigned int cpu)
+{
+ if (x86_pmu.cpu_dying)
+ x86_pmu.cpu_dying(cpu);
+ return 0;
}
static void __init pmu_check_apic(void)
@@ -1622,6 +1623,29 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, cha
}
EXPORT_SYMBOL_GPL(events_sysfs_show);
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_ht_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_ht_attr, attr);
+
+ /*
+ * Report conditional events depending on Hyper-Threading.
+ *
+ * This is overly conservative as usually the HT special
+ * handling is not needed if the other CPU thread is idle.
+ *
+ * Note this does not (and cannot) handle the case when thread
+ * siblings are invisible, for example with virtualization
+ * if they are owned by some other guest. The user tool
+ * has to re-read when a thread sibling gets onlined later.
+ */
+ return sprintf(page, "%s",
+ topology_max_smt_threads() > 1 ?
+ pmu_attr->event_str_ht :
+ pmu_attr->event_str_noht);
+}
+
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
EVENT_ATTR(instructions, INSTRUCTIONS );
EVENT_ATTR(cache-references, CACHE_REFERENCES );
@@ -1764,10 +1788,39 @@ static int __init init_hw_perf_events(void)
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
- perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
- perf_cpu_notifier(x86_pmu_notifier);
+ /*
+ * Install callbacks. Core will call them for each online
+ * cpu.
+ */
+ err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+ x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
+ if (err)
+ return err;
+
+ err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
+ "AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+ x86_pmu_dying_cpu);
+ if (err)
+ goto out;
+
+ err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+ x86_pmu_online_cpu, NULL);
+ if (err)
+ goto out1;
+
+ err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+ if (err)
+ goto out2;
return 0;
+
+out2:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
+out1:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
+out:
+ cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
+ return err;
}
early_initcall(init_hw_perf_events);
@@ -2319,7 +2372,7 @@ void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stack_frame frame;
- const void __user *fp;
+ const unsigned long __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
@@ -2332,7 +2385,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
return;
- fp = (void __user *)regs->bp;
+ fp = (unsigned long __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
@@ -2345,16 +2398,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
pagefault_disable();
while (entry->nr < entry->max_stack) {
unsigned long bytes;
+
frame.next_frame = NULL;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, 16))
+ if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
break;
- bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+ bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
if (bytes != 0)
break;
- bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+ bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
if (bytes != 0)
break;
diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile
index 3660b2cf245a..06c2baa51814 100644
--- a/arch/x86/events/intel/Makefile
+++ b/arch/x86/events/intel/Makefile
@@ -1,8 +1,8 @@
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
-intel-rapl-objs := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
+intel-rapl-perf-objs := rapl.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 7c666958a625..2cbde2f449aa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -16,6 +16,7 @@
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
+#include <asm/intel-family.h>
#include <asm/apic.h>
#include "../perf_event.h"
@@ -115,6 +116,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -139,6 +144,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -177,19 +186,27 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
EVENT_CONSTRAINT_END
};
-struct event_constraint intel_skl_event_constraints[] = {
+static struct event_constraint intel_skl_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
+
+ /*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
+
EVENT_CONSTRAINT_END
};
static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
- INTEL_UEVENT_EXTRA_REG(0x01b7,
- MSR_OFFCORE_RSP_0, 0x7f9ffbffffull, RSP_0),
- INTEL_UEVENT_EXTRA_REG(0x02b7,
- MSR_OFFCORE_RSP_1, 0x3f9ffbffffull, RSP_1),
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
EVENT_EXTRA_END
};
@@ -225,14 +242,51 @@ EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
-struct attribute *nhm_events_attrs[] = {
+static struct attribute *nhm_events_attrs[] = {
EVENT_PTR(mem_ld_nhm),
NULL,
};
-struct attribute *snb_events_attrs[] = {
+/*
+ * topdown events for Intel Core CPUs.
+ *
+ * The events are all specified in slots, where a slot is a free issue
+ * slot in a 4-wide pipeline. Some events are already reported in slots;
+ * for cycle-based events we multiply by the pipeline width (4).
+ *
+ * With Hyper Threading on, topdown metrics are either summed or averaged
+ * between the threads of a core: (count_t0 + count_t1).
+ *
+ * For the average case the metric is always scaled to pipeline width,
+ * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
+ */
+
+EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
+ "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
+ "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
+EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
+EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
+ "event=0xe,umask=0x1"); /* uops_issued.any */
+EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
+ "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
+EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
+ "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
+EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
+ "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
+ "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
+EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
+ "4", "2");
+
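+/*
+ * Worked example (illustrative): if both siblings of a core each count
+ * 1000 unhalted cycles, the any=1 counter reports the sum, 2000.  The
+ * per-thread average is 2000 / 2 = 1000 cycles, and multiplying by the
+ * pipeline width gives 1000 * 4 = 4000 total slots.  Folding both steps
+ * into one factor yields 2000 * 2 = 4000, hence ".scale" of 2 with HT
+ * and 4 without.
+ */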
+static struct attribute *snb_events_attrs[] = {
EVENT_PTR(mem_ld_snb),
EVENT_PTR(mem_st_snb),
+ EVENT_PTR(td_slots_issued),
+ EVENT_PTR(td_slots_retired),
+ EVENT_PTR(td_fetch_bubbles),
+ EVENT_PTR(td_total_slots),
+ EVENT_PTR(td_total_slots_scale),
+ EVENT_PTR(td_recovery_bubbles),
+ EVENT_PTR(td_recovery_bubbles_scale),
NULL,
};
@@ -250,6 +304,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -258,12 +316,19 @@ static struct event_constraint intel_hsw_event_constraints[] = {
EVENT_CONSTRAINT_END
};
-struct event_constraint intel_bdw_event_constraints[] = {
+static struct event_constraint intel_bdw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+ /*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
EVENT_CONSTRAINT_END
};
@@ -1332,6 +1397,29 @@ static __initconst const u64 atom_hw_cache_event_ids
},
};
+EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
+EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
+/* no_alloc_cycles.not_delivered */
+EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
+ "event=0xca,umask=0x50");
+EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
+/* uops_retired.all */
+EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
+ "event=0xc2,umask=0x10");
+/* uops_retired.all */
+EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
+ "event=0xc2,umask=0x10");
+
+static struct attribute *slm_events_attrs[] = {
+ EVENT_PTR(td_total_slots_slm),
+ EVENT_PTR(td_total_slots_scale_slm),
+ EVENT_PTR(td_fetch_bubbles_slm),
+ EVENT_PTR(td_fetch_bubbles_scale_slm),
+ EVENT_PTR(td_slots_issued_slm),
+ EVENT_PTR(td_slots_retired_slm),
+ NULL
+};
+
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
@@ -3021,7 +3109,7 @@ static int intel_pmu_cpu_prepare(int cpu)
cpuc->excl_thread_id = 0;
}
- return NOTIFY_OK;
+ return 0;
err_constraint_list:
kfree(cpuc->constraint_list);
@@ -3032,7 +3120,7 @@ err_shared_regs:
cpuc->shared_regs = NULL;
err:
- return NOTIFY_BAD;
+ return -ENOMEM;
}
static void intel_pmu_cpu_starting(int cpu)
@@ -3261,11 +3349,11 @@ static int intel_snb_pebs_broken(int cpu)
u32 rev = UINT_MAX; /* default to broken for unknown models */
switch (cpu_data(cpu).x86_model) {
- case 42: /* SNB */
+ case INTEL_FAM6_SANDYBRIDGE:
rev = 0x28;
break;
- case 45: /* SNB-EP */
+ case INTEL_FAM6_SANDYBRIDGE_X:
switch (cpu_data(cpu).x86_mask) {
case 6: rev = 0x618; break;
case 7: rev = 0x70c; break;
@@ -3302,6 +3390,13 @@ static void intel_snb_check_microcode(void)
}
}
+static bool is_lbr_from(unsigned long msr)
+{
+ unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
+
+ return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
+}
+
/*
 * Under certain circumstances, accessing certain MSRs may cause #GP.
* The function tests if the input MSR can be safely accessed.
@@ -3322,13 +3417,24 @@ static bool check_msr(unsigned long msr, u64 mask)
* Only change the bits which can be updated by wrmsrl.
*/
val_tmp = val_old ^ mask;
+
+ if (is_lbr_from(msr))
+ val_tmp = lbr_from_signext_quirk_wr(val_tmp);
+
if (wrmsrl_safe(msr, val_tmp) ||
rdmsrl_safe(msr, &val_new))
return false;
+ /*
+	 * Only the value written by wrmsr() is transformed by the quirk, and
+	 * val_tmp was already transformed above, so the value read back by
+	 * rdmsrl() should still equal val_tmp.
+ */
if (val_new != val_tmp)
return false;
+ if (is_lbr_from(msr))
+ val_old = lbr_from_signext_quirk_wr(val_old);
+
/* Here it's sure that the MSR can be safely accessed.
* Restore the old value and return.
*/
@@ -3437,6 +3543,13 @@ static struct attribute *hsw_events_attrs[] = {
EVENT_PTR(cycles_ct),
EVENT_PTR(mem_ld_hsw),
EVENT_PTR(mem_st_hsw),
+ EVENT_PTR(td_slots_issued),
+ EVENT_PTR(td_slots_retired),
+ EVENT_PTR(td_fetch_bubbles),
+ EVENT_PTR(td_total_slots),
+ EVENT_PTR(td_total_slots_scale),
+ EVENT_PTR(td_recovery_bubbles),
+ EVENT_PTR(td_recovery_bubbles_scale),
NULL
};
@@ -3508,15 +3621,15 @@ __init int intel_pmu_init(void)
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_model) {
- case 14: /* 65nm Core "Yonah" */
+ case INTEL_FAM6_CORE_YONAH:
pr_cont("Core events, ");
break;
- case 15: /* 65nm Core2 "Merom" */
+ case INTEL_FAM6_CORE2_MEROM:
x86_add_quirk(intel_clovertown_quirk);
- case 22: /* 65nm Core2 "Merom-L" */
- case 23: /* 45nm Core2 "Penryn" */
- case 29: /* 45nm Core2 "Dunnington (MP) */
+ case INTEL_FAM6_CORE2_MEROM_L:
+ case INTEL_FAM6_CORE2_PENRYN:
+ case INTEL_FAM6_CORE2_DUNNINGTON:
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -3527,9 +3640,9 @@ __init int intel_pmu_init(void)
pr_cont("Core2 events, ");
break;
- case 30: /* 45nm Nehalem */
- case 26: /* 45nm Nehalem-EP */
- case 46: /* 45nm Nehalem-EX */
+ case INTEL_FAM6_NEHALEM:
+ case INTEL_FAM6_NEHALEM_EP:
+ case INTEL_FAM6_NEHALEM_EX:
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3557,11 +3670,11 @@ __init int intel_pmu_init(void)
pr_cont("Nehalem events, ");
break;
- case 28: /* 45nm Atom "Pineview" */
- case 38: /* 45nm Atom "Lincroft" */
- case 39: /* 32nm Atom "Penwell" */
- case 53: /* 32nm Atom "Cloverview" */
- case 54: /* 32nm Atom "Cedarview" */
+ case INTEL_FAM6_ATOM_PINEVIEW:
+ case INTEL_FAM6_ATOM_LINCROFT:
+ case INTEL_FAM6_ATOM_PENWELL:
+ case INTEL_FAM6_ATOM_CLOVERVIEW:
+ case INTEL_FAM6_ATOM_CEDARVIEW:
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -3573,9 +3686,9 @@ __init int intel_pmu_init(void)
pr_cont("Atom events, ");
break;
- case 55: /* 22nm Atom "Silvermont" */
- case 76: /* 14nm Atom "Airmont" */
- case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+ case INTEL_FAM6_ATOM_SILVERMONT1:
+ case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_AIRMONT:
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3587,11 +3700,12 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
x86_pmu.extra_regs = intel_slm_extra_regs;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.cpu_events = slm_events_attrs;
pr_cont("Silvermont events, ");
break;
- case 92: /* 14nm Atom "Goldmont" */
- case 95: /* 14nm Atom "Goldmont Denverton" */
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_DENVERTON:
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -3614,9 +3728,9 @@ __init int intel_pmu_init(void)
pr_cont("Goldmont events, ");
break;
- case 37: /* 32nm Westmere */
- case 44: /* 32nm Westmere-EP */
- case 47: /* 32nm Westmere-EX */
+ case INTEL_FAM6_WESTMERE:
+ case INTEL_FAM6_WESTMERE_EP:
+ case INTEL_FAM6_WESTMERE_EX:
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3643,8 +3757,8 @@ __init int intel_pmu_init(void)
pr_cont("Westmere events, ");
break;
- case 42: /* 32nm SandyBridge */
- case 45: /* 32nm SandyBridge-E/EN/EP */
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_SANDYBRIDGE_X:
x86_add_quirk(intel_sandybridge_quirk);
x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
@@ -3657,7 +3771,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- if (boot_cpu_data.x86_model == 45)
+ if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3679,8 +3793,8 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
- case 58: /* 22nm IvyBridge */
- case 62: /* 22nm IvyBridge-EP/EX */
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE_X:
x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -3696,7 +3810,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
x86_pmu.pebs_prec_dist = true;
- if (boot_cpu_data.x86_model == 62)
+ if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3714,10 +3828,10 @@ __init int intel_pmu_init(void)
break;
- case 60: /* 22nm Haswell Core */
- case 63: /* 22nm Haswell Server */
- case 69: /* 22nm Haswell ULT */
- case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+ case INTEL_FAM6_HASWELL_CORE:
+ case INTEL_FAM6_HASWELL_X:
+ case INTEL_FAM6_HASWELL_ULT:
+ case INTEL_FAM6_HASWELL_GT3E:
x86_add_quirk(intel_ht_bug);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -3741,10 +3855,10 @@ __init int intel_pmu_init(void)
pr_cont("Haswell events, ");
break;
- case 61: /* 14nm Broadwell Core-M */
- case 86: /* 14nm Broadwell Xeon D */
- case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
- case 79: /* 14nm Broadwell Server */
+ case INTEL_FAM6_BROADWELL_CORE:
+ case INTEL_FAM6_BROADWELL_XEON_D:
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_X:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -3777,7 +3891,7 @@ __init int intel_pmu_init(void)
pr_cont("Broadwell events, ");
break;
- case 87: /* Knights Landing Xeon Phi */
+ case INTEL_FAM6_XEON_PHI_KNL:
memcpy(hw_cache_event_ids,
slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs,
@@ -3795,16 +3909,22 @@ __init int intel_pmu_init(void)
pr_cont("Knights Landing events, ");
break;
- case 142: /* 14nm Kabylake Mobile */
- case 158: /* 14nm Kabylake Desktop */
- case 78: /* 14nm Skylake Mobile */
- case 94: /* 14nm Skylake Desktop */
- case 85: /* 14nm Skylake Server */
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_skl();
+ /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
+ event_attr_td_recovery_bubbles.event_str_noht =
+ "event=0xd,umask=0x1,cmask=1";
+ event_attr_td_recovery_bubbles.event_str_ht =
+ "event=0xd,umask=0x1,cmask=1,any=1";
+
x86_pmu.event_constraints = intel_skl_event_constraints;
x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
x86_pmu.extra_regs = intel_skl_extra_regs;
@@ -3885,6 +4005,8 @@ __init int intel_pmu_init(void)
x86_pmu.lbr_nr = 0;
}
+ if (x86_pmu.lbr_nr)
+ pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
/*
* Access extra MSR may cause #GP under certain circumstances.
* E.g. KVM doesn't support offcore event
@@ -3917,16 +4039,14 @@ __init int intel_pmu_init(void)
*/
static __init int fixup_ht_bug(void)
{
- int cpu = smp_processor_id();
- int w, c;
+ int c;
/*
* problem not present on this CPU model, nothing to do
*/
if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
return 0;
- w = cpumask_weight(topology_sibling_cpumask(cpu));
- if (w > 1) {
+ if (topology_max_smt_threads() > 1) {
pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
return 0;
}
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 7b5fd811ef45..783c49ddef29 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1577,7 +1577,7 @@ static inline void cqm_pick_event_reader(int cpu)
cpumask_set_cpu(cpu, &cqm_cpumask);
}
-static void intel_cqm_cpu_starting(unsigned int cpu)
+static int intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1588,39 +1588,26 @@ static void intel_cqm_cpu_starting(unsigned int cpu)
WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
+
+ cqm_pick_event_reader(cpu);
+ return 0;
}
-static void intel_cqm_cpu_exit(unsigned int cpu)
+static int intel_cqm_cpu_exit(unsigned int cpu)
{
int target;
/* Is @cpu the current cqm reader for this package ? */
if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
- return;
+ return 0;
/* Find another online reader in this package */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
if (target < nr_cpu_ids)
cpumask_set_cpu(target, &cqm_cpumask);
-}
-
-static int intel_cqm_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- intel_cqm_cpu_exit(cpu);
- break;
- case CPU_STARTING:
- intel_cqm_cpu_starting(cpu);
- cqm_pick_event_reader(cpu);
- break;
- }
- return NOTIFY_OK;
+ return 0;
}
static const struct x86_cpu_id intel_cqm_match[] = {
@@ -1682,7 +1669,7 @@ out:
static int __init intel_cqm_init(void)
{
char *str = NULL, scale[20];
- int i, cpu, ret;
+ int cpu, ret;
if (x86_match_cpu(intel_cqm_match))
cqm_enabled = true;
@@ -1705,8 +1692,7 @@ static int __init intel_cqm_init(void)
*
* Also, check that the scales match on all cpus.
*/
- cpu_notifier_register_begin();
-
+ get_online_cpus();
for_each_online_cpu(cpu) {
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1743,11 +1729,6 @@ static int __init intel_cqm_init(void)
if (ret)
goto out;
- for_each_online_cpu(i) {
- intel_cqm_cpu_starting(i);
- cqm_pick_event_reader(i);
- }
-
if (mbm_enabled)
ret = intel_mbm_init();
if (ret && !cqm_enabled)
@@ -1772,12 +1753,18 @@ static int __init intel_cqm_init(void)
pr_info("Intel MBM enabled\n");
/*
- * Register the hot cpu notifier once we are sure cqm
+	 * Set up the hotplug callbacks once we are sure cqm
* is enabled to avoid notifier leak.
*/
- __perf_cpu_notifier(intel_cqm_cpu_notifier);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
+ "AP_PERF_X86_CQM_STARTING",
+ intel_cqm_cpu_starting, NULL);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
+ NULL, intel_cqm_cpu_exit);
+
out:
- cpu_notifier_register_done();
+ put_online_cpus();
+
if (ret) {
kfree(str);
cqm_cleanup();
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 9ba4e4136a15..3ca87b5a8677 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -89,6 +89,7 @@
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include "../perf_event.h"
MODULE_LICENSE("GPL");
@@ -365,7 +366,7 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode)
* Check if exiting cpu is the designated reader. If so migrate the
* events when there is a valid target available
*/
-static void cstate_cpu_exit(int cpu)
+static int cstate_cpu_exit(unsigned int cpu)
{
unsigned int target;
@@ -390,9 +391,10 @@ static void cstate_cpu_exit(int cpu)
perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
}
}
+ return 0;
}
-static void cstate_cpu_init(int cpu)
+static int cstate_cpu_init(unsigned int cpu)
{
unsigned int target;
@@ -414,31 +416,10 @@ static void cstate_cpu_init(int cpu)
topology_core_cpumask(cpu));
if (has_cstate_pkg && target >= nr_cpu_ids)
cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
-}
-static int cstate_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- cstate_cpu_init(cpu);
- break;
- case CPU_DOWN_PREPARE:
- cstate_cpu_exit(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block cstate_cpu_nb = {
- .notifier_call = cstate_cpu_notifier,
- .priority = CPU_PRI_PERF + 1,
-};
-
static struct pmu cstate_core_pmu = {
.attr_groups = core_attr_groups,
.name = "cstate_core",
@@ -511,37 +492,37 @@ static const struct cstate_model slm_cstates __initconst = {
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
static const struct x86_cpu_id intel_cstates_match[] __initconst = {
- X86_CSTATES_MODEL(30, nhm_cstates), /* 45nm Nehalem */
- X86_CSTATES_MODEL(26, nhm_cstates), /* 45nm Nehalem-EP */
- X86_CSTATES_MODEL(46, nhm_cstates), /* 45nm Nehalem-EX */
+ X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM, nhm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),
- X86_CSTATES_MODEL(37, nhm_cstates), /* 32nm Westmere */
- X86_CSTATES_MODEL(44, nhm_cstates), /* 32nm Westmere-EP */
- X86_CSTATES_MODEL(47, nhm_cstates), /* 32nm Westmere-EX */
+ X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE, nhm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),
- X86_CSTATES_MODEL(42, snb_cstates), /* 32nm SandyBridge */
- X86_CSTATES_MODEL(45, snb_cstates), /* 32nm SandyBridge-E/EN/EP */
+ X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),
- X86_CSTATES_MODEL(58, snb_cstates), /* 22nm IvyBridge */
- X86_CSTATES_MODEL(62, snb_cstates), /* 22nm IvyBridge-EP/EX */
+ X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),
- X86_CSTATES_MODEL(60, snb_cstates), /* 22nm Haswell Core */
- X86_CSTATES_MODEL(63, snb_cstates), /* 22nm Haswell Server */
- X86_CSTATES_MODEL(70, snb_cstates), /* 22nm Haswell + GT3e */
+ X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),
- X86_CSTATES_MODEL(69, hswult_cstates), /* 22nm Haswell ULT */
+ X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
- X86_CSTATES_MODEL(55, slm_cstates), /* 22nm Atom Silvermont */
- X86_CSTATES_MODEL(77, slm_cstates), /* 22nm Atom Avoton/Rangely */
- X86_CSTATES_MODEL(76, slm_cstates), /* 22nm Atom Airmont */
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates),
- X86_CSTATES_MODEL(61, snb_cstates), /* 14nm Broadwell Core-M */
- X86_CSTATES_MODEL(86, snb_cstates), /* 14nm Broadwell Xeon D */
- X86_CSTATES_MODEL(71, snb_cstates), /* 14nm Broadwell + GT3e */
- X86_CSTATES_MODEL(79, snb_cstates), /* 14nm Broadwell Server */
+ X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates),
- X86_CSTATES_MODEL(78, snb_cstates), /* 14nm Skylake Mobile */
- X86_CSTATES_MODEL(94, snb_cstates), /* 14nm Skylake Desktop */
+ X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -599,18 +580,20 @@ static inline void cstate_cleanup(void)
static int __init cstate_init(void)
{
- int cpu, err;
+ int err;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- cstate_cpu_init(cpu);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
+ "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
+ NULL);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+ "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
if (has_cstate_core) {
err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
if (err) {
has_cstate_core = false;
pr_info("Failed to register cstate core pmu\n");
- goto out;
+ return err;
}
}
@@ -620,12 +603,10 @@ static int __init cstate_init(void)
has_cstate_pkg = false;
pr_info("Failed to register cstate pkg pmu\n");
cstate_cleanup();
- goto out;
+ return err;
}
}
- __register_cpu_notifier(&cstate_cpu_nb);
-out:
- cpu_notifier_register_done();
+
return err;
}
@@ -651,9 +632,8 @@ module_init(cstate_pmu_init);
static void __exit cstate_pmu_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&cstate_cpu_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
cstate_cleanup();
- cpu_notifier_register_done();
}
module_exit(cstate_pmu_exit);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 9e2b40cdb05f..707d358e0dff 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -77,9 +77,11 @@ static enum {
LBR_IND_JMP |\
LBR_FAR)
-#define LBR_FROM_FLAG_MISPRED (1ULL << 63)
-#define LBR_FROM_FLAG_IN_TX (1ULL << 62)
-#define LBR_FROM_FLAG_ABORT (1ULL << 61)
+#define LBR_FROM_FLAG_MISPRED BIT_ULL(63)
+#define LBR_FROM_FLAG_IN_TX BIT_ULL(62)
+#define LBR_FROM_FLAG_ABORT BIT_ULL(61)
+
+#define LBR_FROM_SIGNEXT_2MSB (BIT_ULL(60) | BIT_ULL(59))
/*
 * x86 control flow change classification
@@ -235,6 +237,97 @@ enum {
LBR_VALID,
};
+/*
+ * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
+ * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
+ * TSX is not supported they have no consistent behavior:
+ *
+ * - For wrmsr(), bits 61:62 are considered part of the sign extension.
+ * - For HW updates (branch captures) bits 61:62 are always OFF and are not
+ * part of the sign extension.
+ *
+ * Therefore, if:
+ *
+ * 1) LBR has TSX format
+ * 2) CPU has no TSX support enabled
+ *
+ * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
+ * value from rdmsr() must be converted to have a 61 bits sign extension,
+ * ignoring the TSX flags.
+ */
+static inline bool lbr_from_signext_quirk_needed(void)
+{
+ int lbr_format = x86_pmu.intel_cap.lbr_format;
+ bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
+ boot_cpu_has(X86_FEATURE_RTM);
+
+ return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
+}
+
+DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
+
+/* If quirk is enabled, ensure sign extension is 63 bits: */
+inline u64 lbr_from_signext_quirk_wr(u64 val)
+{
+ if (static_branch_unlikely(&lbr_from_quirk_key)) {
+ /*
+ * Sign extend into bits 61:62 while preserving bit 63.
+ *
+ * Quirk is enabled when TSX is disabled. Therefore TSX bits
+ * in val are always OFF and must be changed to be sign
+ * extension bits. Since bits 59:60 are guaranteed to be
+ * part of the sign extension bits, we can just copy them
+ * to 61:62.
+ */
+ val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
+ }
+ return val;
+}
+
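+/*
+ * Worked example (illustrative): a canonical kernel address such as
+ * 0xffff880012345678 is stored by hardware with bits 61:62 clear
+ * (TSX flags off), i.e. as 0x9fff880012345678.  Bits 59:60 are part of
+ * the sign extension and are set, so (LBR_FROM_SIGNEXT_2MSB & val) << 2
+ * sets bits 61:62 again and restores the fully sign-extended value
+ * 0xffff880012345678 before it is written with wrmsr().
+ */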
+/*
+ * If quirk is needed, ensure sign extension is 61 bits:
+ */
+u64 lbr_from_signext_quirk_rd(u64 val)
+{
+ if (static_branch_unlikely(&lbr_from_quirk_key)) {
+ /*
+ * Quirk is on when TSX is not enabled. Therefore TSX
+ * flags must be read as OFF.
+ */
+ val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+ }
+ return val;
+}
+
+static inline void wrlbr_from(unsigned int idx, u64 val)
+{
+ val = lbr_from_signext_quirk_wr(val);
+ wrmsrl(x86_pmu.lbr_from + idx, val);
+}
+
+static inline void wrlbr_to(unsigned int idx, u64 val)
+{
+ wrmsrl(x86_pmu.lbr_to + idx, val);
+}
+
+static inline u64 rdlbr_from(unsigned int idx)
+{
+ u64 val;
+
+ rdmsrl(x86_pmu.lbr_from + idx, val);
+
+ return lbr_from_signext_quirk_rd(val);
+}
+
+static inline u64 rdlbr_to(unsigned int idx)
+{
+ u64 val;
+
+ rdmsrl(x86_pmu.lbr_to + idx, val);
+
+ return val;
+}
+
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
int i;
@@ -251,8 +344,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
tos = task_ctx->tos;
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
- wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
- wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
+ wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
+
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
@@ -262,9 +356,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
- int i;
unsigned lbr_idx, mask;
u64 tos;
+ int i;
if (task_ctx->lbr_callstack_users == 0) {
task_ctx->lbr_stack_state = LBR_NONE;
@@ -275,8 +369,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
tos = intel_pmu_lbr_tos();
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
- rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
- rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+ task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
@@ -452,8 +546,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
u16 cycles = 0;
int lbr_flags = lbr_desc[lbr_format];
- rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
- rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
+ from = rdlbr_from(lbr_idx);
+ to = rdlbr_to(lbr_idx);
if (lbr_format == LBR_FORMAT_INFO && need_info) {
u64 info;
@@ -956,7 +1050,6 @@ void __init intel_pmu_lbr_init_core(void)
* SW branch filter usage:
* - compensate for lack of HW filter
*/
- pr_cont("4-deep LBR, ");
}
/* nehalem/westmere */
@@ -977,7 +1070,6 @@ void __init intel_pmu_lbr_init_nhm(void)
* That requires LBR_FAR but that means far
* jmp need to be filtered out
*/
- pr_cont("16-deep LBR, ");
}
/* sandy bridge */
@@ -997,7 +1089,6 @@ void __init intel_pmu_lbr_init_snb(void)
* That requires LBR_FAR but that means far
* jmp need to be filtered out
*/
- pr_cont("16-deep LBR, ");
}
/* haswell */
@@ -1011,7 +1102,8 @@ void intel_pmu_lbr_init_hsw(void)
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
- pr_cont("16-deep LBR, ");
+ if (lbr_from_signext_quirk_needed())
+ static_branch_enable(&lbr_from_quirk_key);
}
/* skylake */
@@ -1031,7 +1123,6 @@ __init void intel_pmu_lbr_init_skl(void)
* That requires LBR_FAR but that means far
* jmp need to be filtered out
*/
- pr_cont("32-deep LBR, ");
}
/* atom */
@@ -1057,7 +1148,6 @@ void __init intel_pmu_lbr_init_atom(void)
* SW branch filter usage:
* - compensate for lack of HW filter
*/
- pr_cont("8-deep LBR, ");
}
/* slm */
@@ -1088,6 +1178,4 @@ void intel_pmu_lbr_init_knl(void)
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = snb_lbr_sel_map;
-
- pr_cont("8-deep LBR, ");
}
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 99c4bab123cd..28865938aadf 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -55,6 +55,7 @@
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include "../perf_event.h"
MODULE_LICENSE("GPL");
@@ -555,14 +556,14 @@ const struct attribute_group *rapl_attr_groups[] = {
NULL,
};
-static void rapl_cpu_exit(int cpu)
+static int rapl_cpu_offline(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
/* Check if exiting cpu is used for collecting rapl events */
if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
- return;
+ return 0;
pmu->cpu = -1;
/* Find a new cpu to collect rapl events */
@@ -574,9 +575,10 @@ static void rapl_cpu_exit(int cpu)
pmu->cpu = target;
perf_pmu_migrate_context(pmu->pmu, cpu, target);
}
+ return 0;
}
-static void rapl_cpu_init(int cpu)
+static int rapl_cpu_online(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
@@ -587,13 +589,14 @@ static void rapl_cpu_init(int cpu)
*/
target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
if (target < nr_cpu_ids)
- return;
+ return 0;
cpumask_set_cpu(cpu, &rapl_cpu_mask);
pmu->cpu = cpu;
+ return 0;
}
-static int rapl_cpu_prepare(int cpu)
+static int rapl_cpu_prepare(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
@@ -614,33 +617,6 @@ static int rapl_cpu_prepare(int cpu)
return 0;
}
-static int rapl_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- rapl_cpu_prepare(cpu);
- break;
-
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- rapl_cpu_init(cpu);
- break;
-
- case CPU_DOWN_PREPARE:
- rapl_cpu_exit(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block rapl_cpu_nb = {
- .notifier_call = rapl_cpu_notifier,
- .priority = CPU_PRI_PERF + 1,
-};
-
static int rapl_check_hw_unit(bool apply_quirk)
{
u64 msr_rapl_power_unit_bits;
@@ -691,30 +667,12 @@ static void __init rapl_advertise(void)
}
}
-static int __init rapl_prepare_cpus(void)
-{
- unsigned int cpu, pkg;
- int ret;
-
- for_each_online_cpu(cpu) {
- pkg = topology_logical_package_id(cpu);
- if (rapl_pmus->pmus[pkg])
- continue;
-
- ret = rapl_cpu_prepare(cpu);
- if (ret)
- return ret;
- rapl_cpu_init(cpu);
- }
- return 0;
-}
-
static void cleanup_rapl_pmus(void)
{
int i;
for (i = 0; i < rapl_pmus->maxpkg; i++)
- kfree(rapl_pmus->pmus + i);
+ kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}
@@ -786,26 +744,27 @@ static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
};
static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
- X86_RAPL_MODEL_MATCH(42, snb_rapl_init), /* Sandy Bridge */
- X86_RAPL_MODEL_MATCH(45, snbep_rapl_init), /* Sandy Bridge-EP */
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),
- X86_RAPL_MODEL_MATCH(58, snb_rapl_init), /* Ivy Bridge */
- X86_RAPL_MODEL_MATCH(62, snbep_rapl_init), /* IvyTown */
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, snb_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),
- X86_RAPL_MODEL_MATCH(60, hsw_rapl_init), /* Haswell */
- X86_RAPL_MODEL_MATCH(63, hsx_rapl_init), /* Haswell-Server */
- X86_RAPL_MODEL_MATCH(69, hsw_rapl_init), /* Haswell-Celeron */
- X86_RAPL_MODEL_MATCH(70, hsw_rapl_init), /* Haswell GT3e */
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),
- X86_RAPL_MODEL_MATCH(61, hsw_rapl_init), /* Broadwell */
- X86_RAPL_MODEL_MATCH(71, hsw_rapl_init), /* Broadwell-H */
- X86_RAPL_MODEL_MATCH(79, hsx_rapl_init), /* Broadwell-Server */
- X86_RAPL_MODEL_MATCH(86, hsx_rapl_init), /* Broadwell Xeon D */
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
- X86_RAPL_MODEL_MATCH(87, knl_rapl_init), /* Knights Landing */
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
- X86_RAPL_MODEL_MATCH(78, skl_rapl_init), /* Skylake */
- X86_RAPL_MODEL_MATCH(94, skl_rapl_init), /* Skylake H/S */
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, hsx_rapl_init),
{},
};
@@ -835,35 +794,44 @@ static int __init rapl_pmu_init(void)
if (ret)
return ret;
- cpu_notifier_register_begin();
+ /*
+ * Install callbacks. Core will call them for each online cpu.
+ */
- ret = rapl_prepare_cpus();
+ ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+ rapl_cpu_prepare, NULL);
if (ret)
goto out;
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
+ "AP_PERF_X86_RAPL_ONLINE",
+ rapl_cpu_online, rapl_cpu_offline);
+ if (ret)
+ goto out1;
+
ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
if (ret)
- goto out;
+ goto out2;
- __register_cpu_notifier(&rapl_cpu_nb);
- cpu_notifier_register_done();
rapl_advertise();
return 0;
+out2:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+out1:
+ cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
out:
pr_warn("Initialization failed (%d), disabled\n", ret);
cleanup_rapl_pmus();
- cpu_notifier_register_done();
return ret;
}
module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&rapl_cpu_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+ cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
perf_pmu_unregister(&rapl_pmus->pmu);
cleanup_rapl_pmus();
- cpu_notifier_register_done();
}
module_exit(intel_rapl_exit);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index fce74062d981..463dc7a5a6c3 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1,4 +1,7 @@
+#include <linux/module.h>
+
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include "uncore.h"
static struct intel_uncore_type *empty_uncore[] = { NULL, };
@@ -882,7 +885,7 @@ uncore_types_init(struct intel_uncore_type **types, bool setid)
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct intel_uncore_type *type;
- struct intel_uncore_pmu *pmu;
+ struct intel_uncore_pmu *pmu = NULL;
struct intel_uncore_box *box;
int phys_id, pkg, ret;
@@ -903,20 +906,37 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
}
type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
+
/*
- * for performance monitoring unit with multiple boxes,
- * each box has a different function id.
- */
- pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
- /* Knights Landing uses a common PCI device ID for multiple instances of
- * an uncore PMU device type. There is only one entry per device type in
- * the knl_uncore_pci_ids table inspite of multiple devices present for
- * some device types. Hence PCI device idx would be 0 for all devices.
- * So increment pmu pointer to point to an unused array element.
+ * Some platforms, e.g. Knights Landing, use a common PCI device ID
+	 * for multiple instances of an uncore PMU device type. In that case
+	 * the PCI slot and function are needed to identify the uncore box.
*/
- if (boot_cpu_data.x86_model == 87) {
- while (pmu->func_id >= 0)
- pmu++;
+ if (id->driver_data & ~0xffff) {
+ struct pci_driver *pci_drv = pdev->driver;
+ const struct pci_device_id *ids = pci_drv->id_table;
+ unsigned int devfn;
+
+ while (ids && ids->vendor) {
+ if ((ids->vendor == pdev->vendor) &&
+ (ids->device == pdev->device)) {
+ devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
+ UNCORE_PCI_DEV_FUNC(ids->driver_data));
+ if (devfn == pdev->devfn) {
+ pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
+ break;
+ }
+ }
+ ids++;
+ }
+ if (pmu == NULL)
+ return -ENODEV;
+ } else {
+ /*
+ * for performance monitoring unit with multiple boxes,
+ * each box has a different function id.
+ */
+ pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
}
if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
@@ -956,7 +976,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
static void uncore_pci_remove(struct pci_dev *pdev)
{
- struct intel_uncore_box *box = pci_get_drvdata(pdev);
+ struct intel_uncore_box *box;
struct intel_uncore_pmu *pmu;
int i, phys_id, pkg;
@@ -1034,7 +1054,7 @@ static void uncore_pci_exit(void)
}
}
-static void uncore_cpu_dying(int cpu)
+static int uncore_cpu_dying(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
@@ -1051,16 +1071,19 @@ static void uncore_cpu_dying(int cpu)
uncore_box_exit(box);
}
}
+ return 0;
}
-static void uncore_cpu_starting(int cpu, bool init)
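+/*
+ * first_init is set around the initial CPUHP_AP_PERF_X86_UNCORE_STARTING
+ * setup in intel_uncore_init() so that uncore_cpu_starting() takes the
+ * package refcounts for all CPUs already online at that point; CPUs that
+ * come online later are counted individually.
+ */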
+static int first_init;
+
+static int uncore_cpu_starting(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg, ncpus = 1;
- if (init) {
+ if (first_init) {
/*
* On init we get the number of online cpus in the package
* and set refcount for all of them.
@@ -1081,9 +1104,11 @@ static void uncore_cpu_starting(int cpu, bool init)
uncore_box_init(box);
}
}
+
+ return 0;
}
-static int uncore_cpu_prepare(int cpu)
+static int uncore_cpu_prepare(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
@@ -1146,13 +1171,13 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}
-static void uncore_event_exit_cpu(int cpu)
+static int uncore_event_cpu_offline(unsigned int cpu)
{
int target;
/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
- return;
+ return 0;
/* Find a new cpu to collect uncore events */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1165,9 +1190,10 @@ static void uncore_event_exit_cpu(int cpu)
uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
+ return 0;
}
-static void uncore_event_init_cpu(int cpu)
+static int uncore_event_cpu_online(unsigned int cpu)
{
int target;
@@ -1177,50 +1203,15 @@ static void uncore_event_init_cpu(int cpu)
*/
target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
if (target < nr_cpu_ids)
- return;
+ return 0;
cpumask_set_cpu(cpu, &uncore_cpu_mask);
uncore_change_context(uncore_msr_uncores, -1, cpu);
uncore_change_context(uncore_pci_uncores, -1, cpu);
+ return 0;
}
-static int uncore_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- return notifier_from_errno(uncore_cpu_prepare(cpu));
-
- case CPU_STARTING:
- uncore_cpu_starting(cpu, false);
- case CPU_DOWN_FAILED:
- uncore_event_init_cpu(cpu);
- break;
-
- case CPU_UP_CANCELED:
- case CPU_DYING:
- uncore_cpu_dying(cpu);
- break;
-
- case CPU_DOWN_PREPARE:
- uncore_event_exit_cpu(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block uncore_cpu_nb = {
- .notifier_call = uncore_cpu_notifier,
- /*
- * to migrate uncore events, our notifier should be executed
- * before perf core's notifier.
- */
- .priority = CPU_PRI_PERF + 1,
-};
-
static int __init type_pmu_register(struct intel_uncore_type *type)
{
int i, ret;
@@ -1264,41 +1255,6 @@ err:
return ret;
}
-static void __init uncore_cpu_setup(void *dummy)
-{
- uncore_cpu_starting(smp_processor_id(), true);
-}
-
-/* Lazy to avoid allocation of a few bytes for the normal case */
-static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
-
-static int __init uncore_cpumask_init(bool msr)
-{
- unsigned int cpu;
-
- for_each_online_cpu(cpu) {
- unsigned int pkg = topology_logical_package_id(cpu);
- int ret;
-
- if (test_and_set_bit(pkg, packages))
- continue;
- /*
- * The first online cpu of each package allocates and takes
- * the refcounts for all other online cpus in that package.
- * If msrs are not enabled no allocation is required.
- */
- if (msr) {
- ret = uncore_cpu_prepare(cpu);
- if (ret)
- return ret;
- }
- uncore_event_init_cpu(cpu);
- smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
- }
- __register_cpu_notifier(&uncore_cpu_nb);
- return 0;
-}
-
#define X86_UNCORE_MODEL_MATCH(model, init) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
@@ -1361,30 +1317,32 @@ static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
};
static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
+ .cpu_init = skl_uncore_cpu_init,
.pci_init = skl_uncore_pci_init,
};
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
- X86_UNCORE_MODEL_MATCH(26, nhm_uncore_init), /* Nehalem */
- X86_UNCORE_MODEL_MATCH(30, nhm_uncore_init),
- X86_UNCORE_MODEL_MATCH(37, nhm_uncore_init), /* Westmere */
- X86_UNCORE_MODEL_MATCH(44, nhm_uncore_init),
- X86_UNCORE_MODEL_MATCH(42, snb_uncore_init), /* Sandy Bridge */
- X86_UNCORE_MODEL_MATCH(58, ivb_uncore_init), /* Ivy Bridge */
- X86_UNCORE_MODEL_MATCH(60, hsw_uncore_init), /* Haswell */
- X86_UNCORE_MODEL_MATCH(69, hsw_uncore_init), /* Haswell Celeron */
- X86_UNCORE_MODEL_MATCH(70, hsw_uncore_init), /* Haswell */
- X86_UNCORE_MODEL_MATCH(61, bdw_uncore_init), /* Broadwell */
- X86_UNCORE_MODEL_MATCH(71, bdw_uncore_init), /* Broadwell */
- X86_UNCORE_MODEL_MATCH(45, snbep_uncore_init), /* Sandy Bridge-EP */
- X86_UNCORE_MODEL_MATCH(46, nhmex_uncore_init), /* Nehalem-EX */
- X86_UNCORE_MODEL_MATCH(47, nhmex_uncore_init), /* Westmere-EX aka. Xeon E7 */
- X86_UNCORE_MODEL_MATCH(62, ivbep_uncore_init), /* Ivy Bridge-EP */
- X86_UNCORE_MODEL_MATCH(63, hswep_uncore_init), /* Haswell-EP */
- X86_UNCORE_MODEL_MATCH(79, bdx_uncore_init), /* BDX-EP */
- X86_UNCORE_MODEL_MATCH(86, bdx_uncore_init), /* BDX-DE */
- X86_UNCORE_MODEL_MATCH(87, knl_uncore_init), /* Knights Landing */
- X86_UNCORE_MODEL_MATCH(94, skl_uncore_init), /* SkyLake */
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
{},
};
@@ -1420,11 +1378,33 @@ static int __init intel_uncore_init(void)
if (cret && pret)
return -ENODEV;
- cpu_notifier_register_begin();
- ret = uncore_cpumask_init(!cret);
- if (ret)
- goto err;
- cpu_notifier_register_done();
+ /*
+ * Install callbacks. Core will call them for each online cpu.
+ *
+ * The first online cpu of each package allocates and takes
+ * the refcounts for all other online cpus in that package.
+ * If msrs are not enabled no allocation is required and
+ * uncore_cpu_prepare() is not called for each online cpu.
+ */
+ if (!cret) {
+ ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
+ "PERF_X86_UNCORE_PREP",
+ uncore_cpu_prepare, NULL);
+ if (ret)
+ goto err;
+ } else {
+ cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
+ "PERF_X86_UNCORE_PREP",
+ uncore_cpu_prepare, NULL);
+ }
+ first_init = 1;
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+ "AP_PERF_X86_UNCORE_STARTING",
+ uncore_cpu_starting, uncore_cpu_dying);
+ first_init = 0;
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+ "AP_PERF_X86_UNCORE_ONLINE",
+ uncore_event_cpu_online, uncore_event_cpu_offline);
return 0;
err:
@@ -1432,17 +1412,16 @@ err:
on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
- cpu_notifier_register_done();
return ret;
}
module_init(intel_uncore_init);
static void __exit intel_uncore_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&uncore_cpu_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
+ cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
- cpu_notifier_register_done();
}
module_exit(intel_uncore_exit);
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 79766b9a3580..78b9c23e2d8d 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -1,4 +1,3 @@
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
@@ -15,7 +14,11 @@
#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
+#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \
+ ((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
+#define UNCORE_PCI_DEV_DEV(data) ((data >> 24) & 0xff)
+#define UNCORE_PCI_DEV_FUNC(data) ((data >> 16) & 0xff)
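+/*
+ * Example (illustrative): UNCORE_PCI_DEV_FULL_DATA(10, 0, type, 0) packs
+ * PCI device 10, function 0, the uncore type id and box index 0 into a
+ * single driver_data word; UNCORE_PCI_DEV_DEV()/_FUNC()/_TYPE()/_IDX()
+ * recover the individual fields again in uncore_pci_probe().
+ */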
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
@@ -360,6 +363,7 @@ int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
+void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);
/* perf_event_intel_uncore_snbep.c */
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 96531d2b843f..97a69dbba649 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -1,4 +1,4 @@
-/* Nehalem/SandBridge/Haswell uncore support */
+/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
/* Uncore IMC PCI IDs */
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -64,6 +65,10 @@
#define NHM_UNC_PERFEVTSEL0 0x3c0
#define NHM_UNC_UNCORE_PMC0 0x3b0
+/* SKL uncore global control */
+#define SKL_UNC_PERF_GLOBAL_CTL 0xe01
+#define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1)
+
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@@ -179,6 +184,60 @@ void snb_uncore_cpu_init(void)
snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
+static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0) {
+ wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+ SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
+ }
+}
+
+static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static struct intel_uncore_ops skl_uncore_msr_ops = {
+ .init_box = skl_uncore_msr_init_box,
+ .exit_box = skl_uncore_msr_exit_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = snb_uncore_msr_enable_event,
+ .read_counter = uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type skl_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .num_boxes = 5,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+ .fixed_ctr = SNB_UNC_FIXED_CTR,
+ .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
+ .ops = &skl_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+ .event_descs = snb_uncore_events,
+};
+
+static struct intel_uncore_type *skl_msr_uncores[] = {
+ &skl_uncore_cbox,
+ &snb_uncore_arb,
+ NULL,
+};
+
+void skl_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = skl_msr_uncores;
+ if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ snb_uncore_arb.ops = &skl_uncore_msr_ops;
+}
+
enum {
SNB_PCI_UNCORE_IMC,
};
@@ -544,6 +603,11 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+
{ /* end: all zeroes */ },
};
@@ -587,6 +651,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */
+ IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
{ /* end marker */ }
};
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index b2625867ebd1..824e54086e07 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2164,21 +2164,101 @@ static struct intel_uncore_type *knl_pci_uncores[] = {
*/
static const struct pci_device_id knl_uncore_pci_ids[] = {
- { /* MC UClk */
+ { /* MC0 UClk */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
- .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_UCLK, 0),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
},
- { /* MC DClk Channel */
+ { /* MC1 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
+ },
+ { /* MC0 DClk CH 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
+ },
+ { /* MC0 DClk CH 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
+ },
+ { /* MC0 DClk CH 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
+ },
+ { /* MC1 DClk CH 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
+ },
+ { /* MC1 DClk CH 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
- .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_DCLK, 0),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
+ },
+ { /* MC1 DClk CH 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
+ },
+ { /* EDC0 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
},
- { /* EDC UClk */
+ { /* EDC1 UClk */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
- .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_UCLK, 0),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
+ },
+ { /* EDC2 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
+ },
+ { /* EDC3 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
+ },
+ { /* EDC4 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
+ },
+ { /* EDC5 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
+ },
+ { /* EDC6 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
+ },
+ { /* EDC7 UClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
+ },
+ { /* EDC0 EClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
},
- { /* EDC EClk */
+ { /* EDC1 EClk */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
- .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_ECLK, 0),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
+ },
+ { /* EDC2 EClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
+ },
+ { /* EDC3 EClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
+ },
+ { /* EDC4 EClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
+ },
+ { /* EDC5 EClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
+ },
+ { /* EDC6 EClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
+ },
+ { /* EDC7 EClk */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
},
{ /* M2PCIe */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
@@ -2868,27 +2948,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
.format_group = &hswep_uncore_cbox_format_group,
};
-static struct intel_uncore_type bdx_uncore_sbox = {
- .name = "sbox",
- .num_counters = 4,
- .num_boxes = 4,
- .perf_ctr_bits = 48,
- .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
- .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
- .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
- .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
- .msr_offset = HSWEP_SBOX_MSR_OFFSET,
- .ops = &hswep_uncore_sbox_msr_ops,
- .format_group = &hswep_uncore_sbox_format_group,
-};
-
-#define BDX_MSR_UNCORE_SBOX 3
-
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
&hswep_uncore_pcu,
- &bdx_uncore_sbox,
NULL,
};
@@ -2897,10 +2960,6 @@ void bdx_uncore_cpu_init(void)
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
-
- /* BDX-DE doesn't have SBOX */
- if (boot_cpu_data.x86_model == 86)
- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 85ef3c2e80e0..4bb3ec69e8ea 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -1,4 +1,5 @@
#include <linux/perf_event.h>
+#include <asm/intel-family.h>
enum perf_msr_id {
PERF_MSR_TSC = 0,
@@ -34,39 +35,43 @@ static bool test_intel(int idx)
return false;
switch (boot_cpu_data.x86_model) {
- case 30: /* 45nm Nehalem */
- case 26: /* 45nm Nehalem-EP */
- case 46: /* 45nm Nehalem-EX */
-
- case 37: /* 32nm Westmere */
- case 44: /* 32nm Westmere-EP */
- case 47: /* 32nm Westmere-EX */
-
- case 42: /* 32nm SandyBridge */
- case 45: /* 32nm SandyBridge-E/EN/EP */
-
- case 58: /* 22nm IvyBridge */
- case 62: /* 22nm IvyBridge-EP/EX */
-
- case 60: /* 22nm Haswell Core */
- case 63: /* 22nm Haswell Server */
- case 69: /* 22nm Haswell ULT */
- case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
- case 61: /* 14nm Broadwell Core-M */
- case 86: /* 14nm Broadwell Xeon D */
- case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
- case 79: /* 14nm Broadwell Server */
-
- case 55: /* 22nm Atom "Silvermont" */
- case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
- case 76: /* 14nm Atom "Airmont" */
+ case INTEL_FAM6_NEHALEM:
+ case INTEL_FAM6_NEHALEM_G:
+ case INTEL_FAM6_NEHALEM_EP:
+ case INTEL_FAM6_NEHALEM_EX:
+
+ case INTEL_FAM6_WESTMERE:
+ case INTEL_FAM6_WESTMERE_EP:
+ case INTEL_FAM6_WESTMERE_EX:
+
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_SANDYBRIDGE_X:
+
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE_X:
+
+ case INTEL_FAM6_HASWELL_CORE:
+ case INTEL_FAM6_HASWELL_X:
+ case INTEL_FAM6_HASWELL_ULT:
+ case INTEL_FAM6_HASWELL_GT3E:
+
+ case INTEL_FAM6_BROADWELL_CORE:
+ case INTEL_FAM6_BROADWELL_XEON_D:
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_X:
+
+ case INTEL_FAM6_ATOM_SILVERMONT1:
+ case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_AIRMONT:
if (idx == PERF_MSR_SMI)
return true;
break;
- case 78: /* 14nm Skylake Mobile */
- case 94: /* 14nm Skylake Desktop */
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 8bd764df815d..8c4a47706296 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -668,6 +668,14 @@ static struct perf_pmu_events_attr event_attr_##v = { \
.event_str = str, \
};
+#define EVENT_ATTR_STR_HT(_name, v, noht, ht) \
+static struct perf_pmu_events_ht_attr event_attr_##v = { \
+ .attr = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
+ .id = 0, \
+ .event_str_noht = noht, \
+ .event_str_ht = ht, \
+}
+
extern struct x86_pmu x86_pmu __read_mostly;
static inline bool x86_pmu_has_lbr_callstack(void)
@@ -803,6 +811,8 @@ struct attribute **merge_attr(struct attribute **a, struct attribute **b);
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *page);
#ifdef CONFIG_CPU_SUP_AMD
@@ -892,6 +902,8 @@ void intel_ds_init(void);
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
+u64 lbr_from_signext_quirk_wr(u64 val);
+
void intel_pmu_lbr_reset(void);
void intel_pmu_lbr_enable(struct perf_event *event);
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index aeac434c9feb..2cfed174e3c9 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -1,5 +1,11 @@
+generated-y += syscalls_32.h
+generated-y += syscalls_64.h
+generated-y += unistd_32_ia32.h
+generated-y += unistd_64_x32.h
+generated-y += xen-hypercalls.h
+
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 94c18ebfd68c..5391b0ae7cc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -145,7 +145,6 @@ static inline void disable_acpi(void) { }
#define ARCH_HAS_POWER_INIT 1
#ifdef CONFIG_ACPI_NUMA
-extern int acpi_numa;
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
@@ -170,4 +169,6 @@ static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
}
#endif
+#define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)
+
#endif /* _ASM_X86_ACPI_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index bc27611fa58f..f5befd4945f2 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -300,7 +300,6 @@ struct apic {
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
- unsigned long apic_id_mask;
int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask,
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..93eebc636c76 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -45,11 +45,11 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
: "memory", "cc");
}
-static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
- u32 ecx_in, u32 *eax)
+static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ u32 ecx_in, u32 *eax)
{
int cx, dx, si;
- u8 error;
+ bool error;
/*
* N.B. We do NOT need a cld after the BIOS call
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 02e799fa43d1..e7cd63175de4 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -4,8 +4,8 @@
#include <asm/cpufeatures.h>
#ifdef CONFIG_64BIT
-/* popcnt %edi, %eax -- redundant REX prefix for alignment */
-#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %edi, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
/* popcnt %rdi, %rax */
#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
#define REG_IN "D"
@@ -17,19 +17,15 @@
#define REG_OUT "a"
#endif
-/*
- * __sw_hweightXX are called from within the alternatives below
- * and callee-clobbered registers need to be taken care of. See
- * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
- * compiler switches.
- */
+#define __HAVE_ARCH_SW_HWEIGHT
+
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
- unsigned int res = 0;
+ unsigned int res;
asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
- : "="REG_OUT (res)
- : REG_IN (w));
+ : "="REG_OUT (res)
+ : REG_IN (w));
return res;
}
@@ -53,11 +49,11 @@ static inline unsigned long __arch_hweight64(__u64 w)
#else
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
- unsigned long res = 0;
+ unsigned long res;
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
- : "="REG_OUT (res)
- : REG_IN (w));
+ : "="REG_OUT (res)
+ : REG_IN (w));
return res;
}
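For context on the POPCNT32/POPCNT64 alternatives above: when the CPU lacks X86_FEATURE_POPCNT, the call falls through to a software __sw_hweight* routine. A minimal sketch of the classic SWAR reduction such a fallback can use (illustrative only, this is not the kernel's actual __sw_hweight32):

/* Illustrative SWAR popcount: not the kernel's __sw_hweight32. */
static unsigned int popcount32_sketch(unsigned int w)
{
	w = w - ((w >> 1) & 0x55555555);		/* count bits in pairs */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* sum pairs into nibbles */
	w = (w + (w >> 4)) & 0x0f0f0f0f;		/* sum nibbles into bytes */
	return (w * 0x01010101) >> 24;			/* add the four byte sums */
}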
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 69f1366f1aa3..5b0579abb398 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -25,8 +25,6 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
-#include <asm/alternative.h>
-#include <asm/nops.h>
#define RDRAND_RETRY_LOOPS 10
@@ -40,97 +38,91 @@
# define RDSEED_LONG RDSEED_INT
#endif
-#ifdef CONFIG_ARCH_RANDOM
+/* Unconditional execution of RDRAND and RDSEED */
-/* Instead of arch_get_random_long() when alternatives haven't run. */
-static inline int rdrand_long(unsigned long *v)
+static inline bool rdrand_long(unsigned long *v)
{
- int ok;
- asm volatile("1: " RDRAND_LONG "\n\t"
- "jc 2f\n\t"
- "decl %0\n\t"
- "jnz 1b\n\t"
- "2:"
- : "=r" (ok), "=a" (*v)
- : "0" (RDRAND_RETRY_LOOPS));
- return ok;
+ bool ok;
+ unsigned int retry = RDRAND_RETRY_LOOPS;
+ do {
+ asm volatile(RDRAND_LONG "\n\t"
+ CC_SET(c)
+ : CC_OUT(c) (ok), "=a" (*v));
+ if (ok)
+ return true;
+ } while (--retry);
+ return false;
+}
+
+static inline bool rdrand_int(unsigned int *v)
+{
+ bool ok;
+ unsigned int retry = RDRAND_RETRY_LOOPS;
+ do {
+ asm volatile(RDRAND_INT "\n\t"
+ CC_SET(c)
+ : CC_OUT(c) (ok), "=a" (*v));
+ if (ok)
+ return true;
+ } while (--retry);
+ return false;
}
-/* A single attempt at RDSEED */
static inline bool rdseed_long(unsigned long *v)
{
- unsigned char ok;
+ bool ok;
asm volatile(RDSEED_LONG "\n\t"
- "setc %0"
- : "=qm" (ok), "=a" (*v));
+ CC_SET(c)
+ : CC_OUT(c) (ok), "=a" (*v));
return ok;
}
-#define GET_RANDOM(name, type, rdrand, nop) \
-static inline int name(type *v) \
-{ \
- int ok; \
- alternative_io("movl $0, %0\n\t" \
- nop, \
- "\n1: " rdrand "\n\t" \
- "jc 2f\n\t" \
- "decl %0\n\t" \
- "jnz 1b\n\t" \
- "2:", \
- X86_FEATURE_RDRAND, \
- ASM_OUTPUT2("=r" (ok), "=a" (*v)), \
- "0" (RDRAND_RETRY_LOOPS)); \
- return ok; \
-}
-
-#define GET_SEED(name, type, rdseed, nop) \
-static inline int name(type *v) \
-{ \
- unsigned char ok; \
- alternative_io("movb $0, %0\n\t" \
- nop, \
- rdseed "\n\t" \
- "setc %0", \
- X86_FEATURE_RDSEED, \
- ASM_OUTPUT2("=q" (ok), "=a" (*v))); \
- return ok; \
+static inline bool rdseed_int(unsigned int *v)
+{
+ bool ok;
+ asm volatile(RDSEED_INT "\n\t"
+ CC_SET(c)
+ : CC_OUT(c) (ok), "=a" (*v));
+ return ok;
}
-#ifdef CONFIG_X86_64
-
-GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
-GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
-
-GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5);
-GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
-
-#else
-
-GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
-GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
-
-GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4);
-GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
-
-#endif /* CONFIG_X86_64 */
-
+/* Conditional execution based on CPU type */
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
-#else
+/*
+ * These are the generic interfaces; they must not be declared if the
+ * stubs in <linux/random.h> are to be invoked,
+ * i.e. CONFIG_ARCH_RANDOM is not defined.
+ */
+#ifdef CONFIG_ARCH_RANDOM
-static inline int rdrand_long(unsigned long *v)
+static inline bool arch_get_random_long(unsigned long *v)
{
- return 0;
+ return arch_has_random() ? rdrand_long(v) : false;
}
-static inline bool rdseed_long(unsigned long *v)
+static inline bool arch_get_random_int(unsigned int *v)
{
- return 0;
+ return arch_has_random() ? rdrand_int(v) : false;
}
-#endif /* CONFIG_ARCH_RANDOM */
+static inline bool arch_get_random_seed_long(unsigned long *v)
+{
+ return arch_has_random_seed() ? rdseed_long(v) : false;
+}
+
+static inline bool arch_get_random_seed_int(unsigned int *v)
+{
+ return arch_has_random_seed() ? rdseed_int(v) : false;
+}
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
+#else /* !CONFIG_ARCH_RANDOM */
+
+static inline void x86_init_rdrand(struct cpuinfo_x86 *c) { }
+
+#endif /* !CONFIG_ARCH_RANDOM */
+
#endif /* ASM_X86_ARCHRANDOM_H */
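With the rework above, the RDSEED/RDRAND helpers return bool and the arch_get_random*() wrappers only execute the instruction when the corresponding CPU feature is present. A caller-side sketch of the intended fallback order (hypothetical helper, kernel context assumed):

/* Hypothetical caller: prefer a true RDSEED value, fall back to RDRAND. */
static bool get_hw_random(unsigned long *val)
{
	if (arch_get_random_seed_long(val))
		return true;
	return arch_get_random_long(val);
}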
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index f5063b6659eb..7acb51c49fec 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -42,6 +42,18 @@
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
+/*
+ * Macros to generate condition code outputs from inline assembly.
+ * The output operand must be type "bool".
+ */
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
+# define CC_OUT(c) "=@cc" #c
+#else
+# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
+# define CC_OUT(c) [_cc_ ## c] "=qm"
+#endif
+
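A minimal sketch of how the new CC_SET()/CC_OUT() pair is meant to be used, mirroring the bitops and rdrand conversions elsewhere in this series (the helper name is invented):

/* Illustrative only: report whether the decrement reached zero via ZF. */
static inline bool dec_and_test_sketch(int *p)
{
	bool zero;

	asm volatile("decl %1\n\t"
		     CC_SET(e)
		     : CC_OUT(e) (zero), "+m" (*p));
	return zero;
}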
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 3e8674288198..14635c5ea025 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -75,9 +75,9 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
* true if the result is zero, or false for all
* other cases.
*/
-static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
}
/**
@@ -112,9 +112,9 @@ static __always_inline void atomic_dec(atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static __always_inline int atomic_dec_and_test(atomic_t *v)
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
}
/**
@@ -125,9 +125,9 @@ static __always_inline int atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static __always_inline int atomic_inc_and_test(atomic_t *v)
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
}
/**
@@ -139,9 +139,9 @@ static __always_inline int atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static __always_inline int atomic_add_negative(int i, atomic_t *v)
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
}
/**
@@ -171,6 +171,16 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ return xadd(&v->counter, i);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+ return xadd(&v->counter, -i);
+}
+
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
@@ -190,10 +200,29 @@ static inline void atomic_##op(int i, atomic_t *v) \
: "memory"); \
}
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int old, val = atomic_read(v); \
+ for (;;) { \
+ old = atomic_cmpxchg(v, val, val c_op i); \
+ if (old == val) \
+ break; \
+ val = old; \
+ } \
+ return old; \
+}
+
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op) \
+ ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &)
+ATOMIC_OPS(or , |)
+ATOMIC_OPS(xor, ^)
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
/**
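The atomic_fetch_*() helpers added above return the value the counter held before the operation, which is what makes "first caller wins" idioms cheap. A small illustrative user (names invented):

static atomic_t probe_flags = ATOMIC_INIT(0);

/* True only for the first caller that actually sets the bit. */
static bool claim_flag(int bit)
{
	int old = atomic_fetch_or(1 << bit, &probe_flags);

	return !(old & (1 << bit));
}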
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a984111135b1..71d7705fb303 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -320,10 +320,29 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
c = old; \
}
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define ATOMIC64_FETCH_OP(op, c_op) \
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
+{ \
+ long long old, c = 0; \
+ while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
+ c = old; \
+ return old; \
+}
+
+ATOMIC64_FETCH_OP(add, +)
+
+#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 037351022f54..89ed2f6ae2f7 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -70,9 +70,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
-static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}
/**
@@ -109,9 +109,9 @@ static __always_inline void atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline int atomic64_dec_and_test(atomic64_t *v)
+static inline bool atomic64_dec_and_test(atomic64_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}
/**
@@ -122,9 +122,9 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline int atomic64_inc_and_test(atomic64_t *v)
+static inline bool atomic64_inc_and_test(atomic64_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}
/**
@@ -136,9 +136,9 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline int atomic64_add_negative(long i, atomic64_t *v)
+static inline bool atomic64_add_negative(long i, atomic64_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}
/**
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
return atomic64_add_return(-i, v);
}
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
+{
+ return xadd(&v->counter, i);
+}
+
+static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+{
+ return xadd(&v->counter, -i);
+}
+
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
@@ -180,7 +190,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
: "memory"); \
}
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#define ATOMIC64_FETCH_OP(op, c_op) \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
+{ \
+ long old, val = atomic64_read(v); \
+ for (;;) { \
+ old = atomic64_cmpxchg(v, val, val c_op i); \
+ if (old == val) \
+ break; \
+ val = old; \
+ } \
+ return old; \
+}
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#endif /* _ASM_X86_ATOMIC64_64_H */
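Likewise, atomic64_fetch_add()/atomic64_fetch_sub() map onto XADD and hand back the previous value, which makes unique-ID allocation a one-liner (sketch, names invented):

static atomic64_t next_seq = ATOMIC64_INIT(0);

/* Each caller receives the pre-increment value: 0, 1, 2, ... */
static u64 alloc_seq(void)
{
	return atomic64_fetch_add(1, &next_seq);
}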
diff --git a/arch/x86/include/asm/bios_ebda.h b/arch/x86/include/asm/bios_ebda.h
index 2b00c776f223..4b7b8e71607e 100644
--- a/arch/x86/include/asm/bios_ebda.h
+++ b/arch/x86/include/asm/bios_ebda.h
@@ -17,7 +17,7 @@ static inline unsigned int get_bios_ebda(void)
return address; /* 0 means none */
}
-void reserve_ebda_region(void);
+void reserve_bios_regions(void);
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
/*
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 7766d1cf096e..68557f52b961 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -201,9 +201,9 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
}
/**
@@ -213,7 +213,7 @@ static __always_inline int test_and_set_bit(long nr, volatile unsigned long *add
*
* This is the same as test_and_set_bit on x86.
*/
-static __always_inline int
+static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
@@ -228,13 +228,13 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
+ bool oldbit;
asm("bts %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR
+ CC_SET(c)
+ : CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
@@ -247,9 +247,9 @@ static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *a
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
}
/**
@@ -268,25 +268,25 @@ static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *a
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
-static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
+ bool oldbit;
asm volatile("btr %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR
+ CC_SET(c)
+ : CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/* WARNING: non atomic and it can be reordered! */
-static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
+ bool oldbit;
asm volatile("btc %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR
+ CC_SET(c)
+ : CC_OUT(c) (oldbit), ADDR
: "Ir" (nr) : "memory");
return oldbit;
@@ -300,24 +300,24 @@ static __always_inline int __test_and_change_bit(long nr, volatile unsigned long
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
}
-static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
-static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
- int oldbit;
+ bool oldbit;
asm volatile("bt %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit)
+ CC_SET(c)
+ : CC_OUT(c) (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
@@ -329,7 +329,7 @@ static __always_inline int variable_test_bit(long nr, volatile const unsigned lo
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static int test_bit(int nr, const volatile unsigned long *addr);
+static bool test_bit(int nr, const volatile unsigned long *addr);
#endif
#define test_bit(nr, addr) \
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 532f85e6651f..7b53743ed267 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -2,8 +2,7 @@
#define _ASM_X86_CHECKSUM_32_H
#include <linux/in6.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 5a3b2c119ed0..a18806165fe4 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -40,6 +40,7 @@ typedef s32 compat_long_t;
typedef s64 __attribute__((aligned(4))) compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
+typedef u32 compat_u32;
typedef u64 __attribute__((aligned(4))) compat_u64;
typedef u32 compat_uptr_t;
@@ -181,6 +182,16 @@ typedef struct compat_siginfo {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
unsigned int _addr; /* faulting insn/memory ref. */
+ short int _addr_lsb; /* Valid LSB of the reported address. */
+ union {
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ compat_uptr_t _lower;
+ compat_uptr_t _upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ compat_u32 _pkey;
+ };
} _sigfault;
/* SIGPOLL */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 678637ad7476..9b7fa6313f1a 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -16,8 +16,8 @@ extern void prefill_possible_map(void);
static inline void prefill_possible_map(void) {}
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
+#define cpu_acpi_id(cpu) 0
#define safe_smp_processor_id() 0
-#define stack_smp_processor_id() 0
#endif /* CONFIG_SMP */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 483fb547e3c0..1d2b69fc0ceb 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -49,43 +49,59 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
-#define REQUIRED_MASK_BIT_SET(bit) \
- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0 )) || \
- (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1 )) || \
- (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2 )) || \
- (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3 )) || \
- (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4 )) || \
- (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5 )) || \
- (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6 )) || \
- (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7 )) || \
- (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8 )) || \
- (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9 )) || \
- (((bit)>>5)==10 && (1UL<<((bit)&31) & REQUIRED_MASK10)) || \
- (((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \
- (((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \
- (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
- (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
- (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
-
-#define DISABLED_MASK_BIT_SET(bit) \
- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0 )) || \
- (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1 )) || \
- (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2 )) || \
- (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3 )) || \
- (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4 )) || \
- (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5 )) || \
- (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6 )) || \
- (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7 )) || \
- (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8 )) || \
- (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9 )) || \
- (((bit)>>5)==10 && (1UL<<((bit)&31) & DISABLED_MASK10)) || \
- (((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \
- (((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \
- (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
- (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
- (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
+/*
+ * There are 32 bits/features in each mask word. The high bits
+ * (selected with (bit>>5)) give us the word number and the low 5
+ * bits give us the bit/feature number inside the word.
+ * (1UL<<((bit)&31)) gives us a mask for the feature_bit so we can
+ * see if it is set in the mask word.
+ */
+#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
+ (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
+
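Worked example (not part of the patch): X86_FEATURE_SMAP is defined as 9*32+20 = 308, so (bit)>>5 = 9 selects REQUIRED_MASK9/DISABLED_MASK9 and (bit)&31 = 20 selects bit 20 within that word, which is exactly the word/bit pair the removed hand-unrolled macros encoded explicitly.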
+#define REQUIRED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+ REQUIRED_MASK_CHECK || \
+ BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+
+#define DISABLED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+ DISABLED_MASK_CHECK || \
+ BUILD_BUG_ON_ZERO(NCAPINTS != 18))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4a413485f9eb..92a8308b96f6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -225,7 +225,6 @@
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
@@ -301,10 +300,6 @@
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
@@ -312,5 +307,7 @@
*/
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
-
+#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 911e9358ceb1..85599ad4d024 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,5 +56,7 @@
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 78d1e7467eae..d0bb76d81402 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -41,10 +41,9 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
/*
* Wrap all the virtual calls in a way that forces the parameters on the stack.
*/
-#define arch_efi_call_virt(f, args...) \
+#define arch_efi_call_virt(p, f, args...) \
({ \
- ((efi_##f##_t __attribute__((regparm(0)))*) \
- efi.systab->runtime->f)(args); \
+ ((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args); \
})
#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
@@ -81,8 +80,8 @@ struct efi_scratch {
} \
})
-#define arch_efi_call_virt(f, args...) \
- efi_call((void *)efi.systab->runtime->f, args) \
+#define arch_efi_call_virt(p, f, args...) \
+ efi_call((void *)p->f, args) \
#define arch_efi_call_virt_teardown() \
({ \
@@ -125,7 +124,6 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
-extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 31ac8e6d9f36..116b58347501 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -18,6 +18,7 @@
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
+#include <asm/trace/fpu.h>
/*
* High level FPU state handling functions:
@@ -524,6 +525,7 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+ trace_x86_fpu_regs_deactivated(fpu);
}
/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
@@ -533,6 +535,7 @@ static inline void __fpregs_activate(struct fpu *fpu)
fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
+ trace_x86_fpu_regs_activated(fpu);
}
/*
@@ -604,11 +607,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
/* But leave fpu_fpregs_owner_ctx! */
old_fpu->fpregs_active = 0;
+ trace_x86_fpu_regs_deactivated(old_fpu);
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
new_fpu->counter++;
__fpregs_activate(new_fpu);
+ trace_x86_fpu_regs_activated(new_fpu);
prefetch(&new_fpu->state);
} else {
__fpregs_deactivate_hw();
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 36b90bbfc69f..48df486b02f9 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -122,6 +122,7 @@ enum xfeature {
#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
+#define XFEATURE_MASK_PT (1 << XFEATURE_PT_UNIMPLEMENTED_SO_FAR)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
@@ -231,6 +232,12 @@ struct xstate_header {
} __attribute__((packed));
/*
+ * xstate_header.xcomp_bv[63] indicates that the extended_state_area
+ * is in compacted format.
+ */
+#define XCOMP_BV_COMPACTED_FORMAT ((u64)1 << 63)
+
+/*
* This is our most modern FPU state format, as saved by the XSAVE
* and restored by the XRSTOR instructions.
*
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 38951b0fcc5a..ae55a43e09c0 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -18,6 +18,9 @@
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
+/* Supervisor features */
+#define XFEATURE_MASK_SUPERVISOR (XFEATURE_MASK_PT)
+
/* Supported features which support lazy state saving */
#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
XFEATURE_MASK_SSE | \
@@ -39,7 +42,6 @@
#define REX_PREFIX
#endif
-extern unsigned int xstate_size;
extern u64 xfeatures_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
@@ -48,5 +50,9 @@ extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
void fpu__xstate_clear_all_cpu_caps(void);
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field);
-
+int using_compacted_format(void);
+int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
+ void __user *ubuf, struct xregs_state *xsave);
+int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
+ struct xregs_state *xsave);
#endif
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 74a2e312e8a2..02aff0867211 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -48,6 +48,7 @@
/* AVX VEX prefixes */
#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
+#define INAT_PFX_EVEX 15 /* EVEX prefix */
#define INAT_LSTPFX_MAX 3
#define INAT_LGCPFX_MAX 11
@@ -89,6 +90,7 @@
#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
+#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
/* Attribute making macros for attribute tables */
#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
@@ -141,7 +143,13 @@ static inline int inat_last_prefix_id(insn_attr_t attr)
static inline int inat_is_vex_prefix(insn_attr_t attr)
{
attr &= INAT_PFX_MASK;
- return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3;
+ return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
+ attr == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_evex_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
}
static inline int inat_is_vex3_prefix(insn_attr_t attr)
@@ -216,6 +224,11 @@ static inline int inat_accept_vex(insn_attr_t attr)
static inline int inat_must_vex(insn_attr_t attr)
{
- return attr & INAT_VEXONLY;
+ return attr & (INAT_VEXONLY | INAT_EVEXONLY);
+}
+
+static inline int inat_must_evex(insn_attr_t attr)
+{
+ return attr & INAT_EVEXONLY;
}
#endif
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index e7814b74caf8..b3e32b010ab1 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -91,6 +91,7 @@ struct insn {
#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
/* VEX bit fields */
+#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
#define X86_VEX2_M 1 /* VEX2.M always 1 */
#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
@@ -133,6 +134,13 @@ static inline int insn_is_avx(struct insn *insn)
return (insn->vex_prefix.value != 0);
}
+static inline int insn_is_evex(struct insn *insn)
+{
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+ return (insn->vex_prefix.nbytes == 4);
+}
+
/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
@@ -144,8 +152,10 @@ static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
{
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
return X86_VEX2_M;
- else
+ else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VEX */
return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+ else /* EVEX */
+ return X86_EVEX_M(insn->vex_prefix.bytes[1]);
}
static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644
index 000000000000..627719475457
--- /dev/null
+++ b/arch/x86/include/asm/intel-family.h
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them. There's no processor called "SILVERMONT2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH 0x0E
+#define INTEL_FAM6_CORE2_MEROM 0x0F
+#define INTEL_FAM6_CORE2_MEROM_L 0x16
+#define INTEL_FAM6_CORE2_PENRYN 0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
+
+#define INTEL_FAM6_NEHALEM 0x1E
+#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */
+#define INTEL_FAM6_NEHALEM_EP 0x1A
+#define INTEL_FAM6_NEHALEM_EX 0x2E
+#define INTEL_FAM6_WESTMERE 0x25
+#define INTEL_FAM6_WESTMERE_EP 0x2C
+#define INTEL_FAM6_WESTMERE_EX 0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE 0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
+#define INTEL_FAM6_IVYBRIDGE 0x3A
+#define INTEL_FAM6_IVYBRIDGE_X 0x3E
+
+#define INTEL_FAM6_HASWELL_CORE 0x3C
+#define INTEL_FAM6_HASWELL_X 0x3F
+#define INTEL_FAM6_HASWELL_ULT 0x45
+#define INTEL_FAM6_HASWELL_GT3E 0x46
+
+#define INTEL_FAM6_BROADWELL_CORE 0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D 0x56
+#define INTEL_FAM6_BROADWELL_GT3E 0x47
+#define INTEL_FAM6_BROADWELL_X 0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
+#define INTEL_FAM6_SKYLAKE_X 0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
+#define INTEL_FAM6_ATOM_LINCROFT 0x26
+#define INTEL_FAM6_ATOM_PENWELL 0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avoton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
+#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
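A minimal sketch of how these defines are meant to replace bare model numbers in CPU-model checks (the quirk itself is hypothetical, kernel context assumed):

/* Illustrative only: gate a workaround on one specific model. */
static void maybe_apply_quirk(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_XEON_D)
		pr_info("applying hypothetical BDX-DE quirk\n");
}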
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 7c5af123bdbd..9d6b097aa73d 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -12,9 +12,17 @@
#define _ASM_X86_INTEL_MID_H
#include <linux/sfi.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
extern int intel_mid_pci_init(void);
+extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+
+#define INTEL_MID_PWR_LSS_OFFSET 4
+#define INTEL_MID_PWR_LSS_TYPE (1 << 7)
+
+extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev);
+
extern int get_gpio_by_name(const char *name);
extern void intel_scu_device_register(struct platform_device *pdev);
extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
@@ -34,13 +42,28 @@ struct devs_id {
void *(*get_platform_data)(void *info);
/* Custom handler for devices */
void (*device_handler)(struct sfi_device_table_entry *pentry,
- struct devs_id *dev);
+ struct devs_id *dev);
};
-#define sfi_device(i) \
- static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
+#define sfi_device(i) \
+ static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
__attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
+/**
+* struct mid_sd_board_info - template for SD device creation
+* @name: identifies the driver
+* @bus_num: board-specific identifier for a given SD controller
+* @max_clk: the maximum frequency the device supports
+* @platform_data: the particular data stored there is driver-specific
+*/
+struct mid_sd_board_info {
+ char name[SFI_NAME_LEN];
+ int bus_num;
+ unsigned short addr;
+ u32 max_clk;
+ void *platform_data;
+};
+
/*
* Medfield is the follow-up of Moorestown, it combines two chip solution into
* one. Other than that it also added always-on and constant tsc and lapic
@@ -60,7 +83,7 @@ extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
/**
* struct intel_mid_ops - Interface between intel-mid & sub archs
* @arch_setup: arch_setup function to re-initialize platform
- * structures (x86_init, x86_platform_init)
+ * structures (x86_init, x86_platform_init)
*
* This structure can be extended if any new interface is required
* between intel-mid & its sub arch files.
@@ -70,20 +93,20 @@ struct intel_mid_ops {
};
/* Helper API's for INTEL_MID_OPS_INIT */
-#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
- [cpuid] = get_##cpuname##_ops
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
+ [cpuid] = get_##cpuname##_ops
/* Maximum number of CPU ops */
-#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
/*
* For every new cpu addition, a weak get_<cpuname>_ops() function needs be
* declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
*/
-#define INTEL_MID_OPS_INIT {\
- DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
- DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
- DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+#define INTEL_MID_OPS_INIT { \
+ DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
+ DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
+ DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
};
#ifdef CONFIG_X86_INTEL_MID
@@ -100,8 +123,8 @@ static inline bool intel_mid_has_msic(void)
#else /* !CONFIG_X86_INTEL_MID */
-#define intel_mid_identify_cpu() (0)
-#define intel_mid_has_msic() (0)
+#define intel_mid_identify_cpu() 0
+#define intel_mid_has_msic() 0
#endif /* !CONFIG_X86_INTEL_MID */
@@ -117,35 +140,38 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
* Penwell uses spread spectrum clock, so the freq number is not exactly
* the same as reported by MSR based on SDM.
*/
-#define FSB_FREQ_83SKU 83200
-#define FSB_FREQ_100SKU 99840
-#define FSB_FREQ_133SKU 133000
+#define FSB_FREQ_83SKU 83200
+#define FSB_FREQ_100SKU 99840
+#define FSB_FREQ_133SKU 133000
-#define FSB_FREQ_167SKU 167000
-#define FSB_FREQ_200SKU 200000
-#define FSB_FREQ_267SKU 267000
-#define FSB_FREQ_333SKU 333000
-#define FSB_FREQ_400SKU 400000
+#define FSB_FREQ_167SKU 167000
+#define FSB_FREQ_200SKU 200000
+#define FSB_FREQ_267SKU 267000
+#define FSB_FREQ_333SKU 333000
+#define FSB_FREQ_400SKU 400000
/* Bus Select SoC Fuse value */
-#define BSEL_SOC_FUSE_MASK 0x7
-#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */
-#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */
-#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */
+#define BSEL_SOC_FUSE_MASK 0x7
+/* FSB 133MHz */
+#define BSEL_SOC_FUSE_001 0x1
+/* FSB 100MHz */
+#define BSEL_SOC_FUSE_101 0x5
+/* FSB 83MHz */
+#define BSEL_SOC_FUSE_111 0x7
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX 8
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX 8
extern void intel_scu_devices_create(void);
extern void intel_scu_devices_destroy(void);
/* VRTC timer */
-#define MRST_VRTC_MAP_SZ (1024)
-/*#define MRST_VRTC_PGOFFSET (0xc00) */
+#define MRST_VRTC_MAP_SZ 1024
+/* #define MRST_VRTC_PGOFFSET 0xc00 */
extern void intel_mid_rtc_init(void);
-/* the offset for the mapping of global gpio pin to irq */
-#define INTEL_MID_IRQ_OFFSET 0x100
+/* The offset for the mapping of global gpio pin to irq */
+#define INTEL_MID_IRQ_OFFSET 0x100
#endif /* _ASM_X86_INTEL_MID_H */
diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
new file mode 100644
index 000000000000..2674ee3de748
--- /dev/null
+++ b/arch/x86/include/asm/kaslr.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_KASLR_H_
+#define _ASM_KASLR_H_
+
+unsigned long kaslr_get_random_long(const char *purpose);
+
+#ifdef CONFIG_RANDOMIZE_MEMORY
+extern unsigned long page_offset_base;
+extern unsigned long vmalloc_base;
+
+void kernel_randomize_memory(void);
+#else
+static inline void kernel_randomize_memory(void) { }
+#endif /* CONFIG_RANDOMIZE_MEMORY */
+
+#endif
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index e5f5dc9787d5..1ef9d581b5d9 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
unsigned long *sp, unsigned long bp);
+extern void show_stack_regs(struct pt_regs *regs);
extern void __show_regs(struct pt_regs *regs, int all);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4421b5da409d..d1d1e5094c28 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
#define RELATIVECALL_OPCODE 0xe8
#define RELATIVE_ADDR_SIZE 4
#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) \
- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR))) \
- ? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+ (current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR) \
+ (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \
+ MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
#define flush_insn_slot(p) do { } while (0)
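In other words, MIN_STACK_SIZE(addr) now evaluates to min(MAX_STACK_SIZE, current_top_of_stack() - addr): the number of bytes of live stack above addr, capped at 64, which bounds how much stack the kprobes code copies aside around a probed call. The old open-coded form derived the same limit from current_thread_info() + THREAD_SIZE.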
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e0fbe7e70dc1..69e62862b622 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -27,6 +27,7 @@
#include <linux/irqbypass.h>
#include <linux/hyperv.h>
+#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
@@ -1368,4 +1369,14 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ return __default_cpu_present_to_apicid(mps_cpu);
+#else
+ WARN_ON_ONCE(1);
+ return BAD_APICID;
+#endif
+}
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index a7f9181f63f3..ed80003ce3e2 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -22,7 +22,6 @@
#define _ASM_X86_LIVEPATCH_H
#include <asm/setup.h>
-#include <linux/module.h>
#include <linux/ftrace.h>
static inline int klp_check_compiler_support(void)
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 4ad6560847b1..7511978093eb 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -50,9 +50,9 @@ static inline void local_sub(long i, local_t *l)
* true if the result is zero, or false for all
* other cases.
*/
-static inline int local_sub_and_test(long i, local_t *l)
+static inline bool local_sub_and_test(long i, local_t *l)
{
- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
+ GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
}
/**
@@ -63,9 +63,9 @@ static inline int local_sub_and_test(long i, local_t *l)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline int local_dec_and_test(local_t *l)
+static inline bool local_dec_and_test(local_t *l)
{
- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
+ GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
}
/**
@@ -76,9 +76,9 @@ static inline int local_dec_and_test(local_t *l)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline int local_inc_and_test(local_t *l)
+static inline bool local_inc_and_test(local_t *l)
{
- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
+ GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
}
/**
@@ -90,9 +90,9 @@ static inline int local_inc_and_test(local_t *l)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline int local_add_negative(long i, local_t *l)
+static inline bool local_add_negative(long i, local_t *l)
{
- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
+ GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
}
/**
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 9d3a96c4da78..da0d81fa0b54 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -133,40 +133,14 @@ static inline unsigned int x86_cpuid_family(void)
#ifdef CONFIG_MICROCODE
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
-extern int __init save_microcode_in_initrd(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
#else
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
-static inline int __init save_microcode_in_initrd(void) { return 0; }
static inline void reload_early_microcode(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
#endif
-static inline unsigned long get_initrd_start(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- return initrd_start;
-#else
- return 0;
-#endif
-}
-
-static inline unsigned long get_initrd_start_addr(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-#ifdef CONFIG_X86_32
- unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
-
- return (unsigned long)__pa_nodebug(*initrd_start_p);
-#else
- return get_initrd_start();
-#endif
-#else /* CONFIG_BLK_DEV_INITRD */
- return 0;
-#endif
-}
-
#endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index adfc847a395e..15eb75484cc0 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -62,7 +62,6 @@ extern int apply_microcode_amd(int cpu);
extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
#define PATCH_MAX_SIZE PAGE_SIZE
-extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
#ifdef CONFIG_MICROCODE_AMD
extern void __init load_ucode_amd_bsp(unsigned int family);
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 603417f8dd6c..5e69154c9f07 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -70,9 +70,4 @@ static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL;
static inline void reload_ucode_intel(void) {}
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-extern int save_mc_for_early(u8 *mc);
-#else
-static inline int save_mc_for_early(u8 *mc) { return 0; }
-#endif
#endif /* _ASM_X86_MICROCODE_INTEL_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 5a73a9c62c39..56f4c6676b29 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -64,8 +64,6 @@
#define MSR_OFFCORE_RSP_0 0x000001a6
#define MSR_OFFCORE_RSP_1 0x000001a7
-#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
-#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae
#define MSR_TURBO_RATIO_LIMIT 0x000001ad
#define MSR_TURBO_RATIO_LIMIT1 0x000001ae
#define MSR_TURBO_RATIO_LIMIT2 0x000001af
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 7dc1d8fef7fd..b5fee97813cd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
: : "c" (msr), "a"(low), "d" (high) : "memory");
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
index 85e6cda45a02..e9355a84fc67 100644
--- a/arch/x86/include/asm/mutex_32.h
+++ b/arch/x86/include/asm/mutex_32.h
@@ -101,7 +101,7 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
int (*fail_fn)(atomic_t *))
{
/* cmpxchg because it never induces a false contention state. */
- if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
return 1;
return 0;
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 07537a44216e..d9850758464e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -118,10 +118,10 @@ do { \
static inline int __mutex_fastpath_trylock(atomic_t *count,
int (*fail_fn)(atomic_t *))
{
- if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
return 1;
- else
- return 0;
+
+ return 0;
}
#endif /* _ASM_X86_MUTEX_64_H */
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 0deeb2d26df7..f37f2d8a2989 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -97,7 +97,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
*/
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
- if (!current_set_polling_and_test()) {
+ if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
mb();
clflush((void *)&current_thread_info()->flags);
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 3a52ee0e726d..3bae4969ac65 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -13,7 +13,8 @@
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
-#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#define __PAGE_OFFSET_BASE _AC(CONFIG_PAGE_OFFSET, UL)
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#define __START_KERNEL_map __PAGE_OFFSET
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index d5c2f8b40faa..9215e0527647 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,6 +1,10 @@
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
+#ifndef __ASSEMBLY__
+#include <asm/kaslr.h>
+#endif
+
#ifdef CONFIG_KASAN
#define KASAN_STACK_ORDER 1
#else
@@ -32,7 +36,12 @@
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
-#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE _AC(0xffff880000000000, UL)
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define __PAGE_OFFSET page_offset_base
+#else
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
+#endif /* CONFIG_RANDOMIZE_MEMORY */
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e0ba66ca68c6..e02e3f80d363 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -510,14 +510,15 @@ do { \
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
- int old__; \
- asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (var) \
+ bool old__; \
+ asm volatile("btr %2,"__percpu_arg(1)"\n\t" \
+ CC_SET(c) \
+ : CC_OUT(c) (old__), "+m" (var) \
: "dIr" (bit)); \
old__; \
})
-static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
+static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
const unsigned long __percpu *addr)
{
unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
@@ -529,14 +530,14 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
#endif
}
-static inline int x86_this_cpu_variable_test_bit(int nr,
+static inline bool x86_this_cpu_variable_test_bit(int nr,
const unsigned long __percpu *addr)
{
- int oldbit;
+ bool oldbit;
asm volatile("bt "__percpu_arg(2)",%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit)
+ CC_SET(c)
+ : CC_OUT(c) (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index bf7f8b55b0f9..b6d425999f99 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -81,7 +81,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *page;
- page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
+
+ if (mm == &init_mm)
+ gfp &= ~__GFP_ACCOUNT;
+ page = alloc_pages(gfp, 0);
if (!page)
return NULL;
if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +129,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ gfp_t gfp = GFP_KERNEL_ACCOUNT;
+
+ if (mm == &init_mm)
+ gfp &= ~__GFP_ACCOUNT;
+ return (pud_t *)get_zeroed_page(gfp);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1a27396b6ea0..437feb436efa 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -480,7 +480,7 @@ pte_t *populate_extra_pte(unsigned long vaddr);
static inline int pte_none(pte_t pte)
{
- return !pte.pte;
+ return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}
#define __HAVE_ARCH_PTE_SAME
@@ -552,7 +552,8 @@ static inline int pmd_none(pmd_t pmd)
{
/* Only check low word on 32-bit platforms, since it might be
out of sync with upper half. */
- return (unsigned long)native_pmd_val(pmd) == 0;
+ unsigned long val = native_pmd_val(pmd);
+ return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
@@ -616,7 +617,7 @@ static inline unsigned long pages_to_mb(unsigned long npg)
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
- return native_pud_val(pud) == 0;
+ return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}
static inline int pud_present(pud_t pud)
@@ -694,6 +695,12 @@ static inline int pgd_bad(pgd_t pgd)
static inline int pgd_none(pgd_t pgd)
{
+ /*
+ * There is no need to do a workaround for the KNL stray
+ * A/D bit erratum here. PGDs only point to page tables
+ * except on 32-bit non-PAE which is not supported on
+ * KNL.
+ */
return !native_pgd_val(pgd);
}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
@@ -729,6 +736,23 @@ extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
+#ifdef CONFIG_X86_64
+/* Realmode trampoline initialization. */
+extern pgd_t trampoline_pgd_entry;
+static inline void __meminit init_trampoline_default(void)
+{
+ /* Default trampoline pgd value */
+ trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
+}
+# ifdef CONFIG_RANDOMIZE_MEMORY
+void __meminit init_trampoline(void);
+# else
+# define init_trampoline init_trampoline_default
+# endif
+#else
+static inline void init_trampoline(void) { }
+#endif
+
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 2ee781114d34..7e8ec7ae10fa 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -140,18 +140,32 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
-/* Encode and de-code a swap entry */
+/*
+ * Encode and de-code a swap entry
+ *
+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
+ * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+ * there. We also need to avoid using A and D because of an
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
+ */
+#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+/* Place the offset above the type: */
+#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
-#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
+#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
& ((1U << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
+#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << (_PAGE_BIT_PRESENT + 1)) \
- | ((offset) << SWP_OFFSET_SHIFT) })
+ ((type) << (SWP_TYPE_FIRST_BIT)) \
+ | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index e6844dfb4471..6fdef9eef2d5 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -5,6 +5,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/kaslr.h>
/*
* These are used to make use of C type-checking..
@@ -53,10 +54,16 @@ typedef struct { pteval_t pte; } pte_t;
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
-#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
-#define VMALLOC_START _AC(0xffffc90000000000, UL)
-#define VMALLOC_END _AC(0xffffe8ffffffffff, UL)
-#define VMEMMAP_START _AC(0xffffea0000000000, UL)
+#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define VMALLOC_SIZE_TB _AC(32, UL)
+#define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
+#define VMEMMAP_START _AC(0xffffea0000000000, UL)
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define VMALLOC_START vmalloc_base
+#else
+#define VMALLOC_START __VMALLOC_BASE
+#endif /* CONFIG_RANDOMIZE_MEMORY */
+#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7b5efe264eff..f1218f512f62 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -70,6 +70,12 @@
_PAGE_PKEY_BIT2 | \
_PAGE_PKEY_BIT3)
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
+#else
+#define _PAGE_KNL_ERRATUM_MASK 0
+#endif
+
#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
@@ -475,8 +481,6 @@ extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags);
-void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
- unsigned numpages);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
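
_PAGE_KNL_ERRATUM_MASK is what the *_none() checks above strip out. A short worked example, assuming the usual x86 bit layout (_PAGE_ACCESSED = 1 << 5, _PAGE_DIRTY = 1 << 6, so the mask is 0x60): a cleared PTE on which the erratum stray-sets the Accessed bit reads back as 0x20, and

	0x20 & ~0x60 == 0x00

so pte_none() still reports the entry as empty rather than treating it as populated.
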
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index fbc5e92e1ecc..643eba42d620 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -26,13 +26,11 @@
* @n: length of the copy in bytes
*
* Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
+ * a subsequent pmem driver flush operation will drain posted write queues.
*/
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
- size_t n)
+static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
- int unwritten;
+ int rem;
/*
* We are copying between two kernel buffers, if
@@ -40,59 +38,36 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
* fault) we would have already reported a general protection fault
* before the WARN+BUG.
*/
- unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
- (void __user *) src, n);
- if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
- __func__, dst, src, unwritten))
+ rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
+ if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
+ __func__, dst, src, rem))
BUG();
}
-static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
- size_t n)
+static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
{
if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
- return memcpy_mcsafe(dst, (void __force *) src, n);
- memcpy(dst, (void __force *) src, n);
+ return memcpy_mcsafe(dst, src, n);
+ memcpy(dst, src, n);
return 0;
}
/**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
- /*
- * wmb() to 'sfence' all previous writes such that they are
- * architecturally visible to 'pcommit'. Note, that we've
- * already arranged for pmem writes to avoid the cache via
- * arch_memcpy_to_pmem().
- */
- wmb();
- pcommit_sfence();
-}
-
-/**
* arch_wb_cache_pmem - write back a cache range with CLWB
* @vaddr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
- * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call.
+ * instruction.
*/
-static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
unsigned long clflush_mask = x86_clflush_size - 1;
- void *vaddr = (void __force *)addr;
- void *vend = vaddr + size;
+ void *vend = addr + size;
void *p;
- for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+ for (p = (void *)((unsigned long)addr & ~clflush_mask);
p < vend; p += x86_clflush_size)
clwb(p);
}
@@ -113,16 +88,14 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
*/
-static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
- void *vaddr = (void __force *)addr;
size_t len;
/* TODO: skip the write-back by always using non-temporal stores */
- len = copy_from_iter_nocache(vaddr, bytes, i);
+ len = copy_from_iter_nocache(addr, bytes, i);
if (__iter_needs_pmem_wb(i))
arch_wb_cache_pmem(addr, bytes);
@@ -136,28 +109,16 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
*/
-static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+static inline void arch_clear_pmem(void *addr, size_t size)
{
- void *vaddr = (void __force *)addr;
-
- memset(vaddr, 0, size);
+ memset(addr, 0, size);
arch_wb_cache_pmem(addr, size);
}
-static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+static inline void arch_invalidate_pmem(void *addr, size_t size)
{
- clflush_cache_range((void __force *) addr, size);
-}
-
-static inline bool __arch_has_wmb_pmem(void)
-{
- /*
- * We require that wmb() be an 'sfence', that is only guaranteed on
- * 64-bit builds
- */
- return static_cpu_has(X86_FEATURE_PCOMMIT);
+ clflush_cache_range(addr, size);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index d397deb58146..17f218645701 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val)
*/
static __always_inline bool __preempt_count_dec_and_test(void)
{
- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
}
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 62c6cc3cc5d3..63def9537a2d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -367,10 +367,15 @@ DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif /* X86_64 */
-extern unsigned int xstate_size;
+extern unsigned int fpu_kernel_xstate_size;
+extern unsigned int fpu_user_xstate_size;
struct perf_event;
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
struct thread_struct {
/* Cached TLS descriptors: */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
@@ -419,6 +424,11 @@ struct thread_struct {
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
+ mm_segment_t addr_limit;
+
+ unsigned int sig_on_uaccess_err:1;
+ unsigned int uaccess_err:1; /* uaccess failed */
+
/* Floating point and extended processor state */
struct fpu fpu;
/*
@@ -490,11 +500,6 @@ static inline void load_sp0(struct tss_struct *tss,
#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -716,6 +721,7 @@ static inline void spin_lock_prefetch(const void *x)
.sp0 = TOP_OF_INIT_STACK, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
+ .addr_limit = KERNEL_DS, \
}
extern unsigned long thread_saved_pc(struct task_struct *tsk);
@@ -765,8 +771,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE_MAX
-#define INIT_THREAD { \
- .sp0 = TOP_OF_INIT_STACK \
+#define INIT_THREAD { \
+ .sp0 = TOP_OF_INIT_STACK, \
+ .addr_limit = KERNEL_DS, \
}
/*
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6271281f947d..2b5d686ea9f3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -83,12 +83,6 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code);
-extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
-extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
- unsigned long phase1_result);
-
-extern long syscall_trace_enter(struct pt_regs *);
-
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index fdcc04020636..7c1c89598688 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -69,29 +69,22 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
}
static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
- u64 delta = rdtsc_ordered() - src->tsc_timestamp;
- return pvclock_scale_delta(delta, src->tsc_to_system_mul,
- src->tsc_shift);
-}
-
-static __always_inline
unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
cycle_t *cycles, u8 *flags)
{
unsigned version;
- cycle_t ret, offset;
- u8 ret_flags;
+ cycle_t offset;
+ u64 delta;
version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
- offset = pvclock_get_nsec_offset(src);
- ret = src->system_time + offset;
- ret_flags = src->flags;
-
- *cycles = ret;
- *flags = ret_flags;
+ delta = rdtsc_ordered() - src->tsc_timestamp;
+ offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+ *cycles = src->system_time + offset;
+ *flags = src->flags;
return version;
}
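
__pvclock_read_cycles() now returns the version it sampled and leaves the retry to the caller; the added smp_rmb() keeps the TSC and time-info reads from being hoisted above the version load. A hedged sketch of the caller-side, seqcount-style loop (close to, but not verbatim, the pvclock read path):

static cycle_t example_pvclock_read(const struct pvclock_vcpu_time_info *src)
{
	unsigned version;
	cycle_t cycles;
	u8 flags;

	do {
		version = __pvclock_read_cycles(src, &cycles, &flags);
		/* Re-check the version only after the reads above. */
		smp_rmb();
	} while ((src->version & 1) || version != src->version);

	return cycles;
}
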
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 4916144e3c42..fac9a5c0abe9 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -99,5 +99,7 @@
#define REQUIRED_MASK14 0
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 0
+#define REQUIRED_MASK17 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 8f7866a5b9a4..661dd305694a 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -1,11 +1,13 @@
#ifndef _ASM_X86_RMWcc
#define _ASM_X86_RMWcc
-#ifdef CC_HAVE_ASM_GOTO
+#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
+
+/* Use asm goto */
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
- asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
+ asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \
: : "m" (var), ## __VA_ARGS__ \
: "memory" : cc_label); \
return 0; \
@@ -19,15 +21,17 @@ cc_label: \
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
-#else /* !CC_HAVE_ASM_GOTO */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+
+/* Use flags output or a set instruction */
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
- char c; \
- asm volatile (fullop "; set" cc " %1" \
- : "+m" (var), "=qm" (c) \
+ bool c; \
+ asm volatile (fullop ";" CC_SET(cc) \
+ : "+m" (var), CC_OUT(cc) (c) \
: __VA_ARGS__ : "memory"); \
- return c != 0; \
+ return c; \
} while (0)
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
@@ -36,6 +40,6 @@ do { \
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
-#endif /* CC_HAVE_ASM_GOTO */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
#endif /* _ASM_X86_RMWcc */
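
The first branch of the #if now steps aside when the compiler offers flag-output operands (__GCC_ASM_FLAG_OUTPUTS__, GCC 6+ on x86): CC_SET()/CC_OUT() then let the asm statement hand a condition flag back as a bool, with no set<cc> instruction and no asm goto label. A standalone sketch of the mechanism (illustrative, not the kernel macros themselves):

#include <stdbool.h>

/* Decrement *v and report whether it reached zero, taking ZF directly. */
static inline bool example_dec_and_test(int *v)
{
	bool zero;

	asm volatile("decl %[cnt]"
		     : [cnt] "+m" (*v), "=@ccz" (zero)
		     : : "memory");
	return zero;
}
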
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 453744c1d347..8dbc762ad132 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -77,7 +77,7 @@ static inline void __down_read(struct rw_semaphore *sem)
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline bool __down_read_trylock(struct rw_semaphore *sem)
{
long result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
@@ -93,7 +93,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
: "i" (RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
- return result >= 0 ? 1 : 0;
+ return result >= 0;
}
/*
@@ -134,9 +134,10 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline bool __down_write_trylock(struct rw_semaphore *sem)
{
- long result, tmp;
+ bool result;
+ long tmp0, tmp1;
asm volatile("# beginning __down_write_trylock\n\t"
" mov %0,%1\n\t"
"1:\n\t"
@@ -144,14 +145,14 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
/* was the active mask 0 before? */
" jnz 2f\n\t"
" mov %1,%2\n\t"
- " add %3,%2\n\t"
+ " add %4,%2\n\t"
LOCK_PREFIX " cmpxchg %2,%0\n\t"
" jnz 1b\n\t"
"2:\n\t"
- " sete %b1\n\t"
- " movzbl %b1, %k1\n\t"
+ CC_SET(e)
"# ending __down_write_trylock\n\t"
- : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
+ CC_OUT(e) (result)
: "er" (RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc");
return result;
@@ -213,23 +214,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
: "memory", "cc");
}
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
- : "+m" (sem->count)
- : "er" (delta));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
- return delta + xadd(&sem->count, delta);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 2138c9ae19ee..dd1e7d6387ab 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -81,9 +81,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
static inline int __gen_sigismember(sigset_t *set, int _sig)
{
- int ret;
- asm("btl %2,%1\n\tsbbl %0,%0"
- : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+ unsigned char ret;
+ asm("btl %2,%1\n\tsetc %0"
+ : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
return ret;
}
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 66b057306f40..ebd0c164cd4e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -33,6 +33,7 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
}
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
@@ -135,6 +136,7 @@ int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
+void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
@@ -147,6 +149,7 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
+#define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu) wbinvd()
@@ -172,12 +175,6 @@ extern int safe_smp_processor_id(void);
#elif defined(CONFIG_X86_64_SMP)
#define raw_smp_processor_id() (this_cpu_read(cpu_number))
-#define stack_smp_processor_id() \
-({ \
- struct thread_info *ti; \
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
- ti->cpu; \
-})
#define safe_smp_processor_id() smp_processor_id()
#endif
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index d96d04377765..587d7914ea4b 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -253,52 +253,6 @@ static inline void clwb(volatile void *__p)
: [pax] "a" (p));
}
-/**
- * pcommit_sfence() - persistent commit and fence
- *
- * The PCOMMIT instruction ensures that data that has been flushed from the
- * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
- * memory and is durable on the DIMM. The primary use case for this is
- * persistent memory.
- *
- * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
- * with appropriate fencing.
- *
- * Example:
- * void flush_and_commit_buffer(void *vaddr, unsigned int size)
- * {
- * unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
- * void *vend = vaddr + size;
- * void *p;
- *
- * for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
- * p < vend; p += boot_cpu_data.x86_clflush_size)
- * clwb(p);
- *
- * // SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
- * // MFENCE via mb() also works
- * wmb();
- *
- * // PCOMMIT and the required SFENCE for ordering
- * pcommit_sfence();
- * }
- *
- * After this function completes the data pointed to by 'vaddr' has been
- * accepted to memory and will be durable if the 'vaddr' points to persistent
- * memory.
- *
- * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
- * things we include both the PCOMMIT and the required SFENCE in the
- * alternatives generated by pcommit_sfence().
- */
-static inline void pcommit_sfence(void)
-{
- alternative(ASM_NOP7,
- ".byte 0x66, 0x0f, 0xae, 0xf8\n\t" /* pcommit */
- "sfence",
- X86_FEATURE_PCOMMIT);
-}
-
#define nop() asm volatile ("nop")
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 7c247e7404be..0944218af9e2 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
struct thread_info;
struct stacktrace_ops;
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
unsigned long *stack,
unsigned long bp,
const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
int *graph);
extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index f28a24b51dc7..cbf8847d02a0 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -79,10 +79,10 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
+ unsigned char oldbit;
- asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "+m" (ADDR)
+ asm volatile("lock; bts %2,%1\n\tsetc %0"
+ : "=qm" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
}
@@ -97,10 +97,10 @@ static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
+ unsigned char oldbit;
- asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "+m" (ADDR)
+ asm volatile("lock; btr %2,%1\n\tsetc %0"
+ : "=qm" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
}
@@ -115,10 +115,10 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
+ unsigned char oldbit;
- asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
- : "=r" (oldbit), "+m" (ADDR)
+ asm volatile("lock; btc %2,%1\n\tsetc %0"
+ : "=qm" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
}
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 30c133ac05cd..89bff044a6f5 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -57,9 +57,6 @@ struct thread_info {
__u32 flags; /* low level flags */
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
- mm_segment_t addr_limit;
- unsigned int sig_on_uaccess_error:1;
- unsigned int uaccess_err:1; /* uaccess failed */
};
#define INIT_THREAD_INFO(tsk) \
@@ -67,7 +64,6 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
- .addr_limit = KERNEL_DS, \
}
#define init_thread_info (init_thread_union.thread_info)
@@ -186,11 +182,6 @@ static inline unsigned long current_stack_pointer(void)
# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
#endif
-/* Load thread_info address into "reg" */
-#define GET_THREAD_INFO(reg) \
- _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
- _ASM_SUB $(THREAD_SIZE),reg ;
-
/*
* ASM operand which evaluates to a 'thread_info' address of
* the current task, if it is known that "reg" is exactly "off"
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 7f991bd5031b..cf75871d2f81 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -25,16 +25,6 @@
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H
-#ifdef CONFIG_X86_32
-# ifdef CONFIG_SMP
-# define ENABLE_TOPO_DEFINES
-# endif
-#else
-# ifdef CONFIG_SMP
-# define ENABLE_TOPO_DEFINES
-# endif
-#endif
-
/*
* to preserve the visibility of NUMA_NO_NODE definition,
* moved to there from here. May be used independent of
@@ -46,6 +36,7 @@
#include <linux/cpumask.h>
#include <asm/mpspec.h>
+#include <asm/percpu.h>
/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
@@ -123,12 +114,20 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
-#ifdef ENABLE_TOPO_DEFINES
+#ifdef CONFIG_SMP
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
extern unsigned int __max_logical_packages;
#define topology_max_packages() (__max_logical_packages)
+
+extern int __max_smt_threads;
+
+static inline int topology_max_smt_threads(void)
+{
+ return __max_smt_threads;
+}
+
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
extern int topology_phys_to_logical_pkg(unsigned int pkg);
#else
@@ -136,6 +135,7 @@ extern int topology_phys_to_logical_pkg(unsigned int pkg);
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+static inline int topology_max_smt_threads(void) { return 1; }
#endif
static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
new file mode 100644
index 000000000000..9217ab1f5bf6
--- /dev/null
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -0,0 +1,119 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM x86_fpu
+
+#if !defined(_TRACE_FPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FPU_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(x86_fpu,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu),
+
+ TP_STRUCT__entry(
+ __field(struct fpu *, fpu)
+ __field(bool, fpregs_active)
+ __field(bool, fpstate_active)
+ __field(int, counter)
+ __field(u64, xfeatures)
+ __field(u64, xcomp_bv)
+ ),
+
+ TP_fast_assign(
+ __entry->fpu = fpu;
+ __entry->fpregs_active = fpu->fpregs_active;
+ __entry->fpstate_active = fpu->fpstate_active;
+ __entry->counter = fpu->counter;
+ if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+ __entry->xfeatures = fpu->state.xsave.header.xfeatures;
+ __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
+ }
+ ),
+ TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+ __entry->fpu,
+ __entry->fpregs_active,
+ __entry->fpstate_active,
+ __entry->counter,
+ __entry->xfeatures,
+ __entry->xcomp_bv
+ )
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_state,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_before_save,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_after_save,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_before_restore,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_after_restore,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_regs_activated,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_regs_deactivated,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_activate_state,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_deactivate_state,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_init_state,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_dropped,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_copy_src,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_copy_dst,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_xstate_check_failed,
+ TP_PROTO(struct fpu *fpu),
+ TP_ARGS(fpu)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/trace/
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE fpu
+#endif /* _TRACE_FPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
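
Each DEFINE_EVENT above yields a trace_<name>() call for the FPU code to invoke once the tracepoints are instantiated. A hedged sketch of the usage pattern (the wrapper function is hypothetical; only the trace_x86_fpu_* names come from the header above):

/* In exactly one translation unit, instantiate the tracepoints: */
#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

static void example_save(struct fpu *fpu)
{
	trace_x86_fpu_before_save(fpu);
	/* ... save the register state ... */
	trace_x86_fpu_after_save(fpu);
}
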
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 7428697c5b8d..33b6365c22fe 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -35,7 +35,7 @@ extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
-extern int check_tsc_disabled(void);
+extern unsigned long native_calibrate_cpu(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
@@ -52,7 +52,6 @@ extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
-/* MSR based TSC calibration for Intel Atom SoC platforms */
-unsigned long try_msr_calibrate_tsc(void);
+unsigned long cpu_khz_from_msr(void);
#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 2982387ba817..c03bfb68c503 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -29,12 +29,12 @@
#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
+#define get_fs() (current->thread.addr_limit)
+#define set_fs(x) (current->thread.addr_limit = (x))
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define user_addr_max() (current_thread_info()->addr_limit.seg)
+#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr) \
((unsigned long __force)(addr) < user_addr_max())
@@ -342,7 +342,26 @@ do { \
} while (0)
#ifdef CONFIG_X86_32
-#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_u64(x, ptr, retval, errret) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ asm volatile(ASM_STAC "\n" \
+ "1: movl %2,%%eax\n" \
+ "2: movl %3,%%edx\n" \
+ "3: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: mov %4,%0\n" \
+ " xorl %%eax,%%eax\n" \
+ " xorl %%edx,%%edx\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=r" (retval), "=A"(x) \
+ : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
+ "i" (errret), "0" (retval)); \
+})
+
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
@@ -429,7 +448,7 @@ do { \
#define __get_user_nocheck(x, ptr, size) \
({ \
int __gu_err; \
- unsigned long __gu_val; \
+ __inttype(*(ptr)) __gu_val; \
__uaccess_begin(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
__uaccess_end(); \
@@ -468,13 +487,13 @@ struct __large_struct { unsigned long buf[100]; };
* uaccess_try and catch
*/
#define uaccess_try do { \
- current_thread_info()->uaccess_err = 0; \
+ current->thread.uaccess_err = 0; \
__uaccess_begin(); \
barrier();
#define uaccess_catch(err) \
__uaccess_end(); \
- (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
+ (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
} while (0)
/**
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 2b19caa4081c..32712a925f26 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -26,6 +26,8 @@
# define __ARCH_WANT_COMPAT_SYS_GETDENTS64
# define __ARCH_WANT_COMPAT_SYS_PREADV64
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
+# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
+# define __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
# endif
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 14c63c7e8337..a002b07a7099 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -72,7 +72,6 @@
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
#define SECONDARY_EXEC_XSAVES 0x00100000
-#define SECONDARY_EXEC_PCOMMIT 0x00200000
#define SECONDARY_EXEC_TSC_SCALING 0x02000000
#define PIN_BASED_EXT_INTR_MASK 0x00000001
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 4dcdf74dfed8..6ba793178441 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -168,20 +168,22 @@ struct x86_legacy_devices {
* struct x86_legacy_features - legacy x86 features
*
* @rtc: this device has a CMOS real-time clock present
- * @ebda_search: it's safe to search for the EBDA signature in the hardware's
- * low RAM
+ * @reserve_bios_regions: boot code will search for the EBDA address and the
+ * start of the 640k - 1M BIOS region. If false, the platform must
+ * ensure that its memory map correctly reserves sub-1MB regions as needed.
* @devices: legacy x86 devices, refer to struct x86_legacy_devices
* documentation for further details.
*/
struct x86_legacy_features {
int rtc;
- int ebda_search;
+ int reserve_bios_regions;
struct x86_legacy_devices devices;
};
/**
* struct x86_platform_ops - platform specific runtime functions
- * @calibrate_tsc: calibrate TSC
+ * @calibrate_cpu: calibrate CPU
+ * @calibrate_tsc: calibrate TSC, if different from CPU
* @get_wallclock: get time from HW clock like RTC etc.
* @set_wallclock: set time back to HW clock
* @is_untracked_pat_range exclude from PAT logic
@@ -200,6 +202,7 @@ struct x86_legacy_features {
* semantics.
*/
struct x86_platform_ops {
+ unsigned long (*calibrate_cpu)(void);
unsigned long (*calibrate_tsc)(void);
void (*get_wallclock)(struct timespec *ts);
int (*set_wallclock)(const struct timespec *ts);
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
index 0d809e9fc975..3bdd10d71223 100644
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -76,15 +76,18 @@
/*
* Leaf 5 (0x40000x04)
* HVM-specific features
+ * EAX: Features
+ * EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
*/
-/* EAX Features */
/* Virtualized APIC registers */
#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
/* Virtualized x2APIC accesses */
#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1)
/* Memory mapped from other domains has valid IOMMU entries */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
+/* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3)
#define XEN_CPUID_MAX_NUM_LEAVES 4
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 5b15d94a33f8..37fee272618f 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -78,7 +78,6 @@
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
-#define EXIT_REASON_PCOMMIT 65
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -127,8 +126,7 @@
{ EXIT_REASON_INVVPID, "INVVPID" }, \
{ EXIT_REASON_INVPCID, "INVPCID" }, \
{ EXIT_REASON_XSAVES, "XSAVES" }, \
- { EXIT_REASON_XRSTORS, "XRSTORS" }, \
- { EXIT_REASON_PCOMMIT, "PCOMMIT" }
+ { EXIT_REASON_XRSTORS, "XRSTORS" }
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 9414f84584e4..90d84c3eee53 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -28,7 +28,7 @@
#include <linux/acpi_pmtmr.h>
#include <linux/efi.h>
#include <linux/cpumask.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/irq.h>
#include <linux/slab.h>
@@ -161,13 +161,15 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
/**
* acpi_register_lapic - register a local apic and generates a logic cpu number
* @id: local apic id to register
+ * @acpiid: ACPI id to register
* @enabled: this cpu is enabled or not
*
* Returns the logic cpu number which maps to the local apic
*/
-static int acpi_register_lapic(int id, u8 enabled)
+static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
{
unsigned int ver = 0;
+ int cpu;
if (id >= MAX_LOCAL_APIC) {
printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
@@ -182,7 +184,11 @@ static int acpi_register_lapic(int id, u8 enabled)
if (boot_cpu_physical_apicid != -1U)
ver = apic_version[boot_cpu_physical_apicid];
- return generic_processor_info(id, ver);
+ cpu = generic_processor_info(id, ver);
+ if (cpu >= 0)
+ early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
+
+ return cpu;
}
static int __init
@@ -212,7 +218,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
if (!apic->apic_id_valid(apic_id) && enabled)
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
else
- acpi_register_lapic(apic_id, enabled);
+ acpi_register_lapic(apic_id, processor->uid, enabled);
#else
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif
@@ -240,6 +246,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
* when we use CPU hotplug.
*/
acpi_register_lapic(processor->id, /* APIC ID */
+ processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
return 0;
@@ -258,6 +265,7 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
acpi_table_print_madt_entry(header);
acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
+ processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
return 0;
@@ -714,7 +722,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
{
int cpu;
- cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
+ cpu = acpi_register_lapic(physid, U32_MAX, ACPI_MADT_ENABLED);
if (cpu < 0) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
return cpu;
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 4b28159e0421..bdfad642123f 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -5,7 +5,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 8e3842fc8bea..42d27a62a404 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -20,7 +20,6 @@
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index a147e676fc7b..4fdf6230d93c 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;
- if (i == 0)
- return 0;
+ if (!i)
+ return -ENODEV;
nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
@@ -219,24 +219,22 @@ int amd_set_subcaches(int cpu, unsigned long mask)
return 0;
}
-static int amd_cache_gart(void)
+static void amd_cache_gart(void)
{
u16 i;
- if (!amd_nb_has_feature(AMD_NB_GART))
- return 0;
-
- flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
- if (!flush_words) {
- amd_northbridges.flags &= ~AMD_NB_GART;
- return -ENOMEM;
- }
+ if (!amd_nb_has_feature(AMD_NB_GART))
+ return;
- for (i = 0; i != amd_nb_num(); i++)
- pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
- &flush_words[i]);
+ flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
+ if (!flush_words) {
+ amd_northbridges.flags &= ~AMD_NB_GART;
+ pr_notice("Cannot initialize GART flush words, GART support disabled\n");
+ return;
+ }
- return 0;
+ for (i = 0; i != amd_nb_num(); i++)
+ pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
void amd_flush_garts(void)
@@ -278,17 +276,10 @@ EXPORT_SYMBOL_GPL(amd_flush_garts);
static __init int init_amd_nbs(void)
{
- int err = 0;
-
- err = amd_cache_northbridges();
-
- if (err < 0)
- pr_notice("Cannot enumerate AMD northbridges\n");
+ amd_cache_northbridges();
+ amd_cache_gart();
- if (amd_cache_gart() < 0)
- pr_notice("Cannot initialize GART flush words, GART support disabled\n");
-
- return err;
+ return 0;
}
/* This has to go after the PCI subsystem */
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index cefacbad1531..456316f6c868 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -215,26 +215,18 @@ void apbt_setup_secondary_clock(void)
* cpu timers during the offline process due to the ordering of notification.
* the extra interrupt is harmless.
*/
-static int apbt_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int apbt_cpu_dead(unsigned int cpu)
{
- unsigned long cpu = (unsigned long)hcpu;
struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DEAD:
- dw_apb_clockevent_pause(adev->timer);
- if (system_state == SYSTEM_RUNNING) {
- pr_debug("skipping APBT CPU %lu offline\n", cpu);
- } else {
- pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
- dw_apb_clockevent_stop(adev->timer);
- }
- break;
- default:
- pr_debug("APBT notified %lu, no action\n", action);
+ dw_apb_clockevent_pause(adev->timer);
+ if (system_state == SYSTEM_RUNNING) {
+ pr_debug("skipping APBT CPU %u offline\n", cpu);
+ } else {
+ pr_debug("APBT clockevent for cpu %u offline\n", cpu);
+ dw_apb_clockevent_stop(adev->timer);
}
- return NOTIFY_OK;
+ return 0;
}
static __init int apbt_late_init(void)
@@ -242,9 +234,8 @@ static __init int apbt_late_init(void)
if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
!apb_timer_block_enabled)
return 0;
- /* This notifier should be called after workqueue is ready */
- hotcpu_notifier(apbt_cpuhp_notify, -20);
- return 0;
+ return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
+ apbt_cpu_dead);
}
fs_initcall(apbt_late_init);
#else
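
The conversion above is an instance of the general move from hotcpu_notifier() chains to the cpuhp state machine: a driver registers per-state callbacks instead of decoding notifier actions. A hedged sketch of that pattern (the mydrv_* names are hypothetical; like the apb timer, it only reacts when a CPU dies, so it passes NULL for the startup callback):

static int mydrv_cpu_dead(unsigned int cpu)
{
	/* quiesce the per-CPU side after the CPU has gone away */
	return 0;
}

static int __init mydrv_init(void)
{
	/* No startup callback needed; only react to CPUs going down. */
	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD",
				 NULL, mydrv_cpu_dead);
}
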
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 60078a67d7e3..7943d38c57ca 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -23,7 +23,7 @@
#include <linux/bootmem.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
@@ -92,8 +92,10 @@ static int apic_extnmi = APIC_EXTNMI_BSP;
*/
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
#ifdef CONFIG_X86_32
@@ -2045,7 +2047,7 @@ int generic_processor_info(int apicid, int version)
int thiscpu = max + disabled_cpus - 1;
pr_warning(
- "ACPI: NR_CPUS/possible_cpus limit of %i almost"
+ "APIC: NR_CPUS/possible_cpus limit of %i almost"
" reached. Keeping one slot for boot cpu."
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
@@ -2057,7 +2059,7 @@ int generic_processor_info(int apicid, int version)
int thiscpu = max + disabled_cpus;
pr_warning(
- "ACPI: NR_CPUS/possible_cpus limit of %i reached."
+ "APIC: NR_CPUS/possible_cpus limit of %i reached."
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
disabled_cpus++;
@@ -2085,7 +2087,7 @@ int generic_processor_info(int apicid, int version)
if (topology_update_package_map(apicid, cpu) < 0) {
int thiscpu = max + disabled_cpus;
- pr_warning("ACPI: Package limit reached. Processor %d/0x%x ignored.\n",
+ pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
thiscpu, apicid);
disabled_cpus++;
return -ENOSPC;
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 76f89e2b245a..5b2ae106bd4a 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/hardirq.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
@@ -181,7 +181,6 @@ static struct apic apic_flat = {
.get_apic_id = flat_get_apic_id,
.set_apic_id = set_apic_id,
- .apic_id_mask = 0xFFu << 24,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
@@ -278,7 +277,6 @@ static struct apic apic_physflat = {
.get_apic_id = flat_get_apic_id,
.set_apic_id = set_apic_id,
- .apic_id_mask = 0xFFu << 24,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 13d19ed58514..c05688b2deff 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -11,7 +11,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
@@ -141,7 +140,6 @@ struct apic apic_noop = {
.get_apic_id = noop_get_apic_id,
.set_apic_id = NULL,
- .apic_id_mask = 0x0F << 24,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index ab5c2c685a3c..714d4fda0d52 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -269,7 +269,6 @@ static const struct apic apic_numachip1 __refconst = {
.get_apic_id = numachip1_get_apic_id,
.set_apic_id = numachip1_set_apic_id,
- .apic_id_mask = 0xffU << 24,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
@@ -321,7 +320,6 @@ static const struct apic apic_numachip2 __refconst = {
.get_apic_id = numachip2_get_apic_id,
.set_apic_id = numachip2_set_apic_id,
- .apic_id_mask = 0xffU << 24,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index cf9bd896c12d..06dbaa458bfe 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -171,7 +171,6 @@ static struct apic apic_bigsmp = {
.get_apic_id = bigsmp_get_apic_id,
.set_apic_id = NULL,
- .apic_id_mask = 0xFF << 24,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 7788ce643bf4..f29501e1a5c1 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -16,7 +16,7 @@
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/delay.h>
#ifdef CONFIG_HARDLOCKUP_DETECTOR
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 84e33ff5a6d5..7491f417a8e4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -39,7 +39,7 @@
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -981,7 +981,7 @@ static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
return __irq_domain_alloc_irqs(domain, irq, 1,
ioapic_alloc_attr_node(info),
- info, legacy);
+ info, legacy, NULL);
}
/*
@@ -1014,7 +1014,8 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
info->ioapic_pin))
return -ENOMEM;
} else {
- irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true);
+ irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
+ NULL);
if (irq >= 0) {
irq_data = irq_domain_get_irq_data(domain, irq);
data = irq_data->chip_data;
@@ -2567,29 +2568,25 @@ static struct resource * __init ioapic_setup_resources(void)
unsigned long n;
struct resource *res;
char *mem;
- int i, num = 0;
+ int i;
- for_each_ioapic(i)
- num++;
- if (num == 0)
+ if (nr_ioapics == 0)
return NULL;
n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
- n *= num;
+ n *= nr_ioapics;
mem = alloc_bootmem(n);
res = (void *)mem;
- mem += sizeof(struct resource) * num;
+ mem += sizeof(struct resource) * nr_ioapics;
- num = 0;
for_each_ioapic(i) {
- res[num].name = mem;
- res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res[i].name = mem;
+ res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
- num++;
- ioapics[i].iomem_res = res;
+ ioapics[i].iomem_res = &res[i];
}
ioapic_resources = res;
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 2a0f225afebd..3a205d4a12d0 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -8,7 +8,6 @@
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
-#include <linux/module.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index f316e34abb42..7c43e716c158 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -8,7 +8,7 @@
*/
#include <linux/threads.h>
#include <linux/cpumask.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
@@ -101,7 +101,6 @@ static struct apic apic_default = {
.get_apic_id = default_get_apic_id,
.set_apic_id = NULL,
- .apic_id_mask = 0x0F << 24,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 1793dba7a741..c303054b90b5 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -11,10 +11,9 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/dmar.h>
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index aca8b75c1552..6368fa69d2af 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -152,68 +152,48 @@ static void init_x2apic_ldr(void)
}
}
- /*
- * At CPU state changes, update the x2apic cluster sibling info.
- */
-static int
-update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
+/*
+ * At CPU state changes, update the x2apic cluster sibling info.
+ */
+int x2apic_prepare_cpu(unsigned int cpu)
{
- unsigned int this_cpu = (unsigned long)hcpu;
- unsigned int cpu;
- int err = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
- GFP_KERNEL)) {
- err = -ENOMEM;
- } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
- GFP_KERNEL)) {
- free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
- err = -ENOMEM;
- }
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- for_each_online_cpu(cpu) {
- if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
- continue;
- cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
- cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
- }
- free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
- free_cpumask_var(per_cpu(ipi_mask, this_cpu));
- break;
+ if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
+ free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
+ return -ENOMEM;
}
- return notifier_from_errno(err);
+ return 0;
}
-static struct notifier_block x2apic_cpu_notifier = {
- .notifier_call = update_clusterinfo,
-};
-
-static int x2apic_init_cpu_notifier(void)
+int x2apic_dead_cpu(unsigned int this_cpu)
{
- int cpu = smp_processor_id();
-
- zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);
+ int cpu;
- BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
-
- cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
- register_hotcpu_notifier(&x2apic_cpu_notifier);
- return 1;
+ for_each_online_cpu(cpu) {
+ if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
+ continue;
+ cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+ cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
+ }
+ free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
+ free_cpumask_var(per_cpu(ipi_mask, this_cpu));
+ return 0;
}
static int x2apic_cluster_probe(void)
{
- if (x2apic_mode)
- return x2apic_init_cpu_notifier();
- else
+ int cpu = smp_processor_id();
+
+ if (!x2apic_mode)
return 0;
+
+ cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
+ cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+ x2apic_prepare_cpu, x2apic_dead_cpu);
+ return 1;
}
static const struct cpumask *x2apic_cluster_target_cpus(void)
@@ -270,7 +250,6 @@ static struct apic apic_x2apic_cluster = {
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = x2apic_set_apic_id,
- .apic_id_mask = 0xFFFFFFFFu,
.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
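The notifier-to-hotplug-state conversion above follows the usual cpuhp pattern: a prepare callback allocates per-CPU resources before the CPU comes up, and a dead callback frees them once it is fully offline. Below is a minimal sketch of that pattern using the same cpuhp_setup_state() call the patch adds; the per-CPU mask and the my_* names are illustrative placeholders, not part of the patch.

static DEFINE_PER_CPU(cpumask_var_t, my_mask);

static int my_prepare_cpu(unsigned int cpu)
{
	/* Runs on a control CPU before "cpu" is brought up. */
	if (!zalloc_cpumask_var(&per_cpu(my_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static int my_dead_cpu(unsigned int cpu)
{
	/* Runs after "cpu" has gone offline. */
	free_cpumask_var(per_cpu(my_mask, cpu));
	return 0;
}

static int __init my_setup(void)
{
	/* Either callback may be NULL; the apb_timer conversion earlier in
	 * this series registers only a teardown handler this way. */
	return cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
				 my_prepare_cpu, my_dead_cpu);
}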
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index a1242e2c12e6..4f13f54f1b1f 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -126,7 +126,6 @@ static struct apic apic_x2apic_phys = {
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = x2apic_set_apic_id,
- .apic_id_mask = 0xFFFFFFFFu,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 29003154fafd..09b59adaea3f 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -12,7 +12,7 @@
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
@@ -582,7 +582,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = set_apic_id,
- .apic_id_mask = 0xFFFFFFFFu,
.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
@@ -919,7 +918,7 @@ static void uv_heartbeat(unsigned long ignored)
uv_set_scir_bits(bits);
/* enable next timer period */
- mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+ mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
static void uv_heartbeat_enable(int cpu)
@@ -928,7 +927,7 @@ static void uv_heartbeat_enable(int cpu)
struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
- setup_timer(timer, uv_heartbeat, cpu);
+ setup_pinned_timer(timer, uv_heartbeat, cpu);
timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
add_timer_on(timer, cpu);
uv_cpu_scir_info(cpu)->enabled = 1;
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 674134e9f5e5..2bd5c6ff7ee7 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -31,7 +31,9 @@ void common(void) {
BLANK();
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
+
+ BLANK();
+ OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c343a54bed39..f5c69d8974e1 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
u64 value;
/* re-enable TopologyExtensions if switched off by BIOS */
- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+ if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0fe6953f421c..809eda03c527 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2,7 +2,7 @@
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
@@ -1452,7 +1452,7 @@ void cpu_init(void)
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
- int cpu = stack_smp_processor_id();
+ int cpu = raw_smp_processor_id();
int i;
wait_for_master_cpu(cpu);
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 73d391ae452f..27e46658ebe3 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -21,7 +21,8 @@
*
*/
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 6e2ffbebbcdb..fcd484d2bb03 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -5,7 +5,7 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
@@ -13,6 +13,7 @@
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
+#include <asm/intel-family.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -300,15 +301,14 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
}
/*
- * P4 Xeon errata 037 workaround.
+ * P4 Xeon erratum 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
*/
if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
if (msr_set_bit(MSR_IA32_MISC_ENABLE,
- MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
- > 0) {
+ MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
pr_info("CPU: C0 stepping P4 Xeon detected.\n");
- pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
+ pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
}
}
@@ -509,6 +509,10 @@ static void init_intel(struct cpuinfo_x86 *c)
(c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
+ if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
+ ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
+ set_cpu_bug(c, X86_BUG_MONITOR);
+
#ifdef CONFIG_X86_64
if (c->x86 == 15)
c->x86_cache_alignment = c->x86_clflush_size * 2;
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index fbb5e90557a5..e42117d5f4d7 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -1,7 +1,7 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <linux/cpu.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
/**
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index 34c89a3e8260..83f1a98d37db 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -46,7 +46,7 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
return;
mce_setup(&m);
- m.bank = 1;
+ m.bank = -1;
/* Fake a memory read error with unknown channel */
m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 92e5e37d97bf..79d8ec849468 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -425,7 +425,7 @@ static u64 mce_rdmsrl(u32 msr)
}
if (rdmsrl_safe(msr, &v)) {
- WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
+ WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
/*
* Return zero in case the access faulted. This should
* not happen normally but can happen if the CPU does
@@ -1309,7 +1309,7 @@ static void __restart_timer(struct timer_list *t, unsigned long interval)
if (timer_pending(t)) {
if (time_before(when, t->expires))
- mod_timer_pinned(t, when);
+ mod_timer(t, when);
} else {
t->expires = round_jiffies(when);
add_timer_on(t, smp_processor_id());
@@ -1735,7 +1735,7 @@ static void __mcheck_cpu_init_timer(void)
struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned int cpu = smp_processor_id();
- setup_timer(t, mce_timer_fn, cpu);
+ setup_pinned_timer(t, mce_timer_fn, cpu);
mce_start_timer(cpu, t);
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 10b0661651e0..7b7f3be783d4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -93,7 +93,7 @@ const char * const amd_df_mcablock_names[] = {
EXPORT_SYMBOL_GPL(amd_df_mcablock_names);
static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
-static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
+static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */
static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 8581963894c7..27a0228c9cae 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -56,24 +56,24 @@ static u8 *container;
static size_t container_size;
static u32 ucode_new_rev;
-u8 amd_ucode_patch[PATCH_MAX_SIZE];
+static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;
static struct cpio_data ucode_cpio;
-/*
- * Microcode patch container file is prepended to the initrd in cpio format.
- * See Documentation/x86/early-microcode.txt
- */
-static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
-
static struct cpio_data __init find_ucode_in_initrd(void)
{
- long offset = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
char *path;
void *start;
size_t size;
+ /*
+ * Microcode patch container file is prepended to the initrd in cpio
+ * format. See Documentation/x86/early-microcode.txt
+ */
+ static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
+
#ifdef CONFIG_X86_32
struct boot_params *p;
@@ -89,9 +89,12 @@ static struct cpio_data __init find_ucode_in_initrd(void)
path = ucode_path;
start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
size = boot_params.hdr.ramdisk_size;
-#endif
+#endif /* !CONFIG_X86_32 */
- return find_cpio_data(path, start, size, &offset);
+ return find_cpio_data(path, start, size, NULL);
+#else
+ return (struct cpio_data){ NULL, 0, "" };
+#endif
}
static size_t compute_container_size(u8 *data, u32 total_size)
@@ -289,11 +292,11 @@ void __init load_ucode_amd_bsp(unsigned int family)
size = &ucode_cpio.size;
#endif
- cp = find_ucode_in_initrd();
- if (!cp.data) {
- if (!load_builtin_amd_microcode(&cp, family))
- return;
- }
+ if (!load_builtin_amd_microcode(&cp, family))
+ cp = find_ucode_in_initrd();
+
+ if (!(cp.data && cp.size))
+ return;
*data = cp.data;
*size = cp.size;
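For reference, find_cpio_data() as used above takes the blob path, the start and length of the initrd image, and an optional pointer that receives the offset past the match; passing NULL, as the patch now does, simply discards that offset. A rough sketch of the lookup follows; rd_start and rd_size are placeholders for however the caller located the initrd (boot_params on x86, as in the hunk above), not variables from this file.

	struct cpio_data cd;

	/* Look up the microcode container prepended to the initrd image. */
	cd = find_cpio_data("kernel/x86/microcode/AuthenticAMD.bin",
			    (void *)rd_start, rd_size, NULL);
	if (!cd.data)
		return (struct cpio_data){ NULL, 0, "" };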
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index ac360bfbbdb6..df04b2d033f6 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -60,7 +60,6 @@ static bool dis_ucode_ldr;
static DEFINE_MUTEX(microcode_mutex);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
-EXPORT_SYMBOL_GPL(ucode_cpu_info);
/*
* Operations that are run on a target cpu:
@@ -175,24 +174,24 @@ void load_ucode_ap(void)
}
}
-int __init save_microcode_in_initrd(void)
+static int __init save_microcode_in_initrd(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
if (c->x86 >= 6)
- save_microcode_in_initrd_intel();
+ return save_microcode_in_initrd_intel();
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
- save_microcode_in_initrd_amd();
+ return save_microcode_in_initrd_amd();
break;
default:
break;
}
- return 0;
+ return -EINVAL;
}
void reload_early_microcode(void)
@@ -691,4 +690,5 @@ int __init microcode_init(void)
return error;
}
+fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 65cbbcd48fe4..cdc0deab00c9 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -40,9 +40,13 @@
#include <asm/msr.h>
/*
- * Temporary microcode blobs pointers storage. We note here the pointers to
- * microcode blobs we've got from whatever storage (detached initrd, builtin).
- * Later on, we put those into final storage mc_saved_data.mc_saved.
+ * Temporary microcode blobs pointers storage. We note here during early load
+ * the pointers to microcode blobs we've got from whatever storage (detached
+ * initrd, builtin). Later on, we put those into final storage
+ * mc_saved_data.mc_saved.
+ *
+ * Important: those are offsets from the beginning of initrd or absolute
+ * addresses within the kernel image when built-in.
*/
static unsigned long mc_tmp_ptrs[MAX_UCODE_COUNT];
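As the new comment notes, mc_tmp_ptrs[] holds offsets from the start of the initrd (or absolute addresses for builtin microcode), so the values have to be rebased before they can be dereferenced. A rough sketch of what the copy_ptrs() calls elsewhere in this file do with them; the loop below is illustrative, not the file's actual helper.

	/* Rebase saved blob locations: "offset" is the current initrd start
	 * (or 0 for builtin microcode), matching the later
	 * copy_ptrs(mc_saved, mc_ptrs, offset, count) call sites. */
	for (i = 0; i < count; i++)
		mc_saved[i] = (struct microcode_intel *)(mc_ptrs[i] + offset);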
@@ -51,8 +55,15 @@ static struct mc_saved_data {
struct microcode_intel **mc_saved;
} mc_saved_data;
+/* Microcode blobs within the initrd. 0 if builtin. */
+static struct ucode_blobs {
+ unsigned long start;
+ bool valid;
+} blobs;
+
+/* Go through saved patches and find the one suitable for the current CPU. */
static enum ucode_state
-load_microcode_early(struct microcode_intel **saved,
+find_microcode_patch(struct microcode_intel **saved,
unsigned int num_saved, struct ucode_cpu_info *uci)
{
struct microcode_intel *ucode_ptr, *new_mc = NULL;
@@ -121,13 +132,13 @@ load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
if (!mcs->mc_saved) {
copy_ptrs(mc_saved_tmp, mc_ptrs, offset, count);
- return load_microcode_early(mc_saved_tmp, count, uci);
+ return find_microcode_patch(mc_saved_tmp, count, uci);
} else {
#ifdef CONFIG_X86_32
microcode_phys(mc_saved_tmp, mcs);
- return load_microcode_early(mc_saved_tmp, count, uci);
+ return find_microcode_patch(mc_saved_tmp, count, uci);
#else
- return load_microcode_early(mcs->mc_saved, count, uci);
+ return find_microcode_patch(mcs->mc_saved, count, uci);
#endif
}
}
@@ -450,8 +461,6 @@ static void show_saved_mc(void)
#endif
}
-#ifdef CONFIG_HOTPLUG_CPU
-static DEFINE_MUTEX(x86_cpu_microcode_mutex);
/*
* Save this mc into mc_saved_data. So it will be loaded early when a CPU is
* hot added or resumes.
@@ -459,19 +468,18 @@ static DEFINE_MUTEX(x86_cpu_microcode_mutex);
* Please make sure this mc should be a valid microcode patch before calling
* this function.
*/
-int save_mc_for_early(u8 *mc)
+static void save_mc_for_early(u8 *mc)
{
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Synchronization during CPU hotplug. */
+ static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+
struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
unsigned int mc_saved_count_init;
unsigned int num_saved;
struct microcode_intel **mc_saved;
- int ret = 0;
- int i;
+ int ret, i;
- /*
- * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
- * hotplug.
- */
mutex_lock(&x86_cpu_microcode_mutex);
mc_saved_count_init = mc_saved_data.num_saved;
@@ -509,11 +517,8 @@ int save_mc_for_early(u8 *mc)
out:
mutex_unlock(&x86_cpu_microcode_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(save_mc_for_early);
#endif
+}
static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
{
@@ -532,37 +537,6 @@ static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
#endif
}
-static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
-static __init enum ucode_state
-scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- unsigned long start, unsigned long size,
- struct ucode_cpu_info *uci)
-{
- struct cpio_data cd;
- long offset = 0;
-#ifdef CONFIG_X86_32
- char *p = (char *)__pa_nodebug(ucode_name);
-#else
- char *p = ucode_name;
-#endif
-
- cd.data = NULL;
- cd.size = 0;
-
- /* try built-in microcode if no initrd */
- if (!size) {
- if (!load_builtin_intel_microcode(&cd))
- return UCODE_ERROR;
- } else {
- cd = find_cpio_data(p, (void *)start, size, &offset);
- if (!cd.data)
- return UCODE_ERROR;
- }
-
- return get_matching_model_microcode(start, cd.data, cd.size,
- mcs, mc_ptrs, uci);
-}
-
/*
* Print ucode update info.
*/
@@ -680,38 +654,117 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
*/
int __init save_microcode_in_initrd_intel(void)
{
- unsigned int count = mc_saved_data.num_saved;
struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
- int ret = 0;
+ unsigned int count = mc_saved_data.num_saved;
+ unsigned long offset = 0;
+ int ret;
if (!count)
- return ret;
+ return 0;
+
+ /*
+ * We have found a valid initrd but it might've been relocated in the
+ * meantime so get its updated address.
+ */
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && blobs.valid)
+ offset = initrd_start;
- copy_ptrs(mc_saved, mc_tmp_ptrs, get_initrd_start(), count);
+ copy_ptrs(mc_saved, mc_tmp_ptrs, offset, count);
ret = save_microcode(&mc_saved_data, mc_saved, count);
if (ret)
pr_err("Cannot save microcode patches from initrd.\n");
-
- show_saved_mc();
+ else
+ show_saved_mc();
return ret;
}
+static __init enum ucode_state
+__scan_microcode_initrd(struct cpio_data *cd, struct ucode_blobs *blbp)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
+ char *p = IS_ENABLED(CONFIG_X86_32) ? (char *)__pa_nodebug(ucode_name)
+ : ucode_name;
+# ifdef CONFIG_X86_32
+ unsigned long start = 0, size;
+ struct boot_params *params;
+
+ params = (struct boot_params *)__pa_nodebug(&boot_params);
+ size = params->hdr.ramdisk_size;
+
+ /*
+ * Set start only if we have an initrd image. We cannot use initrd_start
+ * because it is not set that early yet.
+ */
+ start = (size ? params->hdr.ramdisk_image : 0);
+
+# else /* CONFIG_X86_64 */
+ unsigned long start = 0, size;
+
+ size = (u64)boot_params.ext_ramdisk_size << 32;
+ size |= boot_params.hdr.ramdisk_size;
+
+ if (size) {
+ start = (u64)boot_params.ext_ramdisk_image << 32;
+ start |= boot_params.hdr.ramdisk_image;
+
+ start += PAGE_OFFSET;
+ }
+# endif
+
+ *cd = find_cpio_data(p, (void *)start, size, NULL);
+ if (cd->data) {
+ blbp->start = start;
+ blbp->valid = true;
+
+ return UCODE_OK;
+ } else
+#endif /* CONFIG_BLK_DEV_INITRD */
+ return UCODE_ERROR;
+}
+
+static __init enum ucode_state
+scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
+ struct ucode_cpu_info *uci, struct ucode_blobs *blbp)
+{
+ struct cpio_data cd = { NULL, 0, "" };
+ enum ucode_state ret;
+
+ /* try built-in microcode first */
+ if (load_builtin_intel_microcode(&cd))
+ /*
+ * Invalidate blobs as we might've gotten an initrd too,
+ * supplied by the boot loader, by mistake or simply forgotten
+ * there. That's fine, we ignore it since we've found builtin
+ * microcode already.
+ */
+ blbp->valid = false;
+ else {
+ ret = __scan_microcode_initrd(&cd, blbp);
+ if (ret != UCODE_OK)
+ return ret;
+ }
+
+ return get_matching_model_microcode(blbp->start, cd.data, cd.size,
+ mcs, mc_ptrs, uci);
+}
+
static void __init
_load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- unsigned long start, unsigned long size)
+ struct ucode_blobs *blbp)
{
struct ucode_cpu_info uci;
enum ucode_state ret;
collect_cpu_info_early(&uci);
- ret = scan_microcode(mcs, mc_ptrs, start, size, &uci);
+ ret = scan_microcode(mcs, mc_ptrs, &uci, blbp);
if (ret != UCODE_OK)
return;
- ret = load_microcode(mcs, mc_ptrs, start, &uci);
+ ret = load_microcode(mcs, mc_ptrs, blbp->start, &uci);
if (ret != UCODE_OK)
return;
@@ -720,54 +773,60 @@ _load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
void __init load_ucode_intel_bsp(void)
{
- u64 start, size;
-#ifdef CONFIG_X86_32
- struct boot_params *p;
-
- p = (struct boot_params *)__pa_nodebug(&boot_params);
- size = p->hdr.ramdisk_size;
+ struct ucode_blobs *blobs_p;
+ struct mc_saved_data *mcs;
+ unsigned long *ptrs;
- /*
- * Set start only if we have an initrd image. We cannot use initrd_start
- * because it is not set that early yet.
- */
- start = (size ? p->hdr.ramdisk_image : 0);
-
- _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
- (unsigned long *)__pa_nodebug(&mc_tmp_ptrs),
- start, size);
+#ifdef CONFIG_X86_32
+ mcs = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+ ptrs = (unsigned long *)__pa_nodebug(&mc_tmp_ptrs);
+ blobs_p = (struct ucode_blobs *)__pa_nodebug(&blobs);
#else
- size = boot_params.hdr.ramdisk_size;
- start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
-
- _load_ucode_intel_bsp(&mc_saved_data, mc_tmp_ptrs, start, size);
+ mcs = &mc_saved_data;
+ ptrs = mc_tmp_ptrs;
+ blobs_p = &blobs;
#endif
+
+ _load_ucode_intel_bsp(mcs, ptrs, blobs_p);
}
void load_ucode_intel_ap(void)
{
- unsigned long *mcs_tmp_p;
- struct mc_saved_data *mcs_p;
+ struct ucode_blobs *blobs_p;
+ unsigned long *ptrs, start = 0;
+ struct mc_saved_data *mcs;
struct ucode_cpu_info uci;
enum ucode_state ret;
-#ifdef CONFIG_X86_32
- mcs_tmp_p = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
- mcs_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+#ifdef CONFIG_X86_32
+ mcs = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+ ptrs = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
+ blobs_p = (struct ucode_blobs *)__pa_nodebug(&blobs);
#else
- mcs_tmp_p = mc_tmp_ptrs;
- mcs_p = &mc_saved_data;
+ mcs = &mc_saved_data;
+ ptrs = mc_tmp_ptrs;
+ blobs_p = &blobs;
#endif
/*
* If there is no valid ucode previously saved in memory, no need to
* update ucode on this AP.
*/
- if (!mcs_p->num_saved)
+ if (!mcs->num_saved)
return;
+ if (blobs_p->valid) {
+ start = blobs_p->start;
+
+ /*
+ * Pay attention to CONFIG_RANDOMIZE_MEMORY=y as it shuffles
+ * physmem mapping too and there we have the initrd.
+ */
+ start += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+ }
+
collect_cpu_info_early(&uci);
- ret = load_microcode(mcs_p, mcs_tmp_p, get_initrd_start_addr(), &uci);
+ ret = load_microcode(mcs, ptrs, start, &uci);
if (ret != UCODE_OK)
return;
@@ -784,7 +843,7 @@ void reload_ucode_intel(void)
collect_cpu_info_early(&uci);
- ret = load_microcode_early(mc_saved_data.mc_saved,
+ ret = find_microcode_patch(mc_saved_data.mc_saved,
mc_saved_data.num_saved, &uci);
if (ret != UCODE_OK)
return;
@@ -794,6 +853,7 @@ void reload_ucode_intel(void)
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
+ static struct cpu_signature prev;
struct cpuinfo_x86 *c = &cpu_data(cpu_num);
unsigned int val[2];
@@ -808,8 +868,13 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
}
csig->rev = c->microcode;
- pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
- cpu_num, csig->sig, csig->pf, csig->rev);
+
+ /* No extra locking on prev, races are harmless. */
+ if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
+ pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
+ csig->sig, csig->pf, csig->rev);
+ prev = *csig;
+ }
return 0;
}
@@ -838,6 +903,7 @@ static int apply_microcode_intel(int cpu)
struct ucode_cpu_info *uci;
struct cpuinfo_x86 *c;
unsigned int val[2];
+ static int prev_rev;
/* We should bind the task to the CPU */
if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -872,11 +938,14 @@ static int apply_microcode_intel(int cpu)
return -1;
}
- pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
- cpu, val[1],
- mc->hdr.date & 0xffff,
- mc->hdr.date >> 24,
- (mc->hdr.date >> 16) & 0xff);
+ if (val[1] != prev_rev) {
+ pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
+ val[1],
+ mc->hdr.date & 0xffff,
+ mc->hdr.date >> 24,
+ (mc->hdr.date >> 16) & 0xff);
+ prev_rev = val[1];
+ }
c = &cpu_data(cpu);
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
index 2ce1a7dc45b7..406cb6c0d9dd 100644
--- a/arch/x86/kernel/cpu/microcode/intel_lib.c
+++ b/arch/x86/kernel/cpu/microcode/intel_lib.c
@@ -141,7 +141,6 @@ int microcode_sanity_check(void *mc, int print_err)
}
return 0;
}
-EXPORT_SYMBOL_GPL(microcode_sanity_check);
/*
* Returns 1 if update has been found, 0 otherwise.
@@ -183,4 +182,3 @@ int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
return find_matching_signature(mc, csig, cpf);
}
-EXPORT_SYMBOL_GPL(has_newer_microcode);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 10c11b4da31d..8f44c5a50ab8 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -13,7 +13,8 @@
#include <linux/types.h>
#include <linux/time.h>
#include <linux/clocksource.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 31e951ce6dff..3b442b64c72d 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -17,7 +17,6 @@
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 16e37a2581ac..fdc55215d44d 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -4,7 +4,7 @@
*/
#define DEBUG
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index d76f13d6d8d6..6d9b45549109 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -2,7 +2,6 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
-#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 7d393ecdeee6..28f1b54b7fad 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -38,7 +38,7 @@
#include <linux/stop_machine.h>
#include <linux/kvm_para.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 2e8caf03f593..181eabecae25 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -12,7 +12,7 @@
*/
#include <linux/percpu.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index f6f50c4ceaec..cfa97ff67bda 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -39,9 +39,9 @@ __setup("nordrand", x86_rdrand_setup);
*/
#define SANITY_CHECK_LOOPS 8
+#ifdef CONFIG_ARCH_RANDOM
void x86_init_rdrand(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_ARCH_RANDOM
unsigned long tmp;
int i;
@@ -55,5 +55,5 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
return;
}
}
-#endif
}
+#endif
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 8cac429b6a1d..1ff0598d309c 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -22,7 +22,8 @@
*/
#include <linux/dmi.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 9ef978d69c22..9616cf76940c 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -20,7 +20,7 @@
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 2bb25c3fe2e8..92e8f0a7159c 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{
- struct task_struct *task;
unsigned long ret_addr;
int index;
if (addr != (unsigned long)return_to_handler)
return;
- task = tinfo->task;
index = task->curr_ret_stack;
if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{ }
#endif
@@ -79,28 +77,36 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
void *p, unsigned int size, void *end)
{
- void *t = tinfo;
+ void *t = task_stack_page(task);
if (end) {
if (p < end && p >= (end-THREAD_SIZE))
return 1;
else
return 0;
}
- return p > t && p < t + THREAD_SIZE - size;
+ return p >= t && p < t + THREAD_SIZE - size;
}
unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
{
struct stack_frame *frame = (struct stack_frame *)bp;
- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+ /*
+ * If we overflowed the stack into a guard page, jump back to the
+ * bottom of the usable stack.
+ */
+ if ((unsigned long)task_stack_page(task) - (unsigned long)stack <
+ PAGE_SIZE)
+ stack = (unsigned long *)task_stack_page(task);
+
+ while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
unsigned long addr;
addr = *stack;
@@ -112,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
} else {
ops->address(data, addr, 0);
}
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
stack++;
}
@@ -121,7 +127,7 @@ print_context_stack(struct thread_info *tinfo,
EXPORT_SYMBOL_GPL(print_context_stack);
unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
@@ -129,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
struct stack_frame *frame = (struct stack_frame *)bp;
unsigned long *ret_addr = &frame->return_address;
- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+ while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
unsigned long addr = *ret_addr;
if (!__kernel_text_address(addr))
@@ -139,7 +145,7 @@ print_context_stack_bp(struct thread_info *tinfo,
break;
frame = frame->next_frame;
ret_addr = &frame->return_address;
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
return (unsigned long)frame;
@@ -199,6 +205,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_stack_log_lvl(task, NULL, sp, bp, "");
}
+void show_stack_regs(struct pt_regs *regs)
+{
+ show_stack_log_lvl(current, regs, (unsigned long *)regs->sp, regs->bp, "");
+}
+
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
@@ -228,6 +239,8 @@ unsigned long oops_begin(void)
EXPORT_SYMBOL_GPL(oops_begin);
NOKPROBE_SYMBOL(oops_begin);
+void __noreturn rewind_stack_do_exit(int signr);
+
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
if (regs && kexec_should_crash(current))
@@ -249,7 +262,13 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
- do_exit(signr);
+
+ /*
+ * We're not going to return, but we might be on an IST stack or
+ * have very little stack space left. Rewind the stack and kill
+ * the task.
+ */
+ rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 464ffd69b92e..09675712eba8 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -7,7 +7,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
bp = stack_frame(task, regs);
for (;;) {
- struct thread_info *context;
void *end_stack;
end_stack = is_hardirq_stack(stack, cpu);
if (!end_stack)
end_stack = is_softirq_stack(stack, cpu);
- context = task_thread_info(task);
- bp = ops->walk_stack(context, stack, bp, ops, data,
+ bp = ops->walk_stack(task, stack, bp, ops, data,
end_stack, &graph);
/* Stop if not on irq stack */
@@ -98,7 +96,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
int i;
if (sp == NULL) {
- if (task)
+ if (regs)
+ sp = (unsigned long *)regs->sp;
+ else if (task)
sp = (unsigned long *)task->thread.sp;
else
sp = (unsigned long *)&sp;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5f1c6266eb30..9ee4520ce83c 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -7,7 +7,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
- struct thread_info *tinfo;
unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned long dummy;
unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* current stack address. If the stacks consist of nested
* exceptions
*/
- tinfo = task_thread_info(task);
while (!done) {
unsigned long *stack_end;
enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (ops->stack(data, id) < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp, ops,
+ bp = ops->walk_stack(task, stack, bp, ops,
data, stack_end, &graph);
ops->stack(data, "<EOE>");
/*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (ops->stack(data, "IRQ") < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp,
+ bp = ops->walk_stack(task, stack, bp,
ops, data, stack_end, &graph);
/*
* We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
/*
* This handles the process stack:
*/
- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+ bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
put_cpu();
}
EXPORT_SYMBOL(dump_trace);
@@ -266,7 +264,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
* back trace for this cpu:
*/
if (sp == NULL) {
- if (task)
+ if (regs)
+ sp = (unsigned long *)regs->sp;
+ else if (task)
sp = (unsigned long *)task->thread.sp;
else
sp = (unsigned long *)&sp;
@@ -274,6 +274,8 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
+ unsigned long word;
+
if (stack >= irq_stack && stack <= irq_stack_end) {
if (stack == irq_stack_end) {
stack = (unsigned long *) (irq_stack_end[-1]);
@@ -283,12 +285,18 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (kstack_end(stack))
break;
}
+
+ if (probe_kernel_address(stack, word))
+ break;
+
if ((i % STACKSLOTS_PER_LINE) == 0) {
if (i != 0)
pr_cont("\n");
- printk("%s %016lx", log_lvl, *stack++);
+ printk("%s %016lx", log_lvl, word);
} else
- pr_cont(" %016lx", *stack++);
+ pr_cont(" %016lx", word);
+
+ stack++;
touch_nmi_watchdog();
}
preempt_enable();
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index bca14c899137..57b71373bae3 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -11,7 +11,11 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
@@ -21,6 +25,9 @@
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg) pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
static void __init fix_hypertransport_config(int num, int slot, int func)
{
@@ -76,6 +83,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
/*
+ * Only applies to Nvidia root ports (bus 0) and not to
+ * Nvidia graphics cards with PCI ports on secondary buses.
+ */
+ if (num)
+ return;
+
+ /*
* All timer overrides on Nvidia are
* wrong unless HPET is enabled.
* Unfortunately that's not true on many Asus boards.
@@ -590,6 +604,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
#endif
}
+#define BCM4331_MMIO_SIZE 16384
+#define BCM4331_PM_CAP 0x40
+#define bcma_aread32(reg) ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val) iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+ void __iomem *mmio;
+ u16 pmcsr;
+ u64 addr;
+ int i;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+ return;
+
+ /* Card may have been put into PCI_D3hot by grub quirk */
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+ mdelay(10);
+
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ dev_err("Cannot power up Apple AirPort card\n");
+ return;
+ }
+ }
+
+ addr = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+ addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+ addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+ if (!mmio) {
+ dev_err("Cannot iomap Apple AirPort card\n");
+ return;
+ }
+
+ pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
+ for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+ udelay(10);
+
+ bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(1);
+
+ bcma_awrite32(BCMA_RESET_CTL, 0);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(10);
+
+ early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
@@ -603,12 +672,6 @@ struct chipset {
void (*f)(int num, int slot, int func);
};
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -638,9 +701,13 @@ static struct chipset early_qrk[] __initdata = {
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_BROADCOM, 0x4331,
+ PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
};
+static void __init early_pci_scan_bus(int bus);
+
/**
* check_dev_quirk - apply early quirks to a given PCI device
* @num: bus number
@@ -649,7 +716,7 @@ static struct chipset early_qrk[] __initdata = {
*
* Check the vendor & device ID against the early quirks table.
*
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
* poke at this device again.
*/
static int __init check_dev_quirk(int num, int slot, int func)
@@ -658,6 +725,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
u16 vendor;
u16 device;
u8 type;
+ u8 sec;
int i;
class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
@@ -685,25 +753,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
type = read_pci_config_byte(num, slot, func,
PCI_HEADER_TYPE);
+
+ if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+ sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+ if (sec > num)
+ early_pci_scan_bus(sec);
+ }
+
if (!(type & 0x80))
return -1;
return 0;
}
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
{
int slot, func;
- if (!early_pci_allowed())
- return;
-
/* Poor man's PCI discovery */
- /* Only scan the root bus */
for (slot = 0; slot < 32; slot++)
for (func = 0; func < 8; func++) {
/* Only probe function 0 on single fn devices */
- if (check_dev_quirk(0, slot, func))
+ if (check_dev_quirk(bus, slot, func))
break;
}
}
+
+void __init early_quirks(void)
+{
+ if (!early_pci_allowed())
+ return;
+
+ early_pci_scan_bus(0);
+}
diff --git a/arch/x86/kernel/ebda.c b/arch/x86/kernel/ebda.c
index afe65dffee80..4312f8ae71b7 100644
--- a/arch/x86/kernel/ebda.c
+++ b/arch/x86/kernel/ebda.c
@@ -6,66 +6,92 @@
#include <asm/bios_ebda.h>
/*
+ * This function reserves all conventional PC system BIOS related
+ * firmware memory areas (some of which are data, some of which
+ * are code), that must not be used by the kernel as available
+ * RAM.
+ *
* The BIOS places the EBDA/XBDA at the top of conventional
* memory, and usually decreases the reported amount of
- * conventional memory (int 0x12) too. This also contains a
- * workaround for Dell systems that neglect to reserve EBDA.
- * The same workaround also avoids a problem with the AMD768MPX
- * chipset: reserve a page before VGA to prevent PCI prefetch
- * into it (errata #56). Usually the page is reserved anyways,
- * unless you have no PS/2 mouse plugged in.
+ * conventional memory (int 0x12) too.
+ *
+ * This means that as a first approximation on most systems we can
+ * guess the reserved BIOS area by looking at the low BIOS RAM size
+ * value and assume that everything above that value (up to 1MB) is
+ * reserved.
+ *
+ * But life in firmware country is not that simple:
+ *
+ * - This code also contains a quirk for Dell systems that neglect
+ * to reserve the EBDA area in the 'RAM size' value ...
+ *
+ * - The same quirk also avoids a problem with the AMD768MPX
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). (Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.)
+ *
+ * - Plus paravirt systems don't have a reliable value in the
+ * 'BIOS RAM size' pointer we can rely on, so we must quirk
+ * them too.
+ *
+ * Due to those various problems this function is deliberately
+ * very conservative and tries to err on the side of reserving
+ * too much, to not risk reserving too little.
+ *
+ * Losing a small amount of memory in the bottom megabyte is
+ * rarely a problem, as long as we have enough memory to install
+ * the SMP bootup trampoline which *must* be in this area.
*
- * This functions is deliberately very conservative. Losing
- * memory in the bottom megabyte is rarely a problem, as long
- * as we have enough memory to install the trampoline. Using
- * memory that is in use by the BIOS or by some DMA device
- * the BIOS didn't shut down *is* a big problem.
+ * Using memory that is in use by the BIOS or by some DMA device
+ * the BIOS didn't shut down *is* a big problem to the kernel,
+ * obviously.
*/
-#define BIOS_LOWMEM_KILOBYTES 0x413
-#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
-#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
+#define BIOS_RAM_SIZE_KB_PTR 0x413
-void __init reserve_ebda_region(void)
+#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
+#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
+
+void __init reserve_bios_regions(void)
{
- unsigned int lowmem, ebda_addr;
+ unsigned int bios_start, ebda_start;
/*
- * To determine the position of the EBDA and the
- * end of conventional memory, we need to look at
- * the BIOS data area. In a paravirtual environment
- * that area is absent. We'll just have to assume
- * that the paravirt case can handle memory setup
- * correctly, without our help.
+ * NOTE: In a paravirtual environment the BIOS reserved
+ * area is absent. We'll just have to assume that the
+ * paravirt case can handle memory setup correctly,
+ * without our help.
*/
- if (!x86_platform.legacy.ebda_search)
+ if (!x86_platform.legacy.reserve_bios_regions)
return;
- /* end of low (conventional) memory */
- lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
- lowmem <<= 10;
-
- /* start of EBDA area */
- ebda_addr = get_bios_ebda();
-
/*
- * Note: some old Dells seem to need 4k EBDA without
- * reporting so, so just consider the memory above 0x9f000
- * to be off limits (bugzilla 2990).
+ * BIOS RAM size is encoded in kilobytes, convert it
+ * to bytes to get a first guess at where the BIOS
+ * firmware area starts:
*/
+ bios_start = *(unsigned short *)__va(BIOS_RAM_SIZE_KB_PTR);
+ bios_start <<= 10;
- /* If the EBDA address is below 128K, assume it is bogus */
- if (ebda_addr < INSANE_CUTOFF)
- ebda_addr = LOWMEM_CAP;
+ /*
+ * If bios_start is less than 128K, assume it is bogus
+ * and bump it up to 640K. Similarly, if bios_start is above 640K,
+ * don't trust it.
+ */
+ if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+ bios_start = BIOS_START_MAX;
- /* If lowmem is less than 128K, assume it is bogus */
- if (lowmem < INSANE_CUTOFF)
- lowmem = LOWMEM_CAP;
+ /* Get the start address of the EBDA page: */
+ ebda_start = get_bios_ebda();
- /* Use the lower of the lowmem and EBDA markers as the cutoff */
- lowmem = min(lowmem, ebda_addr);
- lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
+ /*
+ * If the EBDA start address is sane and is below the BIOS region,
+ * then also reserve everything from the EBDA start address up to
+ * the BIOS region.
+ */
+ if (ebda_start >= BIOS_START_MIN && ebda_start < bios_start)
+ bios_start = ebda_start;
- /* reserve all memory between lowmem and the 1MB mark */
- memblock_reserve(lowmem, 0x100000 - lowmem);
+ /* Reserve all memory between bios_start and the 1MB mark: */
+ memblock_reserve(bios_start, 0x100000 - bios_start);
}
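A worked example of the clamping logic above (the numbers are illustrative, not taken from the patch): on a machine whose BIOS reports 639 KB of conventional RAM at 0x413, bios_start = 639 << 10 = 0x9fc00, which is above BIOS_START_MAX (0x9f000), so it is clamped down to 0x9f000. If get_bios_ebda() then returns 0x9e800, which is sane (at least BIOS_START_MIN, 0x20000, and below bios_start), the reservation start drops to 0x9e800, and memblock_reserve(0x9e800, 0x100000 - 0x9e800) covers everything from there up to the 1 MB mark.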
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 4d38416e2a7f..04f89caef9c4 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -57,7 +57,7 @@
# error "Need more than one PGD for the ESPFIX hack"
#endif
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 97027545a72d..3fc03a09a93b 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -8,10 +8,14 @@
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
+#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <linux/hardirq.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/fpu.h>
+
/*
* Represents the initial FPU state. It's mostly (but not completely) zeroes,
* depending on the FPU hardware format:
@@ -192,6 +196,7 @@ void fpu__save(struct fpu *fpu)
WARN_ON_FPU(fpu != &current->thread.fpu);
preempt_disable();
+ trace_x86_fpu_before_save(fpu);
if (fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(fpu)) {
if (use_eager_fpu())
@@ -200,6 +205,7 @@ void fpu__save(struct fpu *fpu)
fpregs_deactivate(fpu);
}
}
+ trace_x86_fpu_after_save(fpu);
preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
@@ -222,7 +228,14 @@ void fpstate_init(union fpregs_state *state)
return;
}
- memset(state, 0, xstate_size);
+ memset(state, 0, fpu_kernel_xstate_size);
+
+ /*
+ * XRSTORS requires that this bit is set in xcomp_bv, or
+ * it will #GP. Make sure it is replaced after the memset().
+ */
+ if (static_cpu_has(X86_FEATURE_XSAVES))
+ state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
if (static_cpu_has(X86_FEATURE_FXSR))
fpstate_init_fxstate(&state->fxsave);
@@ -247,7 +260,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* leak into the child task:
*/
if (use_eager_fpu())
- memset(&dst_fpu->state.xsave, 0, xstate_size);
+ memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
/*
* Save current FPU registers directly into the child
@@ -266,7 +279,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
*/
preempt_disable();
if (!copy_fpregs_to_fpstate(dst_fpu)) {
- memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
+ memcpy(&src_fpu->state, &dst_fpu->state,
+ fpu_kernel_xstate_size);
if (use_eager_fpu())
copy_kernel_to_fpregs(&src_fpu->state);
@@ -275,6 +289,9 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
}
preempt_enable();
+ trace_x86_fpu_copy_src(src_fpu);
+ trace_x86_fpu_copy_dst(dst_fpu);
+
return 0;
}
@@ -288,7 +305,9 @@ void fpu__activate_curr(struct fpu *fpu)
if (!fpu->fpstate_active) {
fpstate_init(&fpu->state);
+ trace_x86_fpu_init_state(fpu);
+ trace_x86_fpu_activate_state(fpu);
/* Safe to do for the current task: */
fpu->fpstate_active = 1;
}
@@ -314,7 +333,9 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
} else {
if (!fpu->fpstate_active) {
fpstate_init(&fpu->state);
+ trace_x86_fpu_init_state(fpu);
+ trace_x86_fpu_activate_state(fpu);
/* Safe to do for current and for stopped child tasks: */
fpu->fpstate_active = 1;
}
@@ -347,7 +368,9 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
fpu->last_cpu = -1;
} else {
fpstate_init(&fpu->state);
+ trace_x86_fpu_init_state(fpu);
+ trace_x86_fpu_activate_state(fpu);
/* Safe to do for stopped child tasks: */
fpu->fpstate_active = 1;
}
@@ -432,9 +455,11 @@ void fpu__restore(struct fpu *fpu)
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
+ trace_x86_fpu_before_restore(fpu);
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
fpu->counter++;
+ trace_x86_fpu_after_restore(fpu);
kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
@@ -463,6 +488,8 @@ void fpu__drop(struct fpu *fpu)
fpu->fpstate_active = 0;
+ trace_x86_fpu_dropped(fpu);
+
preempt_enable();
}
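
The xcomp_bv handling added to fpstate_init() above is the part that is easy to get wrong: with XSAVES enabled, bit 63 of xcomp_bv must be set or XRSTORS raises #GP. A minimal sketch of composing the value follows; it assumes XCOMP_BV_COMPACTED_FORMAT is bit 63 (as the hunk uses it), and the feature mask is an invented example -- later in this series setup_init_fpu_buf() ORs the real feature mask into the same field.

#include <stdio.h>
#include <stdint.h>

#define XCOMP_BV_COMPACTED_FORMAT (1ULL << 63)

int main(void)
{
        /* Invented example: a compacted buffer tracking FP, SSE and YMM. */
        uint64_t xfeatures = 0x7;
        uint64_t xcomp_bv  = XCOMP_BV_COMPACTED_FORMAT | xfeatures;

        printf("xcomp_bv = %#llx, compacted = %d\n",
               (unsigned long long)xcomp_bv,
               !!(xcomp_bv & XCOMP_BV_COMPACTED_FORMAT));
        return 0;
}
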
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index aacfd7a82cec..93982aebb398 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -145,8 +145,8 @@ static void __init fpu__init_system_generic(void)
* This is inherent to the XSAVE architecture which puts all state
* components into a single, continuous memory block:
*/
-unsigned int xstate_size;
-EXPORT_SYMBOL_GPL(xstate_size);
+unsigned int fpu_kernel_xstate_size;
+EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
/* Get alignment of the TYPE. */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
@@ -178,7 +178,7 @@ static void __init fpu__init_task_struct_size(void)
* Add back the dynamically-calculated register state
* size.
*/
- task_size += xstate_size;
+ task_size += fpu_kernel_xstate_size;
/*
* We dynamically size 'struct fpu', so we require that
@@ -195,7 +195,7 @@ static void __init fpu__init_task_struct_size(void)
}
/*
- * Set up the xstate_size based on the legacy FPU context size.
+ * Set up the user and kernel xstate sizes based on the legacy FPU context size.
*
* We set this up first, and later it will be overwritten by
* fpu__init_system_xstate() if the CPU knows about xstates.
@@ -208,7 +208,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
on_boot_cpu = 0;
/*
- * Note that xstate_size might be overwriten later during
+ * Note that xstate sizes might be overwritten later during
* fpu__init_system_xstate().
*/
@@ -219,27 +219,17 @@ static void __init fpu__init_system_xstate_size_legacy(void)
*/
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
- xstate_size = sizeof(struct swregs_state);
+ fpu_kernel_xstate_size = sizeof(struct swregs_state);
} else {
if (boot_cpu_has(X86_FEATURE_FXSR))
- xstate_size = sizeof(struct fxregs_state);
+ fpu_kernel_xstate_size =
+ sizeof(struct fxregs_state);
else
- xstate_size = sizeof(struct fregs_state);
+ fpu_kernel_xstate_size =
+ sizeof(struct fregs_state);
}
- /*
- * Quirk: we don't yet handle the XSAVES* instructions
- * correctly, as we don't correctly convert between
- * standard and compacted format when interfacing
- * with user-space - so disable it for now.
- *
- * The difference is small: with recent CPUs the
- * compacted format is only marginally smaller than
- * the standard FPU state format.
- *
- * ( This is easy to backport while we are fixing
- * XSAVES* support. )
- */
- setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+
+ fpu_user_xstate_size = fpu_kernel_xstate_size;
}
/*
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 81422dfb152b..c114b132d121 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -4,6 +4,7 @@
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
+#include <asm/fpu/xstate.h>
/*
* The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
@@ -85,21 +86,26 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -ENODEV;
- fpu__activate_fpstate_read(fpu);
-
xsave = &fpu->state.xsave;
- /*
- * Copy the 48bytes defined by the software first into the xstate
- * memory layout in the thread struct, so that we can copy the entire
- * xstateregs to the user using one user_regset_copyout().
- */
- memcpy(&xsave->i387.sw_reserved,
- xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
- /*
- * Copy the xstate memory layout.
- */
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+ fpu__activate_fpstate_read(fpu);
+
+ if (using_compacted_format()) {
+ ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave);
+ } else {
+ fpstate_sanitize_xstate(fpu);
+ /*
+ * Copy the 48 bytes defined by the software into the xsave
+ * area in the thread struct, so that we can copy the whole
+ * area to user using one user_regset_copyout().
+ */
+ memcpy(&xsave->i387.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
+
+ /*
+ * Copy the xstate memory layout.
+ */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+ }
return ret;
}
@@ -114,11 +120,27 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -ENODEV;
- fpu__activate_fpstate_write(fpu);
+ /*
+ * A whole standard-format XSAVE buffer is needed:
+ */
+ if ((pos != 0) || (count < fpu_user_xstate_size))
+ return -EFAULT;
xsave = &fpu->state.xsave;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+ fpu__activate_fpstate_write(fpu);
+
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ ret = copyin_to_xsaves(kbuf, ubuf, xsave);
+ else
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+
+ /*
+ * In case of failure, mark all states as init:
+ */
+ if (ret)
+ fpstate_init(&fpu->state);
+
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 31c6a60505e6..9e231d88bb33 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -8,8 +8,10 @@
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
+#include <asm/fpu/xstate.h>
#include <asm/sigframe.h>
+#include <asm/trace/fpu.h>
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
@@ -31,7 +33,7 @@ static inline int check_for_xstate(struct fxregs_state __user *buf,
/* Check for the first magic field and other error scenarios. */
if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
fx_sw->xstate_size < min_xstate_size ||
- fx_sw->xstate_size > xstate_size ||
+ fx_sw->xstate_size > fpu_user_xstate_size ||
fx_sw->xstate_size > fx_sw->extended_size)
return -1;
@@ -88,7 +90,8 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
if (!use_xsave())
return err;
- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
+ err |= __put_user(FP_XSTATE_MAGIC2,
+ (__u32 *)(buf + fpu_user_xstate_size));
/*
* Read the xfeatures which we copied (directly from the cpu or
@@ -125,7 +128,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
else
err = copy_fregs_to_user((struct fregs_state __user *) buf);
- if (unlikely(err) && __clear_user(buf, xstate_size))
+ if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
err = -EFAULT;
return err;
}
@@ -167,7 +170,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
- if (fpregs_active()) {
+ if (fpregs_active() || using_compacted_format()) {
/* Save the live register state to the user directly. */
if (copy_fpregs_to_sigframe(buf_fx))
return -1;
@@ -175,8 +178,19 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
if (ia32_fxstate)
copy_fxregs_to_kernel(&tsk->thread.fpu);
} else {
+ /*
+ * It is a *bug* if kernel uses compacted-format for xsave
+ * area and we copy it out directly to a signal frame. It
+ * should have been handled above by saving the registers
+ * directly.
+ */
+ if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ WARN_ONCE(1, "x86/fpu: saving compacted-format xsave area to a signal frame!\n");
+ return -1;
+ }
+
fpstate_sanitize_xstate(&tsk->thread.fpu);
- if (__copy_to_user(buf_fx, xsave, xstate_size))
+ if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
return -1;
}
@@ -250,7 +264,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
- int state_size = xstate_size;
+ int state_size = fpu_kernel_xstate_size;
u64 xfeatures = 0;
int fx_only = 0;
@@ -282,6 +296,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
*/
state_size = sizeof(struct fxregs_state);
fx_only = 1;
+ trace_x86_fpu_xstate_check_failed(fpu);
} else {
state_size = fx_sw_user.xstate_size;
xfeatures = fx_sw_user.xfeatures;
@@ -308,9 +323,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
*/
fpu__drop(fpu);
- if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
- __copy_from_user(&env, buf, sizeof(env))) {
+ if (using_compacted_format()) {
+ err = copyin_to_xsaves(NULL, buf_fx,
+ &fpu->state.xsave);
+ } else {
+ err = __copy_from_user(&fpu->state.xsave,
+ buf_fx, state_size);
+ }
+
+ if (err || __copy_from_user(&env, buf, sizeof(env))) {
fpstate_init(&fpu->state);
+ trace_x86_fpu_init_state(fpu);
err = -1;
} else {
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
@@ -341,7 +364,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
static inline int xstate_sigframe_size(void)
{
- return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+ return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
+ fpu_user_xstate_size;
}
/*
@@ -385,12 +409,12 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
*/
void fpu__init_prepare_fx_sw_frame(void)
{
- int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
+ int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE;
fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = size;
fx_sw_reserved.xfeatures = xfeatures_mask;
- fx_sw_reserved.xstate_size = xstate_size;
+ fx_sw_reserved.xstate_size = fpu_user_xstate_size;
if (config_enabled(CONFIG_IA32_EMULATION) ||
config_enabled(CONFIG_X86_32)) {
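
The sw_reserved bookkeeping filled in by fpu__init_prepare_fx_sw_frame() is plain arithmetic: the signal frame carries fpu_user_xstate_size bytes of standard-format state followed by a 4-byte FP_XSTATE_MAGIC2. A small sketch with invented numbers -- the size below is a stand-in, not the real CPUID-derived value, and FP_XSTATE_MAGIC2_SIZE is assumed to be the size of a 32-bit magic as in the UAPI header.

#include <stdio.h>
#include <stdint.h>

#define FP_XSTATE_MAGIC2_SIZE sizeof(uint32_t)   /* 4-byte trailing magic */

int main(void)
{
        /* Invented standard-format user xstate size: */
        unsigned int fpu_user_xstate_size = 832;
        unsigned int extended_size = fpu_user_xstate_size +
                                     FP_XSTATE_MAGIC2_SIZE;

        printf("xstate bytes: %u, sigframe xstate area: %u\n",
               fpu_user_xstate_size, extended_size);
        return 0;
}
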
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 4ea2a59483c7..680049aa4593 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -11,6 +11,7 @@
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
+#include <asm/fpu/xstate.h>
#include <asm/tlbflush.h>
@@ -44,6 +45,13 @@ static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] =
static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
/*
+ * The XSAVE area of kernel can be in standard or compacted format;
+ * it is always in standard format for user mode. This is the user
+ * mode standard format size used for signal and ptrace frames.
+ */
+unsigned int fpu_user_xstate_size;
+
+/*
* Clear all of the X86_FEATURE_* bits that are unavailable
* when the CPU has no XSAVE support.
*/
@@ -105,6 +113,27 @@ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
+static int xfeature_is_supervisor(int xfeature_nr)
+{
+ /*
+ * We currently do not support supervisor states, but if
+ * we did, we could find out like this.
+ *
+ * SDM says: If state component 'i' is a user state component,
+ * ECX[0] returns 0; if state component i is a supervisor
+ * state component, ECX[0] returns 1.
+ */
+ u32 eax, ebx, ecx, edx;
+
+ cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
+ return !!(ecx & 1);
+}
+
+static int xfeature_is_user(int xfeature_nr)
+{
+ return !xfeature_is_supervisor(xfeature_nr);
+}
+
/*
* When executing XSAVEOPT (or other optimized XSAVE instructions), if
* a processor implementation detects that an FPU state component is still
@@ -171,7 +200,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
*/
while (xfeatures) {
if (xfeatures & 0x1) {
- int offset = xstate_offsets[feature_bit];
+ int offset = xstate_comp_offsets[feature_bit];
int size = xstate_sizes[feature_bit];
memcpy((void *)fx + offset,
@@ -192,6 +221,15 @@ void fpu__init_cpu_xstate(void)
{
if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask)
return;
+ /*
+ * Make it clear that XSAVES supervisor states are not yet
+ * implemented should anyone expect it to work by changing
+ * bits in XFEATURE_MASK_* macros and XCR0.
+ */
+ WARN_ONCE((xfeatures_mask & XFEATURE_MASK_SUPERVISOR),
+ "x86/fpu: XSAVES supervisor states are not yet implemented.\n");
+
+ xfeatures_mask &= ~XFEATURE_MASK_SUPERVISOR;
cr4_set_bits(X86_CR4_OSXSAVE);
xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
@@ -217,13 +255,29 @@ static void __init setup_xstate_features(void)
 /* start at the beginning of the "extended state" */
unsigned int last_good_offset = offsetof(struct xregs_state,
extended_state_area);
+ /*
+ * The FP xstates and SSE xstates are legacy states. They are always
+ * in the fixed offsets in the xsave area in either compacted form
+ * or standard form.
+ */
+ xstate_offsets[0] = 0;
+ xstate_sizes[0] = offsetof(struct fxregs_state, xmm_space);
+ xstate_offsets[1] = xstate_sizes[0];
+ xstate_sizes[1] = FIELD_SIZEOF(struct fxregs_state, xmm_space);
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
if (!xfeature_enabled(i))
continue;
cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
- xstate_offsets[i] = ebx;
+
+ /*
+ * If an xfeature is supervisor state, the offset
+ * in EBX is invalid. We leave it to -1.
+ */
+ if (xfeature_is_user(i))
+ xstate_offsets[i] = ebx;
+
xstate_sizes[i] = eax;
/*
* In our xstate size checks, we assume that the
@@ -233,8 +287,6 @@ static void __init setup_xstate_features(void)
WARN_ONCE(last_good_offset > xstate_offsets[i],
"x86/fpu: misordered xstate at %d\n", last_good_offset);
last_good_offset = xstate_offsets[i];
-
- printk(KERN_INFO "x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n", i, ebx, i, eax);
}
}
@@ -263,6 +315,33 @@ static void __init print_xstate_features(void)
}
/*
+ * This check is important because it is easy to get XSTATE_*
+ * confused with XSTATE_BIT_*.
+ */
+#define CHECK_XFEATURE(nr) do { \
+ WARN_ON(nr < FIRST_EXTENDED_XFEATURE); \
+ WARN_ON(nr >= XFEATURE_MAX); \
+} while (0)
+
+/*
+ * We could cache this like xstate_size[], but we only use
+ * it here, so it would be a waste of space.
+ */
+static int xfeature_is_aligned(int xfeature_nr)
+{
+ u32 eax, ebx, ecx, edx;
+
+ CHECK_XFEATURE(xfeature_nr);
+ cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
+ /*
+ * The value returned by ECX[1] indicates the alignment
+ * of state component 'i' when the compacted format
+ * of the extended region of an XSAVE area is used:
+ */
+ return !!(ecx & 2);
+}
+
+/*
* This function sets up offsets and sizes of all extended states in
* xsave area. This supports both standard format and compacted format
 * of the xsave area.
@@ -299,10 +378,29 @@ static void __init setup_xstate_comp(void)
else
xstate_comp_sizes[i] = 0;
- if (i > FIRST_EXTENDED_XFEATURE)
+ if (i > FIRST_EXTENDED_XFEATURE) {
xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
+ xstate_comp_sizes[i-1];
+ if (xfeature_is_aligned(i))
+ xstate_comp_offsets[i] =
+ ALIGN(xstate_comp_offsets[i], 64);
+ }
+ }
+}
+
+/*
+ * Print out xstate component offsets and sizes
+ */
+static void __init print_xstate_offset_size(void)
+{
+ int i;
+
+ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+ if (!xfeature_enabled(i))
+ continue;
+ pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
+ i, xstate_comp_offsets[i], i, xstate_sizes[i]);
}
}
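
The 64-byte alignment handling added to setup_xstate_comp() is easier to follow with concrete numbers. A hypothetical user-space sketch -- the per-feature sizes and alignment flags are invented, and ALIGN() is redefined locally rather than taken from the kernel.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define FXSAVE_SIZE 512
#define XSAVE_HDR_SIZE 64

int main(void)
{
        /* Invented per-feature sizes and alignment flags for features 2..4: */
        unsigned int size[]    = { 0, 0, 256, 64, 832 };
        unsigned int aligned[] = { 0, 0, 0,   0,  1  };
        unsigned int offset[5];
        int i;

        /* The extended area starts right after the legacy + header region: */
        offset[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

        for (i = 3; i < 5; i++) {
                offset[i] = offset[i - 1] + size[i - 1];
                if (aligned[i])
                        offset[i] = ALIGN(offset[i], 64);
        }

        for (i = 2; i < 5; i++)
                printf("xstate_comp_offsets[%d] = %u (size %u)\n",
                       i, offset[i], size[i]);
        return 0;
}
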
@@ -322,13 +420,11 @@ static void __init setup_init_fpu_buf(void)
setup_xstate_features();
print_xstate_features();
- if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
init_fpstate.xsave.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
- init_fpstate.xsave.header.xfeatures = xfeatures_mask;
- }
/*
- * Init all the features state with header_bv being 0x0
+ * Init all the features state with header.xfeatures being 0x0
*/
copy_kernel_to_xregs_booting(&init_fpstate.xsave);
@@ -339,58 +435,19 @@ static void __init setup_init_fpu_buf(void)
copy_xregs_to_kernel_booting(&init_fpstate.xsave);
}
-static int xfeature_is_supervisor(int xfeature_nr)
-{
- /*
- * We currently do not support supervisor states, but if
- * we did, we could find out like this.
- *
- * SDM says: If state component i is a user state component,
- * ECX[0] return 0; if state component i is a supervisor
- * state component, ECX[0] returns 1.
- u32 eax, ebx, ecx, edx;
- cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx;
- return !!(ecx & 1);
- */
- return 0;
-}
-/*
-static int xfeature_is_user(int xfeature_nr)
-{
- return !xfeature_is_supervisor(xfeature_nr);
-}
-*/
-
-/*
- * This check is important because it is easy to get XSTATE_*
- * confused with XSTATE_BIT_*.
- */
-#define CHECK_XFEATURE(nr) do { \
- WARN_ON(nr < FIRST_EXTENDED_XFEATURE); \
- WARN_ON(nr >= XFEATURE_MAX); \
-} while (0)
-
-/*
- * We could cache this like xstate_size[], but we only use
- * it here, so it would be a waste of space.
- */
-static int xfeature_is_aligned(int xfeature_nr)
+static int xfeature_uncompacted_offset(int xfeature_nr)
{
u32 eax, ebx, ecx, edx;
- CHECK_XFEATURE(xfeature_nr);
- cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
/*
- * The value returned by ECX[1] indicates the alignment
- * of state component i when the compacted format
- * of the extended region of an XSAVE area is used
+ * Only XSAVES supports supervisor states and it uses compacted
+ * format. Checking a supervisor state's uncompacted offset is
+ * an error.
*/
- return !!(ecx & 2);
-}
-
-static int xfeature_uncompacted_offset(int xfeature_nr)
-{
- u32 eax, ebx, ecx, edx;
+ if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
+ WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
+ return -1;
+ }
CHECK_XFEATURE(xfeature_nr);
cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
@@ -415,7 +472,7 @@ static int xfeature_size(int xfeature_nr)
* that it is obvious which aspect of 'XSAVES' is being handled
* by the calling code.
*/
-static int using_compacted_format(void)
+int using_compacted_format(void)
{
return boot_cpu_has(X86_FEATURE_XSAVES);
}
@@ -530,11 +587,12 @@ static void do_extra_xstate_size_checks(void)
*/
paranoid_xstate_size += xfeature_size(i);
}
- XSTATE_WARN_ON(paranoid_xstate_size != xstate_size);
+ XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
}
+
/*
- * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
+ * Get total size of enabled xstates in XCR0/xfeatures_mask.
*
* Note the SDM's wording here. "sub-function 0" only enumerates
* the size of the *user* states. If we use it to size a buffer
@@ -544,34 +602,33 @@ static void do_extra_xstate_size_checks(void)
* Note that we do not currently set any bits on IA32_XSS so
* 'XCR0 | IA32_XSS == XCR0' for now.
*/
-static unsigned int __init calculate_xstate_size(void)
+static unsigned int __init get_xsaves_size(void)
{
unsigned int eax, ebx, ecx, edx;
- unsigned int calculated_xstate_size;
+ /*
+ * - CPUID function 0DH, sub-function 1:
+ * EBX enumerates the size (in bytes) required by
+ * the XSAVES instruction for an XSAVE area
+ * containing all the state components
+ * corresponding to bits currently set in
+ * XCR0 | IA32_XSS.
+ */
+ cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+ return ebx;
+}
- if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
- /*
- * - CPUID function 0DH, sub-function 0:
- * EBX enumerates the size (in bytes) required by
- * the XSAVE instruction for an XSAVE area
- * containing all the *user* state components
- * corresponding to bits currently set in XCR0.
- */
- cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
- calculated_xstate_size = ebx;
- } else {
- /*
- * - CPUID function 0DH, sub-function 1:
- * EBX enumerates the size (in bytes) required by
- * the XSAVES instruction for an XSAVE area
- * containing all the state components
- * corresponding to bits currently set in
- * XCR0 | IA32_XSS.
- */
- cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
- calculated_xstate_size = ebx;
- }
- return calculated_xstate_size;
+static unsigned int __init get_xsave_size(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ /*
+ * - CPUID function 0DH, sub-function 0:
+ * EBX enumerates the size (in bytes) required by
+ * the XSAVE instruction for an XSAVE area
+ * containing all the *user* state components
+ * corresponding to bits currently set in XCR0.
+ */
+ cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+ return ebx;
}
/*
@@ -591,7 +648,15 @@ static bool is_supported_xstate_size(unsigned int test_xstate_size)
static int init_xstate_size(void)
{
/* Recompute the context size for enabled features: */
- unsigned int possible_xstate_size = calculate_xstate_size();
+ unsigned int possible_xstate_size;
+ unsigned int xsave_size;
+
+ xsave_size = get_xsave_size();
+
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ possible_xstate_size = get_xsaves_size();
+ else
+ possible_xstate_size = xsave_size;
/* Ensure we have the space to store all enabled: */
if (!is_supported_xstate_size(possible_xstate_size))
@@ -601,8 +666,13 @@ static int init_xstate_size(void)
* The size is OK, we are definitely going to use xsave,
* make it known to the world that we need more space.
*/
- xstate_size = possible_xstate_size;
+ fpu_kernel_xstate_size = possible_xstate_size;
do_extra_xstate_size_checks();
+
+ /*
+ * User space is always in standard format.
+ */
+ fpu_user_xstate_size = xsave_size;
return 0;
}
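
The split into get_xsave_size()/get_xsaves_size() mirrors the two CPUID sub-leaves described in the comments above. The following stand-alone sketch queries both on the running CPU; it assumes an x86 host and that GCC/Clang's <cpuid.h> helper __get_cpuid_count() is available.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Sub-leaf 0: size needed by XSAVE for the user states in XCR0. */
        if (!__get_cpuid_count(0x0d, 0, &eax, &ebx, &ecx, &edx))
                return 1;
        printf("XSAVE  (user)   size: %u bytes\n", ebx);

        /* Sub-leaf 1: size needed by XSAVES for XCR0 | IA32_XSS. */
        if (!__get_cpuid_count(0x0d, 1, &eax, &ebx, &ecx, &edx))
                return 1;
        printf("XSAVES (kernel) size: %u bytes\n", ebx);

        return 0;
}
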
@@ -644,8 +714,13 @@ void __init fpu__init_system_xstate(void)
xfeatures_mask = eax + ((u64)edx << 32);
if ((xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
+ /*
+ * This indicates that something really unexpected happened
+ * with the enumeration. Disable XSAVE and try to continue
+ * booting without it. This is too early to BUG().
+ */
pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
- BUG();
+ goto out_disable;
}
xfeatures_mask &= fpu__get_supported_xfeatures_mask();
@@ -653,21 +728,29 @@ void __init fpu__init_system_xstate(void)
/* Enable xstate instructions to be able to continue with initialization: */
fpu__init_cpu_xstate();
err = init_xstate_size();
- if (err) {
- /* something went wrong, boot without any XSAVE support */
- fpu__init_disable_system_xstate();
- return;
- }
+ if (err)
+ goto out_disable;
+
+ /*
+ * Update info used for ptrace frames; use standard-format size and no
+ * supervisor xstates:
+ */
+ update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR);
- update_regset_xstate_info(xstate_size, xfeatures_mask);
fpu__init_prepare_fx_sw_frame();
setup_init_fpu_buf();
setup_xstate_comp();
+ print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
xfeatures_mask,
- xstate_size,
+ fpu_kernel_xstate_size,
boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
+ return;
+
+out_disable:
+ /* something went wrong, try to boot without any XSAVE support */
+ fpu__init_disable_system_xstate();
}
/*
@@ -693,6 +776,11 @@ void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
{
int feature_nr = fls64(xstate_feature_mask) - 1;
+ if (!xfeature_enabled(feature_nr)) {
+ WARN_ON_FPU(1);
+ return NULL;
+ }
+
return (void *)xsave + xstate_comp_offsets[feature_nr];
}
/*
@@ -887,16 +975,16 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return -EINVAL;
- /* Set the bits we need in PKRU */
+ /* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
new_pkru_bits |= PKRU_AD_BIT;
if (init_val & PKEY_DISABLE_WRITE)
new_pkru_bits |= PKRU_WD_BIT;
- /* Shift the bits in to the correct place in PKRU for pkey. */
+ /* Shift the bits in to the correct place in PKRU for pkey: */
new_pkru_bits <<= pkey_shift;
- /* Locate old copy of the state in the xsave buffer */
+ /* Locate old copy of the state in the xsave buffer: */
old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU);
/*
@@ -909,9 +997,10 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
else
new_pkru_state.pkru = old_pkru_state->pkru;
- /* mask off any old bits in place */
+ /* Mask off any old bits in place: */
new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
- /* Set the newly-requested bits */
+
+ /* Set the newly-requested bits: */
new_pkru_state.pkru |= new_pkru_bits;
/*
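
The PKRU manipulation above boils down to a two-bit field per protection key: access-disable in the low bit, write-disable in the high bit, shifted by 2 * pkey. A hypothetical user-space sketch of that arithmetic follows; the constant names and sample key are local to the sketch.

#include <stdio.h>

#define PKRU_AD_BIT 0x1u            /* access-disable */
#define PKRU_WD_BIT 0x2u            /* write-disable  */
#define PKRU_BITS_PER_PKEY 2

static unsigned int pkru_set_pkey(unsigned int pkru, int pkey,
                                  int disable_access, int disable_write)
{
        int shift = pkey * PKRU_BITS_PER_PKEY;
        unsigned int bits = 0;

        if (disable_access)
                bits |= PKRU_AD_BIT;
        if (disable_write)
                bits |= PKRU_WD_BIT;

        /* Clear the old two-bit field for this key, then set the new one: */
        pkru &= ~((PKRU_AD_BIT | PKRU_WD_BIT) << shift);
        pkru |= bits << shift;

        return pkru;
}

int main(void)
{
        /* Invented example: make pkey 4 write-disabled: */
        printf("pkru = %#x\n", pkru_set_pkey(0, 4, 0, 1));
        return 0;
}
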
@@ -925,8 +1014,168 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
*/
new_pkru_state.pad = 0;
- fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state,
- sizeof(new_pkru_state));
+ fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state));
+
+ return 0;
+}
+
+/*
+ * This is similar to user_regset_copyout(), but will not add offset to
+ * the source data pointer or increment pos, count, kbuf, and ubuf.
+ */
+static inline int xstate_copyout(unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf,
+ const void *data, const int start_pos,
+ const int end_pos)
+{
+ if ((count == 0) || (pos < start_pos))
+ return 0;
+
+ if (end_pos < 0 || pos < end_pos) {
+ unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+
+ if (kbuf) {
+ memcpy(kbuf + pos, data, copy);
+ } else {
+ if (__copy_to_user(ubuf + pos, data, copy))
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Convert from kernel XSAVES compacted format to standard format and copy
+ * to a ptrace buffer. It supports partial copy but pos always starts from
+ * zero. This is called from xstateregs_get() and there we check the CPU
+ * has XSAVES.
+ */
+int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
+ void __user *ubuf, struct xregs_state *xsave)
+{
+ unsigned int offset, size;
+ int ret, i;
+ struct xstate_header header;
+
+ /*
+ * Currently copy_regset_to_user() starts from pos 0:
+ */
+ if (unlikely(pos != 0))
+ return -EFAULT;
+
+ /*
+ * The destination is a ptrace buffer; we put in only user xstates:
+ */
+ memset(&header, 0, sizeof(header));
+ header.xfeatures = xsave->header.xfeatures;
+ header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+
+ /*
+ * Copy xregs_state->header:
+ */
+ offset = offsetof(struct xregs_state, header);
+ size = sizeof(header);
+
+ ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < XFEATURE_MAX; i++) {
+ /*
+ * Copy only in-use xstates:
+ */
+ if ((header.xfeatures >> i) & 1) {
+ void *src = __raw_xsave_addr(xsave, 1 << i);
+
+ offset = xstate_offsets[i];
+ size = xstate_sizes[i];
+
+ ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
+
+ if (ret)
+ return ret;
+
+ if (offset + size >= count)
+ break;
+ }
+
+ }
+
+ /*
+ * Fill xsave->i387.sw_reserved value for ptrace frame:
+ */
+ offset = offsetof(struct fxregs_state, sw_reserved);
+ size = sizeof(xstate_fx_sw_bytes);
+
+ ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Convert from a ptrace standard-format buffer to kernel XSAVES format
+ * and copy to the target thread. This is called from xstateregs_set() and
+ * there we check the CPU has XSAVES and a whole standard-sized buffer
+ * exists.
+ */
+int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
+ struct xregs_state *xsave)
+{
+ unsigned int offset, size;
+ int i;
+ u64 xfeatures;
+ u64 allowed_features;
+
+ offset = offsetof(struct xregs_state, header);
+ size = sizeof(xfeatures);
+
+ if (kbuf) {
+ memcpy(&xfeatures, kbuf + offset, size);
+ } else {
+ if (__copy_from_user(&xfeatures, ubuf + offset, size))
+ return -EFAULT;
+ }
+
+ /*
+ * Reject if the user sets any disabled or supervisor features:
+ */
+ allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
+
+ if (xfeatures & ~allowed_features)
+ return -EINVAL;
+
+ for (i = 0; i < XFEATURE_MAX; i++) {
+ u64 mask = ((u64)1 << i);
+
+ if (xfeatures & mask) {
+ void *dst = __raw_xsave_addr(xsave, 1 << i);
+
+ offset = xstate_offsets[i];
+ size = xstate_sizes[i];
+
+ if (kbuf) {
+ memcpy(dst, kbuf + offset, size);
+ } else {
+ if (__copy_from_user(dst, ubuf + offset, size))
+ return -EFAULT;
+ }
+ }
+ }
+
+ /*
+ * The state that came in from userspace was user-state only.
+ * Mask all the user states out of 'xfeatures':
+ */
+ xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
+
+ /*
+ * Add back in the features that came in from userspace:
+ */
+ xsave->header.xfeatures |= xfeatures;
return 0;
}
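
copyout_from_xsaves() and copyin_to_xsaves() above both reduce to the same per-feature walk: test each bit of the feature mask and copy that component between its compacted offset and its standard offset. A stripped-down user-space sketch of the walk -- the offsets, sizes and feature bitmap below are invented; the real code derives them from CPUID.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NR_XFEATURES 5

int main(void)
{
        uint64_t xfeatures = (1 << 2) | (1 << 4);        /* example bitmap */
        unsigned int comp_off[NR_XFEATURES] = { 0, 0, 576, 832, 896 };
        unsigned int std_off[NR_XFEATURES]  = { 0, 0, 576, 960, 1024 };
        unsigned int size[NR_XFEATURES]     = { 0, 0, 256, 64, 832 };
        char src[2048] = "source buffer", dst[2048] = { 0 };
        int i;

        for (i = 2; i < NR_XFEATURES; i++) {
                if (!((xfeatures >> i) & 1))
                        continue;               /* feature not in use */
                memcpy(dst + std_off[i], src + comp_off[i], size[i]);
                printf("feature %d: %u bytes, %u -> %u\n",
                       i, size[i], comp_off[i], std_off[i]);
        }
        return 0;
}
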
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index d784bb547a9d..2dda0bc4576e 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -26,7 +26,7 @@ static void __init i386_default_early_setup(void)
x86_init.resources.reserve_resources = i386_reserve_resources;
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
- reserve_ebda_region();
+ reserve_bios_regions();
}
asmlinkage __visible void __init i386_start_kernel(void)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b72fb0b71dd1..99d48e7d2974 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -183,7 +183,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
copy_bootdata(__va(real_mode_data));
x86_early_init_platform_quirks();
- reserve_ebda_region();
+ reserve_bios_regions();
switch (boot_params.hdr.hardware_subarch) {
case X86_SUBARCH_INTEL_MID:
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 5df831ef1442..9f8efc9f0075 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -38,7 +38,7 @@
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
@@ -299,6 +299,7 @@ ENTRY(secondary_startup_64)
pushq $__KERNEL_CS # set correct cs
pushq %rax # target address in negative space
lretq
+ENDPROC(secondary_startup_64)
#include "verify_cpu.S"
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f112af7aa62e..3d747070fe67 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -710,31 +710,29 @@ static void hpet_work(struct work_struct *w)
complete(&hpet_work->complete);
}
-static int hpet_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int hpet_cpuhp_online(unsigned int cpu)
{
- unsigned long cpu = (unsigned long)hcpu;
struct hpet_work_struct work;
+
+ INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
+ init_completion(&work.complete);
+ /* FIXME: add schedule_work_on() */
+ schedule_delayed_work_on(cpu, &work.work, 0);
+ wait_for_completion(&work.complete);
+ destroy_delayed_work_on_stack(&work.work);
+ return 0;
+}
+
+static int hpet_cpuhp_dead(unsigned int cpu)
+{
struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
- init_completion(&work.complete);
- /* FIXME: add schedule_work_on() */
- schedule_delayed_work_on(cpu, &work.work, 0);
- wait_for_completion(&work.complete);
- destroy_delayed_work_on_stack(&work.work);
- break;
- case CPU_DEAD:
- if (hdev) {
- free_irq(hdev->irq, hdev);
- hdev->flags &= ~HPET_DEV_USED;
- per_cpu(cpu_hpet_dev, cpu) = NULL;
- }
- break;
- }
- return NOTIFY_OK;
+ if (!hdev)
+ return 0;
+ free_irq(hdev->irq, hdev);
+ hdev->flags &= ~HPET_DEV_USED;
+ per_cpu(cpu_hpet_dev, cpu) = NULL;
+ return 0;
}
#else
@@ -750,11 +748,8 @@ static void hpet_reserve_msi_timers(struct hpet_data *hd)
}
#endif
-static int hpet_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- return NOTIFY_OK;
-}
+#define hpet_cpuhp_online NULL
+#define hpet_cpuhp_dead NULL
#endif
@@ -931,7 +926,7 @@ out_nohpet:
*/
static __init int hpet_late_init(void)
{
- int cpu;
+ int ret;
if (boot_hpet_disable)
return -ENODEV;
@@ -961,16 +956,20 @@ static __init int hpet_late_init(void)
if (boot_cpu_has(X86_FEATURE_ARAT))
return 0;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu) {
- hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
- }
-
/* This notifier should be called after workqueue is ready */
- __hotcpu_notifier(hpet_cpuhp_notify, -20);
- cpu_notifier_register_done();
-
+ ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
+ hpet_cpuhp_online, NULL);
+ if (ret)
+ return ret;
+ ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
+ hpet_cpuhp_dead);
+ if (ret)
+ goto err_cpuhp;
return 0;
+
+err_cpuhp:
+ cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
+ return ret;
}
fs_initcall(hpet_late_init);
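
For reference, the new-style hotplug callbacks used in the HPET conversion above follow a common pattern. Below is a minimal, illustrative module sketch under stated assumptions: it uses the dynamic CPUHP_AP_ONLINE_DYN slot rather than the dedicated HPET states, the name string and callback bodies are placeholders, and it is meant to show the API shape rather than build against every tree.

#include <linux/module.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state my_state;

static int my_online(unsigned int cpu)
{
        /* Set up the per-CPU resource for 'cpu' here. */
        return 0;
}

static int my_dead(unsigned int cpu)
{
        /* Tear down the per-CPU resource for 'cpu' here. */
        return 0;
}

static int __init my_init(void)
{
        int ret;

        /* Dynamic states return a positive state number on success: */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/demo:online",
                                my_online, my_dead);
        if (ret < 0)
                return ret;
        my_state = ret;
        return 0;
}

static void __exit my_exit(void)
{
        cpuhp_remove_state(my_state);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
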
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 2bcfb5f2bc44..8771766d46b6 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -36,13 +36,14 @@
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
+#include <asm/user.h>
/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 64341aa485ae..1f9b878ef5ef 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
#include <asm/checksum.h>
#include <asm/pgtable.h>
@@ -42,3 +43,5 @@ EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(___preempt_schedule);
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
+
+EXPORT_SYMBOL(__sw_hweight32);
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index efb82f07b29c..6ebe00cb4a3b 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -3,7 +3,7 @@
*
*/
#include <linux/clockchips.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/timex.h>
#include <linux/i8253.h>
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
index a979b5bd2fc0..50c89e8a95f2 100644
--- a/arch/x86/kernel/io_delay.c
+++ b/arch/x86/kernel/io_delay.c
@@ -6,7 +6,7 @@
* outb_p/inb_p API uses.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dmi.h>
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 38da8f29a9c8..1f38d9a4d9de 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -8,7 +8,6 @@
* io_apic.c.)
*/
-#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
@@ -130,11 +129,9 @@ void irq_ctx_init(int cpu)
void do_softirq_own_stack(void)
{
- struct thread_info *curstk;
struct irq_stack *irqstk;
u32 *isp, *prev_esp;
- curstk = current_stack();
irqstk = __this_cpu_read(softirq_stack);
/* build the stack frame on the softirq stack */
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 206d0b90a3ab..4a7903714065 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -11,7 +11,6 @@
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index dc1404bf8e4b..bdb83e431d89 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -8,7 +8,7 @@
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/stat.h>
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 38cf7a741250..7847e5c0e0b5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
+ /*
+ * Trap flag (TF) has been set here because this fault
+ * happened where the single stepping will be done.
+ * So clear it by resetting the current kprobe:
+ */
+ regs->flags &= ~X86_EFLAGS_TF;
+
+ /*
+ * If the TF flag was set before the kprobe hit,
+ * don't touch it:
+ */
regs->flags |= kcb->kprobe_old_flags;
+
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index eea2a6f72b31..1726c4c12336 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -21,7 +21,7 @@
*/
#include <linux/context_tracking.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
@@ -301,8 +301,6 @@ static void kvm_register_steal_time(void)
if (!has_steal_clock)
return;
- memset(st, 0, sizeof(*st));
-
wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
pr_info("kvm-stealtime: cpu %d, msr %llx\n",
cpu, (unsigned long long) slow_virt_to_phys(st));
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 97340f2c437c..068c4a929de6 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -16,7 +16,6 @@
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
-#include <linux/module.h>
#include <linux/smp.h>
#include <linux/pci.h>
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 33ee3e0efd65..1939a0269377 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -3,7 +3,7 @@
* compiled in a FTRACE-compatible way.
*/
#include <linux/spinlock.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/paravirt.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 7b3b3f24c3ea..ad5bc9578a73 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -19,7 +19,8 @@
*/
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 7c577a178859..5069ef560d83 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -2,7 +2,7 @@
#include <linux/pci.h>
#include <linux/cache.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index b2f8a33b36ff..24a50301f150 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -7,12 +7,12 @@
void __init x86_early_init_platform_quirks(void)
{
x86_platform.legacy.rtc = 1;
- x86_platform.legacy.ebda_search = 0;
+ x86_platform.legacy.reserve_bios_regions = 0;
x86_platform.legacy.devices.pnpbios = 1;
switch (boot_params.hdr.hardware_subarch) {
case X86_SUBARCH_PC:
- x86_platform.legacy.ebda_search = 1;
+ x86_platform.legacy.reserve_bios_regions = 1;
break;
case X86_SUBARCH_XEN:
case X86_SUBARCH_LGUEST:
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 92f70147a9a6..0c5315d322c8 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -3,7 +3,7 @@
* Copyright (c) 2015, Intel Corporation.
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/ioport.h>
static int found(u64 start, u64 end, void *data)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 96becbbb52e0..62c0b0ea2ce4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -7,7 +7,8 @@
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
@@ -404,7 +405,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
if (c->x86_vendor != X86_VENDOR_INTEL)
return 0;
- if (!cpu_has(c, X86_FEATURE_MWAIT))
+ if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
return 0;
return 1;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9f950917528b..d86be29c38c7 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -25,7 +25,7 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6e789ca1f841..63236d8f84bf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -26,7 +26,7 @@
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 99bfc025111d..06c58ce46762 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -61,11 +61,16 @@ void pvclock_resume(void)
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
{
unsigned version;
- cycle_t ret;
u8 flags;
do {
- version = __pvclock_read_cycles(src, &ret, &flags);
+ version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
+
+ flags = src->flags;
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
do {
version = __pvclock_read_cycles(src, &ret, &flags);
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
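
The retry loop in pvclock_read_flags() above is the classic seqcount-style reader: retry while the version is odd (writer in progress) or changed underneath us, with read barriers keeping the data loads between the two version loads. A user-space sketch using C11 atomics -- the structure and field names are invented, not the pvclock ABI.

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

struct time_info {
        _Atomic uint32_t version;
        uint64_t tsc;
        uint8_t  flags;
};

static uint8_t read_flags(struct time_info *src)
{
        uint32_t version;
        uint8_t flags;

        do {
                version = atomic_load_explicit(&src->version,
                                               memory_order_acquire);
                flags = src->flags;
                /* Make sure the version re-check happens last: */
                atomic_thread_fence(memory_order_acquire);
        } while ((version & 1) ||
                 version != atomic_load_explicit(&src->version,
                                                 memory_order_relaxed));

        return flags;
}

int main(void)
{
        struct time_info ti = { .version = 2, .tsc = 0, .flags = 0x1 };

        printf("flags=%#x\n", read_flags(&ti));
        return 0;
}
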
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index a9b31eb815f2..63bf27d972b7 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -1,6 +1,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pm.h>
@@ -55,6 +55,19 @@ bool port_cf9_safe = false;
*/
/*
+ * Some machines require the "reboot=a" commandline options
+ */
+static int __init set_acpi_reboot(const struct dmi_system_id *d)
+{
+ if (reboot_type != BOOT_ACPI) {
+ reboot_type = BOOT_ACPI;
+ pr_info("%s series board detected. Selecting %s-method for reboots.\n",
+ d->ident, "ACPI");
+ }
+ return 0;
+}
+
+/*
* Some machines require the "reboot=b" or "reboot=k" commandline options,
* this quirk makes that automatic.
*/
@@ -395,6 +408,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
},
},
+ { /* Handle problems with rebooting on Dell Optiplex 7450 AIO */
+ .callback = set_acpi_reboot,
+ .ident = "Dell OptiPlex 7450 AIO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7450 AIO"),
+ },
+ },
/* Hewlett-Packard */
{ /* Handle problems with rebooting on HP laptops */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4e7b3991b60..991b77986d57 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -36,7 +36,7 @@
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
@@ -113,6 +113,7 @@
#include <asm/prom.h>
#include <asm/microcode.h>
#include <asm/mmu_context.h>
+#include <asm/kaslr.h>
/*
* max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -399,10 +400,6 @@ static void __init reserve_initrd(void)
memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
-static void __init early_initrd_acpi_init(void)
-{
- early_acpi_table_init((void *)initrd_start, initrd_end - initrd_start);
-}
#else
static void __init early_reserve_initrd(void)
{
@@ -410,9 +407,6 @@ static void __init early_reserve_initrd(void)
static void __init reserve_initrd(void)
{
}
-static void __init early_initrd_acpi_init(void)
-{
-}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init parse_setup_data(void)
@@ -942,6 +936,8 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
+ kernel_randomize_memory();
+
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();
@@ -1146,7 +1142,7 @@ void __init setup_arch(char **cmdline_p)
reserve_initrd();
- early_initrd_acpi_init();
+ acpi_table_upgrade();
vsmp_init();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e4fcb87ba7a6..7a40e068302d 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -236,6 +236,8 @@ void __init setup_per_cpu_areas(void)
early_per_cpu_map(x86_cpu_to_apicid, cpu);
per_cpu(x86_bios_cpu_apicid, cpu) =
early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+ per_cpu(x86_cpu_to_acpiid, cpu) =
+ early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
per_cpu(x86_cpu_to_logical_apicid, cpu) =
@@ -271,6 +273,7 @@ void __init setup_per_cpu_areas(void)
#ifdef CONFIG_X86_LOCAL_APIC
early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+ early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index dc3c0b1c816f..b44564bf86a8 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -1,11 +1,104 @@
#include <linux/compat.h>
#include <linux/uaccess.h>
+/*
+ * The compat_siginfo_t structure and handling code is very easy
+ * to break in several ways. It must always be updated when new
+ * updates are made to the main siginfo_t, and
+ * copy_siginfo_to_user32() must be updated when the
+ * (arch-independent) copy_siginfo_to_user() is updated.
+ *
+ * It is also easy to put a new member in the compat_siginfo_t
+ * which has implicit alignment which can move internal structure
+ * alignment around breaking the ABI. This can happen if you,
+ * for instance, put a plain 64-bit value in there.
+ */
+static inline void signal_compat_build_tests(void)
+{
+ int _sifields_offset = offsetof(compat_siginfo_t, _sifields);
+
+ /*
+ * If adding a new si_code, there is probably new data in
+ * the siginfo. Make sure folks bumping the si_code
+ * limits also have to look at this code. Make sure any
+ * new fields are handled in copy_siginfo_to_user32()!
+ */
+ BUILD_BUG_ON(NSIGILL != 8);
+ BUILD_BUG_ON(NSIGFPE != 8);
+ BUILD_BUG_ON(NSIGSEGV != 4);
+ BUILD_BUG_ON(NSIGBUS != 5);
+ BUILD_BUG_ON(NSIGTRAP != 4);
+ BUILD_BUG_ON(NSIGCHLD != 6);
+ BUILD_BUG_ON(NSIGSYS != 1);
+
+ /* This is part of the ABI and can never change in size: */
+ BUILD_BUG_ON(sizeof(compat_siginfo_t) != 128);
+ /*
+ * The offsets of all the (unioned) si_fields are fixed
+ * in the ABI, of course. Make sure none of them ever
+ * move and are always at the beginning:
+ */
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
+#define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
+
+ /*
+ * Ensure that the size of each si_field never changes.
+ * If it does, it is a sign that the
+ * copy_siginfo_to_user32() code below needs to be updated
+ * along with the size in the CHECK_SI_SIZE().
+ *
+ * We repeat this check for both the generic and compat
+ * siginfos.
+ *
+ * Note: it is OK for these to grow as long as the whole
+ * structure stays within the padding size (checked
+ * above).
+ */
+#define CHECK_CSI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((compat_siginfo_t *)0)->_sifields.name))
+#define CHECK_SI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((siginfo_t *)0)->_sifields.name))
+
+ CHECK_CSI_OFFSET(_kill);
+ CHECK_CSI_SIZE (_kill, 2*sizeof(int));
+ CHECK_SI_SIZE (_kill, 2*sizeof(int));
+
+ CHECK_CSI_OFFSET(_timer);
+ CHECK_CSI_SIZE (_timer, 5*sizeof(int));
+ CHECK_SI_SIZE (_timer, 6*sizeof(int));
+
+ CHECK_CSI_OFFSET(_rt);
+ CHECK_CSI_SIZE (_rt, 3*sizeof(int));
+ CHECK_SI_SIZE (_rt, 4*sizeof(int));
+
+ CHECK_CSI_OFFSET(_sigchld);
+ CHECK_CSI_SIZE (_sigchld, 5*sizeof(int));
+ CHECK_SI_SIZE (_sigchld, 8*sizeof(int));
+
+ CHECK_CSI_OFFSET(_sigchld_x32);
+ CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int));
+ /* no _sigchld_x32 in the generic siginfo_t */
+
+ CHECK_CSI_OFFSET(_sigfault);
+ CHECK_CSI_SIZE (_sigfault, 4*sizeof(int));
+ CHECK_SI_SIZE (_sigfault, 8*sizeof(int));
+
+ CHECK_CSI_OFFSET(_sigpoll);
+ CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
+ CHECK_SI_SIZE (_sigpoll, 4*sizeof(int));
+
+ CHECK_CSI_OFFSET(_sigsys);
+ CHECK_CSI_SIZE (_sigsys, 3*sizeof(int));
+ CHECK_SI_SIZE (_sigsys, 4*sizeof(int));
+
+ /* any new si_fields should be added here */
+}
+
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err = 0;
bool ia32 = test_thread_flag(TIF_IA32);
+ signal_compat_build_tests();
+
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
@@ -32,6 +125,21 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
&to->_sifields._pad[0]);
switch (from->si_code >> 16) {
case __SI_FAULT >> 16:
+ if (from->si_signo == SIGBUS &&
+ (from->si_code == BUS_MCEERR_AR ||
+ from->si_code == BUS_MCEERR_AO))
+ put_user_ex(from->si_addr_lsb, &to->si_addr_lsb);
+
+ if (from->si_signo == SIGSEGV) {
+ if (from->si_code == SEGV_BNDERR) {
+ compat_uptr_t lower = (unsigned long)&to->si_lower;
+ compat_uptr_t upper = (unsigned long)&to->si_upper;
+ put_user_ex(lower, &to->si_lower);
+ put_user_ex(upper, &to->si_upper);
+ }
+ if (from->si_code == SEGV_PKUERR)
+ put_user_ex(from->si_pkey, &to->si_pkey);
+ }
break;
case __SI_SYS >> 16:
put_user_ex(from->si_syscall, &to->si_syscall);
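
The BUILD_BUG_ON() battery added above acts as a compile-time ABI lock. The same idea in stand-alone form, using C11 _Static_assert on a toy structure -- the layout and numbers below are invented and are not the real compat_siginfo_t.

#include <stddef.h>
#include <stdint.h>

typedef struct {
        int32_t si_signo;
        int32_t si_errno;
        int32_t si_code;
        union {
                int32_t _pad[29];
                struct { int32_t _pid, _uid; } _kill;
        } _sifields;
} toy_siginfo_t;

/* The total size and the union offset are part of the (toy) ABI: */
_Static_assert(sizeof(toy_siginfo_t) == 128, "toy_siginfo_t changed size");
_Static_assert(offsetof(toy_siginfo_t, _sifields) == 3 * sizeof(int32_t),
               "_sifields moved");
_Static_assert(sizeof(((toy_siginfo_t *)0)->_sifields._kill) == 2 * sizeof(int32_t),
               "_kill changed size");

int main(void) { return 0; }
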
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fafe8b923cac..2a6e84a30a54 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -43,7 +43,7 @@
#include <linux/init.h>
#include <linux/smp.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
@@ -105,6 +105,9 @@ static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
+/* Maximum number of SMT threads on any online core */
+int __max_smt_threads __read_mostly;
+
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
unsigned long flags;
@@ -493,7 +496,7 @@ void set_cpu_sibling_map(int cpu)
bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct cpuinfo_x86 *o;
- int i;
+ int i, threads;
cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
@@ -550,6 +553,10 @@ void set_cpu_sibling_map(int cpu)
if (match_die(c, o) && !topology_same_node(c, o))
primarily_use_numa_for_topology();
}
+
+ threads = cpumask_weight(topology_sibling_cpumask(cpu));
+ if (threads > __max_smt_threads)
+ __max_smt_threads = threads;
}
/* maps the cpu to the sched domain representing multi-core */
@@ -1285,7 +1292,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
cpumask_copy(cpu_callin_mask, cpumask_of(0));
mb();
- current_thread_info()->cpu = 0; /* needed? */
for_each_possible_cpu(i) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
@@ -1441,6 +1447,21 @@ __init void prefill_possible_map(void)
#ifdef CONFIG_HOTPLUG_CPU
+/* Recompute SMT state for all CPUs on offline */
+static void recompute_smt_state(void)
+{
+ int max_threads, cpu;
+
+ max_threads = 0;
+ for_each_online_cpu (cpu) {
+ int threads = cpumask_weight(topology_sibling_cpumask(cpu));
+
+ if (threads > max_threads)
+ max_threads = threads;
+ }
+ __max_smt_threads = max_threads;
+}
+
static void remove_siblinginfo(int cpu)
{
int sibling;
@@ -1465,6 +1486,7 @@ static void remove_siblinginfo(int cpu)
c->phys_proc_id = 0;
c->cpu_core_id = 0;
cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
+ recompute_smt_state();
}
static void remove_cpu_from_maps(int cpu)
@@ -1622,7 +1644,7 @@ static inline void mwait_play_dead(void)
}
}
-static inline void hlt_play_dead(void)
+void hlt_play_dead(void)
{
if (__this_cpu_read(cpu_info.x86) >= 4)
wbinvd();
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 9ee98eefc44d..4738f5e0f2ab 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -5,7 +5,7 @@
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 9b0185fbe3eb..654f6c66fe45 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -323,25 +323,16 @@ static int tboot_wait_for_aps(int num_aps)
return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
}
-static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int tboot_dying_cpu(unsigned int cpu)
{
- switch (action) {
- case CPU_DYING:
- atomic_inc(&ap_wfs_count);
- if (num_online_cpus() == 1)
- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
- return NOTIFY_BAD;
- break;
+ atomic_inc(&ap_wfs_count);
+ if (num_online_cpus() == 1) {
+ if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
+ return -EBUSY;
}
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block tboot_cpu_notifier =
-{
- .notifier_call = tboot_cpu_callback,
-};
-
#ifdef CONFIG_DEBUG_FS
#define TBOOT_LOG_UUID { 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \
@@ -417,8 +408,8 @@ static __init int tboot_late_init(void)
tboot_create_trampoline();
atomic_set(&ap_wfs_count, 0);
- register_hotcpu_notifier(&tboot_cpu_notifier);
-
+ cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
+ tboot_dying_cpu);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("tboot_log", S_IRUSR,
arch_debugfs_dir, NULL, &tboot_log_fops);
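The tboot hunk above follows the tree-wide conversion from notifier_block CPU-hotplug callbacks to the cpuhp state machine: the old CPU_DYING case becomes a plain per-CPU callback that returns 0 or a negative errno. A minimal sketch of that registration pattern, with a made-up state constant and callback name (only cpuhp_setup_state() itself mirrors the real API):

    #include <linux/cpuhotplug.h>

    /* hypothetical dying callback, in the same shape as tboot_dying_cpu() */
    static int example_dying_cpu(unsigned int cpu)
    {
            return 0;               /* a negative errno reports failure */
    }

    static int __init example_init(void)
    {
            /* CPUHP_AP_EXAMPLE_DYING is invented; real states live in enum cpuhp_state */
            return cpuhp_setup_state(CPUHP_AP_EXAMPLE_DYING, "AP_EXAMPLE_DYING",
                                     NULL, example_dying_cpu);
    }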
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index cb4a01b41e27..222e84e2432e 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -9,7 +9,6 @@
* as published by the Free Software Foundation; version 2
* of the License.
*/
-#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/asm.h>
@@ -74,7 +73,3 @@ int rodata_test(void)
return 0;
}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Testcase for marking rodata as read-only");
-MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d1590486204a..b70ca12dd389 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -21,7 +21,7 @@
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
local_irq_disable();
}
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
void ist_enter(struct pt_regs *regs)
{
if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
rcu_nmi_enter();
}
- /*
- * We are atomic because we're on the IST stack; or we're on
- * x86_32, in which case we still shouldn't schedule; or we're
- * on x86_64 and entered from user mode, in which case we're
- * still atomic unless ist_begin_non_atomic is called.
- */
- preempt_count_add(HARDIRQ_OFFSET);
+ preempt_disable();
/* This code is a bit fragile. Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
void ist_exit(struct pt_regs *regs)
{
- preempt_count_sub(HARDIRQ_OFFSET);
+ preempt_enable_no_resched();
if (!user_mode(regs))
rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
BUG_ON((unsigned long)(current_top_of_stack() -
current_stack_pointer()) >= THREAD_SIZE);
- preempt_count_sub(HARDIRQ_OFFSET);
+ preempt_enable_no_resched();
}
/**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
*/
void ist_end_non_atomic(void)
{
- preempt_count_add(HARDIRQ_OFFSET);
+ preempt_disable();
}
static nokprobe_inline int
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 38ba6de56ede..1ef87e887051 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
@@ -239,7 +239,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
return ns;
}
-static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+static void set_cyc2ns_scale(unsigned long khz, int cpu)
{
unsigned long long tsc_now, ns_now;
struct cyc2ns_data *data;
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
local_irq_save(flags);
sched_clock_idle_sleep_event();
- if (!cpu_khz)
+ if (!khz)
goto done;
data = cyc2ns_write_begin(cpu);
@@ -261,7 +261,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
* time function is continuous; see the comment near struct
* cyc2ns_data.
*/
- clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
+ clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
NSEC_PER_MSEC, 0);
/*
@@ -335,12 +335,6 @@ int check_tsc_unstable(void)
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
-int check_tsc_disabled(void)
-{
- return tsc_disabled;
-}
-EXPORT_SYMBOL_GPL(check_tsc_disabled);
-
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
@@ -665,19 +659,77 @@ success:
}
/**
- * native_calibrate_tsc - calibrate the tsc on boot
+ * native_calibrate_tsc - determine the TSC frequency via CPUID
+ * Returns the TSC frequency in kHz, or 0 if it cannot be determined.
*/
unsigned long native_calibrate_tsc(void)
{
+ unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
+ unsigned int crystal_khz;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
+ if (boot_cpu_data.cpuid_level < 0x15)
+ return 0;
+
+ eax_denominator = ebx_numerator = ecx_hz = edx = 0;
+
+ /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
+ cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+
+ if (ebx_numerator == 0 || eax_denominator == 0)
+ return 0;
+
+ crystal_khz = ecx_hz / 1000;
+
+ if (crystal_khz == 0) {
+ switch (boot_cpu_data.x86_model) {
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
+ crystal_khz = 24000; /* 24.0 MHz */
+ break;
+ case 0x5C: /* BXT */
+ crystal_khz = 19200; /* 19.2 MHz */
+ break;
+ }
+ }
+
+ return crystal_khz * ebx_numerator / eax_denominator;
+}
+
+static unsigned long cpu_khz_from_cpuid(void)
+{
+ unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
+ if (boot_cpu_data.cpuid_level < 0x16)
+ return 0;
+
+ eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
+
+ cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
+
+ return eax_base_mhz * 1000;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu on boot
+ */
+unsigned long native_calibrate_cpu(void)
+{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
unsigned long flags, latch, ms, fast_calibrate;
int hpet = is_hpet_enabled(), i, loopmin;
- /* Calibrate TSC using MSR for Intel Atom SoCs */
- local_irq_save(flags);
- fast_calibrate = try_msr_calibrate_tsc();
- local_irq_restore(flags);
+ fast_calibrate = cpu_khz_from_cpuid();
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ fast_calibrate = cpu_khz_from_msr();
if (fast_calibrate)
return fast_calibrate;
@@ -837,8 +889,12 @@ int recalibrate_cpu_khz(void)
if (!boot_cpu_has(X86_FEATURE_TSC))
return -ENODEV;
+ cpu_khz = x86_platform.calibrate_cpu();
tsc_khz = x86_platform.calibrate_tsc();
- cpu_khz = tsc_khz;
+ if (tsc_khz == 0)
+ tsc_khz = cpu_khz;
+ else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
+ cpu_khz = tsc_khz;
cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
cpu_khz_old, cpu_khz);
@@ -1244,8 +1300,18 @@ void __init tsc_init(void)
return;
}
+ cpu_khz = x86_platform.calibrate_cpu();
tsc_khz = x86_platform.calibrate_tsc();
- cpu_khz = tsc_khz;
+
+ /*
+ * Trust non-zero tsc_khz as authoritative,
+ * and use it to sanity check cpu_khz,
+ * which will be off if the system timer is off.
+ */
+ if (tsc_khz == 0)
+ tsc_khz = cpu_khz;
+ else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
+ cpu_khz = tsc_khz;
if (!tsc_khz) {
mark_tsc_unstable("could not calculate TSC khz");
@@ -1265,7 +1331,7 @@ void __init tsc_init(void)
*/
for_each_possible_cpu(cpu) {
cyc2ns_init(cpu);
- set_cyc2ns_scale(cpu_khz, cpu);
+ set_cyc2ns_scale(tsc_khz, cpu);
}
if (tsc_disabled > 0)
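For reference, the arithmetic behind the new native_calibrate_tsc() can be reproduced from user space. The sketch below is only an illustration: it uses GCC's <cpuid.h> helper instead of the kernel's cpuid(), and the 24 MHz value is just the Skylake crystal fallback from the table above.

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax_den, ebx_num, ecx_hz, edx;
            unsigned long long crystal_hz, tsc_khz;

            /* leaf 0x15: EAX = ratio denominator, EBX = numerator, ECX = crystal Hz */
            if (!__get_cpuid(0x15, &eax_den, &ebx_num, &ecx_hz, &edx) ||
                !eax_den || !ebx_num)
                    return 1;

            crystal_hz = ecx_hz ? ecx_hz : 24000000;        /* assumed SKL fallback */
            tsc_khz = crystal_hz / 1000 * ebx_num / eax_den;
            printf("TSC ~ %llu kHz\n", tsc_khz);
            return 0;
    }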
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 9911a0620f9a..0fe720d64fef 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -1,14 +1,5 @@
/*
- * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms.
- *
- * TSC in Intel Atom SoC runs at a constant rate which can be figured
- * by this formula:
- * <maximum core-clock to bus-clock ratio> * <maximum resolved frequency>
- * See Intel 64 and IA-32 System Programming Guid section 16.12 and 30.11.5
- * for details.
- * Especially some Intel Atom SoCs don't have PIT(i8254) or HPET, so MSR
- * based calibration is the only option.
- *
+ * tsc_msr.c - TSC frequency enumeration via MSR
*
* Copyright (C) 2013 Intel Corporation
* Author: Bin Gao <bin.gao@intel.com>
@@ -22,18 +13,10 @@
#include <asm/apic.h>
#include <asm/param.h>
-/* CPU reference clock frequency: in KHz */
-#define FREQ_80 80000
-#define FREQ_83 83200
-#define FREQ_100 99840
-#define FREQ_133 133200
-#define FREQ_166 166400
-
-#define MAX_NUM_FREQS 8
+#define MAX_NUM_FREQS 9
/*
- * According to Intel 64 and IA-32 System Programming Guide,
- * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
+ * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
* read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
* Unfortunately some Intel Atom SoCs aren't quite compliant to this,
* so we need manually differentiate SoC families. This is what the
@@ -48,17 +31,18 @@ struct freq_desc {
static struct freq_desc freq_desc_tables[] = {
/* PNW */
- { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
+ { 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
/* CLV+ */
- { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
- /* TNG */
- { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
- /* VLV2 */
- { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
- /* ANN */
- { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
- /* AIRMONT */
- { 6, 0x4c, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, FREQ_80, 0, 0, 0 } },
+ { 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
+ /* TNG - Intel Atom processor Z3400 series */
+ { 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
+ /* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
+ { 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
+ /* ANN - Intel Atom processor Z3500 series */
+ { 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
+ /* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
+ { 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
+ 80000, 93300, 90000, 88900, 87500 } },
};
static int match_cpu(u8 family, u8 model)
@@ -79,16 +63,20 @@ static int match_cpu(u8 family, u8 model)
(freq_desc_tables[cpu_index].freqs[freq_id])
/*
- * Do MSR calibration only for known/supported CPUs.
+ * MSR-based CPU/TSC frequency discovery for certain CPUs.
*
- * Returns the calibration value or 0 if MSR calibration failed.
+ * Set global "lapic_timer_frequency" to bus_clock_cycles/jiffy
+ * Return processor base frequency in KHz, or 0 on failure.
*/
-unsigned long try_msr_calibrate_tsc(void)
+unsigned long cpu_khz_from_msr(void)
{
u32 lo, hi, ratio, freq_id, freq;
unsigned long res;
int cpu_index;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
if (cpu_index < 0)
return 0;
@@ -100,31 +88,17 @@ unsigned long try_msr_calibrate_tsc(void)
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
ratio = (hi >> 8) & 0x1f;
}
- pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
-
- if (!ratio)
- goto fail;
/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
freq_id = lo & 0x7;
freq = id_to_freq(cpu_index, freq_id);
- pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
- freq_id, freq);
- if (!freq)
- goto fail;
/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
res = freq * ratio;
- pr_info("TSC runs at %lu KHz\n", res);
#ifdef CONFIG_X86_LOCAL_APIC
lapic_timer_frequency = (freq * 1000) / HZ;
- pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
#endif
return res;
-
-fail:
- pr_warn("Fast TSC calibration using MSR failed\n");
- return 0;
}
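The MSR path boils down to two multiplications. A worked example with hypothetical register values (not read from real hardware): a TNG-family part reporting freq_id 1 and a bus ratio of 20.

    static unsigned long example_tsc_khz(void)
    {
            /* hypothetical inputs for illustration only */
            unsigned long freq  = 100000;   /* kHz: freq_desc_tables[TNG].freqs[1] */
            unsigned long ratio = 20;       /* MSR_PLATFORM_ID[12:8] or MSR_PERF_STAT[44:40] */

            /* lapic_timer_frequency would be (freq * 1000) / HZ bus cycles per tick */
            return freq * ratio;            /* 2,000,000 kHz, i.e. a 2 GHz TSC */
    }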
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 3dce1ca0a653..01f30e56f99e 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -440,10 +440,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
- __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
- :"=r" (nr)
- :"m" (*bitmap), "r" (nr));
- return nr;
+ return test_bit(nr, bitmap->__map);
}
#define val_byte(val, n) (((__u8 *)&val)[n])
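In plain C, the test_bit() call that replaces the btl/sbbl sequence simply checks the nr-th bit of the word array; a conceptual sketch (the kernel's real test_bit() adds the proper volatile access):

    /* conceptual equivalent of test_bit(nr, map): non-zero if bit nr is set */
    static int bit_is_set(int nr, const unsigned long *map)
    {
            return (map[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long)))) & 1;
    }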
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index cd05942bc918..95e49f6e4fc3 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -1,7 +1,8 @@
/* Exports for assembly files.
All C exports should go in the respective C files. */
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
#include <linux/smp.h>
#include <net/checksum.h>
@@ -44,6 +45,9 @@ EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(__sw_hweight32);
+EXPORT_SYMBOL(__sw_hweight64);
+
/*
* Export string functions. We normally rely on gcc builtin for most of these,
* but gcc sometimes decides not to inline them.
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index dad5fe9633a3..76c5e52436c4 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -5,7 +5,7 @@
*/
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <asm/bios_ebda.h>
@@ -92,6 +92,7 @@ static void default_nmi_init(void) { };
static int default_i8042_detect(void) { return 1; };
struct x86_platform_ops x86_platform = {
+ .calibrate_cpu = native_calibrate_cpu,
.calibrate_tsc = native_calibrate_tsc,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 769af907f824..3235e0fe7792 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -13,7 +13,7 @@
*/
#include <linux/kvm_host.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry __user *entries)
{
int r, i;
- struct kvm_cpuid_entry *cpuid_entries;
+ struct kvm_cpuid_entry *cpuid_entries = NULL;
r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
r = -ENOMEM;
- cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
- if (!cpuid_entries)
- goto out;
- r = -EFAULT;
- if (copy_from_user(cpuid_entries, entries,
- cpuid->nent * sizeof(struct kvm_cpuid_entry)))
- goto out_free;
+ if (cpuid->nent) {
+ cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+ cpuid->nent);
+ if (!cpuid_entries)
+ goto out;
+ r = -EFAULT;
+ if (copy_from_user(cpuid_entries, entries,
+ cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+ goto out;
+ }
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
kvm_x86_ops->cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
-out_free:
- vfree(cpuid_entries);
out:
+ vfree(cpuid_entries);
return r;
}
@@ -364,7 +366,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
- F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(PCOMMIT);
+ F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB);
/* cpuid 0xD.1.eax */
const u32 kvm_cpuid_D_1_eax_x86_features =
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index e17a74b1d852..35058c2c0eea 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,14 +144,6 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
-static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
-}
-
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a2f24af3c999..4e95d3eb2955 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -22,7 +22,6 @@
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
-#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 3069281904d3..95e0e6481f07 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -25,7 +25,7 @@
#include <linux/list.h>
#include <linux/kvm_host.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 95fcc7b13866..60d91c9d160c 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -20,7 +20,7 @@
*
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kvm_host.h>
#include "irq.h"
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bbb5b283ff63..57549ed47ca5 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -25,7 +25,7 @@
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
if (guest_tsc < tsc_deadline)
- __delay(tsc_deadline - guest_tsc);
+ __delay(min(tsc_deadline - guest_tsc,
+ nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
}
static void start_apic_timer(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24e800116ab4..745a5f445ae2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -29,7 +29,8 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
@@ -336,12 +337,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
- *sptep = spte;
+ WRITE_ONCE(*sptep, spte);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
- *sptep = spte;
+ WRITE_ONCE(*sptep, spte);
}
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +391,7 @@ static void __set_spte(u64 *sptep, u64 spte)
*/
smp_wmb();
- ssptep->spte_low = sspte.spte_low;
+ WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +401,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
- ssptep->spte_low = sspte.spte_low;
+ WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
/*
* If we map the spte from present to nonpresent, we should clear
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index c146f3c262c3..0149ac59c273 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter)
iter->fixed = false;
iter->start_max = iter->start;
+ iter->range = NULL;
iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
__mtrr_lookup_var_next(iter);
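The added iter->range = NULL matters because list_prepare_entry() only substitutes the list head when the cursor it is given is NULL; an uninitialized cursor would be used as-is. Simplified from include/linux/list.h:

    /* simplified: use pos if set, otherwise synthesize a cursor from the head */
    #define list_prepare_entry(pos, head, member) \
            ((pos) ? : list_entry(head, typeof(*(pos)), member))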
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1163e8173e5a..16ef31b87452 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -238,7 +238,9 @@ module_param(nested, int, S_IRUGO);
/* enable / disable AVIC */
static int avic;
+#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
+#endif
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
@@ -981,11 +983,14 @@ static __init int svm_hardware_setup(void)
} else
kvm_disable_tdp();
- if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
- avic = false;
-
- if (avic)
- pr_info("AVIC enabled\n");
+ if (avic) {
+ if (!npt_enabled ||
+ !boot_cpu_has(X86_FEATURE_AVIC) ||
+ !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+ avic = false;
+ else
+ pr_info("AVIC enabled\n");
+ }
return 0;
@@ -1324,7 +1329,7 @@ free_avic:
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
u64 entry;
- int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+ int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_vcpu_apicv_active(vcpu))
@@ -1349,7 +1354,7 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
u64 entry;
/* ID = 0xff (broadcast), ID > 0xff (reserved) */
- int h_physical_id = __default_cpu_present_to_apicid(cpu);
+ int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_vcpu_apicv_active(vcpu))
@@ -4236,7 +4241,7 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
if (avic_vcpu_is_running(vcpu))
wrmsrl(SVM_AVIC_DOORBELL,
- __default_cpu_present_to_apicid(vcpu->cpu));
+ kvm_cpu_get_apicid(vcpu->cpu));
else
kvm_vcpu_wake_up(vcpu);
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fb93010beaa4..df07a0a4611f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
unsigned int dest;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
do {
@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
/* Set SN when the vCPU is preempted */
@@ -2705,8 +2707,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
- SECONDARY_EXEC_XSAVES |
- SECONDARY_EXEC_PCOMMIT;
+ SECONDARY_EXEC_XSAVES;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
@@ -3268,7 +3269,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_SHADOW_VMCS |
SECONDARY_EXEC_XSAVES |
SECONDARY_EXEC_ENABLE_PML |
- SECONDARY_EXEC_PCOMMIT |
SECONDARY_EXEC_TSC_SCALING;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
@@ -4856,9 +4856,6 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
if (!enable_pml)
exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
- /* Currently, we allow L1 guest to directly run pcommit instruction. */
- exec_control &= ~SECONDARY_EXEC_PCOMMIT;
-
return exec_control;
}
@@ -4902,9 +4899,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
- if (cpu_has_secondary_exec_ctrls())
+ if (cpu_has_secondary_exec_ctrls()) {
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
vmx_secondary_exec_control(vmx));
+ }
if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
vmcs_write64(EOI_EXIT_BITMAP0, 0);
@@ -4977,6 +4975,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
if (vmx_xsaves_supported())
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
+ if (enable_pml) {
+ ASSERT(vmx->pml_pg);
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
+
return 0;
}
@@ -6669,7 +6673,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Checks for #GP/#SS exceptions. */
exn = false;
- if (is_protmode(vcpu)) {
+ if (is_long_mode(vcpu)) {
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+ exn = is_noncanonical_address(*ret);
+ } else if (is_protmode(vcpu)) {
/* Protected mode: apply checks for segment validity in the
* following order:
* - segment type check (#GP(0) may be thrown)
@@ -6686,17 +6696,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
* execute-only code segment
*/
exn = ((s.type & 0xa) == 8);
- }
- if (exn) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
- if (is_long_mode(vcpu)) {
- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
- * non-canonical form. This is an only check for long mode.
- */
- exn = is_noncanonical_address(*ret);
- } else if (is_protmode(vcpu)) {
+ if (exn) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
@@ -7557,13 +7560,6 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
return 1;
}
-static int handle_pcommit(struct kvm_vcpu *vcpu)
-{
- /* we never catch pcommit instruct for L1 guest. */
- WARN_ON(1);
- return 1;
-}
-
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -7614,7 +7610,6 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_XSAVES] = handle_xsaves,
[EXIT_REASON_XRSTORS] = handle_xrstors,
[EXIT_REASON_PML_FULL] = handle_pml_full,
- [EXIT_REASON_PCOMMIT] = handle_pcommit,
};
static const int kvm_vmx_max_exit_handlers =
@@ -7923,8 +7918,6 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* the XSS exit bitmap in vmcs12.
*/
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
- case EXIT_REASON_PCOMMIT:
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
default:
return true;
}
@@ -7936,22 +7929,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}
-static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
-{
- struct page *pml_pg;
-
- pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pml_pg)
- return -ENOMEM;
-
- vmx->pml_pg = pml_pg;
-
- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-
- return 0;
-}
-
static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
if (vmx->pml_pg) {
@@ -8223,6 +8200,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
+ exit_reason != EXIT_REASON_PML_FULL &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
@@ -8853,6 +8831,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
put_cpu();
}
+/*
+ * Ensure that the current vmcs of the logical processor is the
+ * vmcs01 of the vcpu before calling free_nested().
+ */
+static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = vcpu_load(vcpu);
+ BUG_ON(r);
+ vmx_load_vmcs01(vcpu);
+ free_nested(vmx);
+ vcpu_put(vcpu);
+}
+
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8861,8 +8855,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
vmx_destroy_pml_buffer(vmx);
free_vpid(vmx->vpid);
leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- free_nested(vmx);
+ vmx_free_vcpu_nested(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
@@ -8884,14 +8877,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;
+ err = -ENOMEM;
+
+ /*
+ * If PML is turned on, a failure to enable PML just results in failure
+ * of creating the vcpu, therefore we can simplify PML logic (by
+ * avoiding dealing with cases such as enabling PML partially on vcpus
+ * for the guest, etc.).
+ */
+ if (enable_pml) {
+ vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!vmx->pml_pg)
+ goto uninit_vcpu;
+ }
+
vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
> PAGE_SIZE);
- err = -ENOMEM;
- if (!vmx->guest_msrs) {
- goto uninit_vcpu;
- }
+ if (!vmx->guest_msrs)
+ goto free_pml;
vmx->loaded_vmcs = &vmx->vmcs01;
vmx->loaded_vmcs->vmcs = alloc_vmcs();
@@ -8935,18 +8940,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->nested.current_vmptr = -1ull;
vmx->nested.current_vmcs12 = NULL;
- /*
- * If PML is turned on, failure on enabling PML just results in failure
- * of creating the vcpu, therefore we can simplify PML logic (by
- * avoiding dealing with cases, such as enabling PML partially on vcpus
- * for the guest, etc.
- */
- if (enable_pml) {
- err = vmx_create_pml_buffer(vmx);
- if (err)
- goto free_vmcs;
- }
-
return &vmx->vcpu;
free_vmcs:
@@ -8954,6 +8947,8 @@ free_vmcs:
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
+free_pml:
+ vmx_destroy_pml_buffer(vmx);
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
@@ -9085,15 +9080,6 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
if (cpu_has_secondary_exec_ctrls())
vmcs_set_secondary_exec_control(secondary_exec_ctl);
-
- if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
- if (guest_cpuid_has_pcommit(vcpu))
- vmx->nested.nested_vmx_secondary_ctls_high |=
- SECONDARY_EXEC_PCOMMIT;
- else
- vmx->nested.nested_vmx_secondary_ctls_high &=
- ~SECONDARY_EXEC_PCOMMIT;
- }
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -9706,8 +9692,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_PCOMMIT);
+ SECONDARY_EXEC_APIC_REGISTER_VIRT);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -10714,7 +10699,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return 0;
vcpu->pre_pcpu = vcpu->cpu;
@@ -10780,7 +10766,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
unsigned long flags;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
do {
@@ -10833,7 +10820,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
int idx, ret = -EINVAL;
if (!kvm_arch_has_assigned_device(kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(kvm->vcpus[0]))
return 0;
idx = srcu_read_lock(&kvm->irq_srcu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c805cf494154..9c496c7e8c00 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -36,7 +36,8 @@
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
@@ -55,9 +56,6 @@
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
@@ -68,6 +66,9 @@
#include <asm/div64.h>
#include <asm/irq_remapping.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
@@ -1244,12 +1245,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
- return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
- vcpu->arch.virtual_tsc_shift);
-}
-
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
u64 v = (u64)khz * (1000000 + ppm);
@@ -2314,6 +2309,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
+ case MSR_IA32_PERF_CTL:
msr_info->data = 0;
break;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2968,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
| KVM_VCPUEVENT_VALID_SMM))
return -EINVAL;
+ if (events->exception.injected &&
+ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+ return -EINVAL;
+
process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3036,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
if (dbgregs->flags)
return -EINVAL;
+ if (dbgregs->dr6 & ~0xffffffffull)
+ return -EINVAL;
+ if (dbgregs->dr7 & ~0xffffffffull)
+ return -EINVAL;
+
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;
@@ -5548,9 +5553,10 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
-static void tsc_bad(void *info)
+static int kvmclock_cpu_down_prep(unsigned int cpu)
{
__this_cpu_write(cpu_tsc_khz, 0);
+ return 0;
}
static void tsc_khz_changed(void *data)
@@ -5655,35 +5661,18 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
.notifier_call = kvmclock_cpufreq_notifier
};
-static int kvmclock_cpu_notifier(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int kvmclock_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, tsc_bad, NULL, 1);
- break;
- }
- return NOTIFY_OK;
+ tsc_khz_changed(NULL);
+ return 0;
}
-static struct notifier_block kvmclock_cpu_notifier_block = {
- .notifier_call = kvmclock_cpu_notifier,
- .priority = -INT_MAX
-};
-
static void kvm_timer_init(void)
{
int cpu;
max_tsc_khz = tsc_khz;
- cpu_notifier_register_begin();
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy policy;
@@ -5698,12 +5687,9 @@ static void kvm_timer_init(void)
CPUFREQ_TRANSITION_NOTIFIER);
}
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
-
- __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
- cpu_notifier_register_done();
+ cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
+ kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
@@ -5892,7 +5878,7 @@ void kvm_arch_exit(void)
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
- unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+ cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
@@ -7815,7 +7801,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
slot = id_to_memslot(slots, id);
if (size) {
- if (WARN_ON(slot->npages))
+ if (slot->npages)
return -EEXIST;
/*
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7ce3634ab5fe..a82ca466b62e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -2,6 +2,7 @@
#define ARCH_X86_KVM_X86_H
#include <linux/kvm_host.h>
+#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
extern struct static_key kvm_no_apic_vcpu;
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+ return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+ vcpu->arch.virtual_tsc_shift);
+}
+
/* Same "calling convention" as do_div:
* - divide (n << 32) by base
* - put result in n
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 3847e736702e..25da5bc8d83d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1233,8 +1233,6 @@ static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val)
static void probe_pci_console(void)
{
u8 cap, common_cap = 0, device_cap = 0;
- /* Offset within BAR0 */
- u32 device_offset;
u32 device_len;
/* Avoid recursive printk into here. */
@@ -1258,24 +1256,16 @@ static void probe_pci_console(void)
u8 vndr = read_pci_config_byte(0, 1, 0, cap);
if (vndr == PCI_CAP_ID_VNDR) {
u8 type, bar;
- u32 offset, length;
type = read_pci_config_byte(0, 1, 0,
cap + offsetof(struct virtio_pci_cap, cfg_type));
bar = read_pci_config_byte(0, 1, 0,
cap + offsetof(struct virtio_pci_cap, bar));
- offset = read_pci_config(0, 1, 0,
- cap + offsetof(struct virtio_pci_cap, offset));
- length = read_pci_config(0, 1, 0,
- cap + offsetof(struct virtio_pci_cap, length));
switch (type) {
case VIRTIO_PCI_CAP_DEVICE_CFG:
- if (bar == 0) {
+ if (bar == 0)
device_cap = cap;
- device_offset = offset;
- device_len = length;
- }
break;
case VIRTIO_PCI_CAP_PCI_CFG:
console_access_cap = cap;
@@ -1297,13 +1287,16 @@ static void probe_pci_console(void)
* emerg_wr. If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE
* it should ignore the access.
*/
+ device_len = read_pci_config(0, 1, 0,
+ device_cap + offsetof(struct virtio_pci_cap, length));
if (device_len < (offsetof(struct virtio_console_config, emerg_wr)
+ sizeof(u32))) {
printk(KERN_ERR "lguest: console missing emerg_wr field\n");
return;
}
- console_cfg_offset = device_offset;
+ console_cfg_offset = read_pci_config(0, 1, 0,
+ device_cap + offsetof(struct virtio_pci_cap, offset));
printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n");
}
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 72a576752a7e..34a74131a12c 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,8 +24,9 @@ lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
-obj-y += msr.o msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index a3c668875038..216a629a4a1a 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -1,5 +1,5 @@
#include <linux/smp.h>
-#include <linux/module.h>
+#include <linux/export.h>
static void __wbinvd(void *dummy)
{
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 2b0ef26da0bd..bf603ebbfd8e 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -17,11 +17,11 @@
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
- GET_THREAD_INFO(%rax)
+ mov PER_CPU_VAR(current_task), %rax
movq %rdi,%rcx
addq %rdx,%rcx
jc bad_to_user
- cmpq TI_addr_limit(%rax),%rcx
+ cmpq TASK_addr_limit(%rax),%rcx
ja bad_to_user
ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
"jmp copy_user_generic_string", \
@@ -32,11 +32,11 @@ ENDPROC(_copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
- GET_THREAD_INFO(%rax)
+ mov PER_CPU_VAR(current_task), %rax
movq %rsi,%rcx
addq %rdx,%rcx
jc bad_from_user
- cmpq TI_addr_limit(%rax),%rcx
+ cmpq TASK_addr_limit(%rax),%rcx
ja bad_from_user
ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
"jmp copy_user_generic_string", \
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index aa417a97511c..d6f848d1211d 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/export.h>
unsigned int x86_family(unsigned int sig)
{
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 9845371c5c36..9a7fe6a70491 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -6,7 +6,7 @@
*/
#include <linux/compiler.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/checksum.h>
static inline unsigned short from32to16(unsigned a)
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 28a6654f0d08..8bd53589ecfb 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -5,7 +5,8 @@
* Wrappers of assembly checksum functions for x86-64.
*/
#include <asm/checksum.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
#include <asm/smap.h>
/**
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 2f07c291dcc8..073d1f1a620b 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -11,7 +11,7 @@
* we have to worry about.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 46668cda4ffd..0ef5128c2de8 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -35,8 +35,8 @@
.text
ENTRY(__get_user_1)
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
ASM_STAC
1: movzbl (%_ASM_AX),%edx
@@ -48,8 +48,8 @@ ENDPROC(__get_user_1)
ENTRY(__get_user_2)
add $1,%_ASM_AX
jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
ASM_STAC
2: movzwl -1(%_ASM_AX),%edx
@@ -61,8 +61,8 @@ ENDPROC(__get_user_2)
ENTRY(__get_user_4)
add $3,%_ASM_AX
jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
ASM_STAC
3: movl -3(%_ASM_AX),%edx
@@ -75,8 +75,8 @@ ENTRY(__get_user_8)
#ifdef CONFIG_X86_64
add $7,%_ASM_AX
jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
ASM_STAC
4: movq -7(%_ASM_AX),%rdx
@@ -86,8 +86,8 @@ ENTRY(__get_user_8)
#else
add $7,%_ASM_AX
jc bad_get_user_8
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user_8
ASM_STAC
4: movl -7(%_ASM_AX),%edx
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
new file mode 100644
index 000000000000..02de3d74d2c5
--- /dev/null
+++ b/arch/x86/lib/hweight.S
@@ -0,0 +1,77 @@
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+
+/*
+ * unsigned int __sw_hweight32(unsigned int w)
+ * %rdi: w
+ */
+ENTRY(__sw_hweight32)
+
+#ifdef CONFIG_X86_64
+ movl %edi, %eax # w
+#endif
+ __ASM_SIZE(push,) %__ASM_REG(dx)
+ movl %eax, %edx # w -> t
+ shrl %edx # t >>= 1
+ andl $0x55555555, %edx # t &= 0x55555555
+ subl %edx, %eax # w -= t
+
+ movl %eax, %edx # w -> t
+ shrl $2, %eax # w_tmp >>= 2
+ andl $0x33333333, %edx # t &= 0x33333333
+ andl $0x33333333, %eax # w_tmp &= 0x33333333
+ addl %edx, %eax # w = w_tmp + t
+
+ movl %eax, %edx # w -> t
+ shrl $4, %edx # t >>= 4
+ addl %edx, %eax # w_tmp += t
+ andl $0x0f0f0f0f, %eax # w_tmp &= 0x0f0f0f0f
+ imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101
+ shrl $24, %eax # w = w_tmp >> 24
+ __ASM_SIZE(pop,) %__ASM_REG(dx)
+ ret
+ENDPROC(__sw_hweight32)
+
+ENTRY(__sw_hweight64)
+#ifdef CONFIG_X86_64
+ pushq %rdx
+
+ movq %rdi, %rdx # w -> t
+ movabsq $0x5555555555555555, %rax
+ shrq %rdx # t >>= 1
+ andq %rdx, %rax # t &= 0x5555555555555555
+ movabsq $0x3333333333333333, %rdx
+ subq %rax, %rdi # w -= t
+
+ movq %rdi, %rax # w -> t
+ shrq $2, %rdi # w_tmp >>= 2
+ andq %rdx, %rax # t &= 0x3333333333333333
+ andq %rdi, %rdx # w_tmp &= 0x3333333333333333
+ addq %rdx, %rax # w = w_tmp + t
+
+ movq %rax, %rdx # w -> t
+ shrq $4, %rdx # t >>= 4
+ addq %rdx, %rax # w_tmp += t
+ movabsq $0x0f0f0f0f0f0f0f0f, %rdx
+ andq %rdx, %rax # w_tmp &= 0x0f0f0f0f0f0f0f0f
+ movabsq $0x0101010101010101, %rdx
+ imulq %rdx, %rax # w_tmp *= 0x0101010101010101
+ shrq $56, %rax # w = w_tmp >> 56
+
+ popq %rdx
+ ret
+#else /* CONFIG_X86_32 */
+ /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
+ pushl %ecx
+
+ call __sw_hweight32
+ movl %eax, %ecx # stash away result
+ movl %edx, %eax # second part of input
+ call __sw_hweight32
+ addl %ecx, %eax # result
+
+ popl %ecx
+ ret
+#endif
+ENDPROC(__sw_hweight64)
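The assembly above is a register-level transcription of the usual SWAR population count; the same steps in C, shown only to make the masks readable (essentially the generic lib/hweight.c variant):

    /* C rendering of the same four SWAR steps as the assembly */
    static unsigned int sw_hweight32(unsigned int w)
    {
            w -= (w >> 1) & 0x55555555;                       /* 2-bit sums */
            w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);  /* 4-bit sums */
            w  = (w + (w >> 4)) & 0x0f0f0f0f;                 /* 8-bit sums */
            return (w * 0x01010101) >> 24;                    /* add the four bytes */
    }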
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 1a416935bac9..1088eb8f3a5f 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -155,14 +155,24 @@ found:
/*
* In 32-bits mode, if the [7:6] bits (mod bits of
* ModRM) on the second byte are not 11b, it is
- * LDS or LES.
+ * LDS or LES or BOUND.
*/
if (X86_MODRM_MOD(b2) != 3)
goto vex_end;
}
insn->vex_prefix.bytes[0] = b;
insn->vex_prefix.bytes[1] = b2;
- if (inat_is_vex3_prefix(attr)) {
+ if (inat_is_evex_prefix(attr)) {
+ b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+ insn->vex_prefix.bytes[2] = b2;
+ b2 = peek_nbyte_next(insn_byte_t, insn, 3);
+ insn->vex_prefix.bytes[3] = b2;
+ insn->vex_prefix.nbytes = 4;
+ insn->next_byte += 4;
+ if (insn->x86_64 && X86_VEX_W(b2))
+ /* VEX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ } else if (inat_is_vex3_prefix(attr)) {
b2 = peek_nbyte_next(insn_byte_t, insn, 2);
insn->vex_prefix.bytes[2] = b2;
insn->vex_prefix.nbytes = 3;
@@ -221,7 +231,9 @@ void insn_get_opcode(struct insn *insn)
m = insn_vex_m_bits(insn);
p = insn_vex_p_bits(insn);
insn->attr = inat_get_avx_attribute(op, m, p);
- if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr))
+ if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
+ (!inat_accept_vex(insn->attr) &&
+ !inat_is_group(insn->attr)))
insn->attr = 0; /* This instruction is bad */
goto end; /* VEX has only 1 byte for opcode */
}
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
new file mode 100644
index 000000000000..f7dfeda83e5c
--- /dev/null
+++ b/arch/x86/lib/kaslr.c
@@ -0,0 +1,90 @@
+/*
+ * Entropy functions used during early boot for KASLR base and memory
+ * randomization. The base randomization is done in the compressed
+ * kernel, and memory randomization is done early when the regular
+ * kernel starts. This file is included in the compressed kernel and
+ * normally linked into the regular kernel.
+ */
+#include <asm/kaslr.h>
+#include <asm/msr.h>
+#include <asm/archrandom.h>
+#include <asm/e820.h>
+#include <asm/io.h>
+
+/*
+ * When built for the regular kernel, several functions need to be stubbed out
+ * or changed to their regular kernel equivalent.
+ */
+#ifndef KASLR_COMPRESSED_BOOT
+#include <asm/cpufeature.h>
+#include <asm/setup.h>
+
+#define debug_putstr(v) early_printk(v)
+#define has_cpuflag(f) boot_cpu_has(f)
+#define get_boot_seed() kaslr_offset()
+#endif
+
+#define I8254_PORT_CONTROL 0x43
+#define I8254_PORT_COUNTER0 0x40
+#define I8254_CMD_READBACK 0xC0
+#define I8254_SELECT_COUNTER0 0x02
+#define I8254_STATUS_NOTREADY 0x40
+static inline u16 i8254(void)
+{
+ u16 status, timer;
+
+ do {
+ outb(I8254_PORT_CONTROL,
+ I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ status = inb(I8254_PORT_COUNTER0);
+ timer = inb(I8254_PORT_COUNTER0);
+ timer |= inb(I8254_PORT_COUNTER0) << 8;
+ } while (status & I8254_STATUS_NOTREADY);
+
+ return timer;
+}
+
+unsigned long kaslr_get_random_long(const char *purpose)
+{
+#ifdef CONFIG_X86_64
+ const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
+#else
+ const unsigned long mix_const = 0x3f39e593UL;
+#endif
+ unsigned long raw, random = get_boot_seed();
+ bool use_i8254 = true;
+
+ debug_putstr(purpose);
+ debug_putstr(" KASLR using");
+
+ if (has_cpuflag(X86_FEATURE_RDRAND)) {
+ debug_putstr(" RDRAND");
+ if (rdrand_long(&raw)) {
+ random ^= raw;
+ use_i8254 = false;
+ }
+ }
+
+ if (has_cpuflag(X86_FEATURE_TSC)) {
+ debug_putstr(" RDTSC");
+ raw = rdtsc();
+
+ random ^= raw;
+ use_i8254 = false;
+ }
+
+ if (use_i8254) {
+ debug_putstr(" i8254");
+ random ^= i8254();
+ }
+
+ /* Circular multiply for better bit diffusion */
+ asm("mul %3"
+ : "=a" (random), "=d" (raw)
+ : "a" (random), "rm" (mix_const));
+ random += raw;
+
+ debug_putstr("...\n");
+
+ return random;
+}
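The inline asm at the end of kaslr_get_random_long() is a multiply-and-fold: the seed is multiplied by a constant and the high half of the double-width product is added back in. A 64-bit-only C sketch of the same step (the kernel uses asm so one source also builds for 32-bit):

    /* sketch of the "mul %3" + add fold, 64-bit only */
    static unsigned long mix_fold64(unsigned long random, unsigned long mix_const)
    {
            unsigned __int128 product = (unsigned __int128)random * mix_const;

            return (unsigned long)product + (unsigned long)(product >> 64);
    }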
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index a404b4b75533..cad12634d6bd 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -1,5 +1,5 @@
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/export.h>
#undef memcpy
#undef memset
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index e5e3ed8dc079..c2311a678332 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -18,7 +18,7 @@
*/
#include <linux/hardirq.h>
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/types.h>
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
index 8d6ef78b5d01..ff29e8d39414 100644
--- a/arch/x86/lib/msr-reg-export.c
+++ b/arch/x86/lib/msr-reg-export.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/msr.h>
EXPORT_SYMBOL(rdmsr_safe_regs);
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 518532e6a3fa..ce68b6a9d7d1 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/msr.h>
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 004c861b1648..d1dee753b949 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/msr.h>
#define CREATE_TRACE_POINTS
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index e0817a12d323..c891ece81e5b 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -29,14 +29,14 @@
* as they get called from within inline assembly.
*/
-#define ENTER GET_THREAD_INFO(%_ASM_BX)
+#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX
#define EXIT ASM_CLAC ; \
ret
.text
ENTRY(__put_user_1)
ENTER
- cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+ cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
jae bad_put_user
ASM_STAC
1: movb %al,(%_ASM_CX)
@@ -46,7 +46,7 @@ ENDPROC(__put_user_1)
ENTRY(__put_user_2)
ENTER
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $1,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae bad_put_user
@@ -58,7 +58,7 @@ ENDPROC(__put_user_2)
ENTRY(__put_user_4)
ENTER
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $3,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae bad_put_user
@@ -70,7 +70,7 @@ ENDPROC(__put_user_4)
ENTRY(__put_user_8)
ENTER
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $7,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae bad_put_user
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index bd59090825db..dc0ad12f80bb 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -11,7 +11,7 @@
*/
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/export.h>
#ifdef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index e342586db6e4..b4908789484e 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -5,7 +5,7 @@
*/
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/word-at-a-time.h>
#include <linux/sched.h>
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index b559d9238781..3bc7baf2a711 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -8,7 +8,7 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0a42327a59d7..69873589c0ba 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -5,8 +5,8 @@
* Copyright 1997 Linus Torvalds
* Copyright 2002 Andi Kleen <ak@suse.de>
*/
-#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
/*
* Zero Userspace
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index d388de72eaca..767be7c76034 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -13,12 +13,17 @@
# opcode: escape # escaped-name
# EndTable
#
+# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
+# mnemonics that begin with lowercase 'k' accept a VEX prefix
+#
#<group maps>
# GrpTable: GrpXXX
# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
# EndTable
#
# AVX Superscripts
+# (ev): this opcode requires EVEX prefix.
+# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
# (v): this opcode requires VEX prefix.
# (v1): this opcode only supports 128bit VEX.
#
@@ -137,7 +142,7 @@ AVXcode:
# 0x60 - 0x6f
60: PUSHA/PUSHAD (i64)
61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix)
63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
64: SEG=FS (Prefix)
65: SEG=GS (Prefix)
@@ -399,17 +404,17 @@ AVXcode: 1
3f:
# 0x0f 0x40-0x4f
40: CMOVO Gv,Ev
-41: CMOVNO Gv,Ev
-42: CMOVB/C/NAE Gv,Ev
+41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
+42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
43: CMOVAE/NB/NC Gv,Ev
-44: CMOVE/Z Gv,Ev
-45: CMOVNE/NZ Gv,Ev
-46: CMOVBE/NA Gv,Ev
-47: CMOVA/NBE Gv,Ev
+44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
+45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
+46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
+47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
48: CMOVS Gv,Ev
49: CMOVNS Gv,Ev
-4a: CMOVP/PE Gv,Ev
-4b: CMOVNP/PO Gv,Ev
+4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
+4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
4c: CMOVL/NGE Gv,Ev
4d: CMOVNL/GE Gv,Ev
4e: CMOVLE/NG Gv,Ev
@@ -426,7 +431,7 @@ AVXcode: 1
58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
-5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
@@ -447,7 +452,7 @@ AVXcode: 1
6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
-6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
# 0x0f 0x70-0x7f
70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
71: Grp12 (1A)
@@ -458,14 +463,14 @@ AVXcode: 1
76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
# Note: Remove (v), because vzeroall and vzeroupper becomes emms without VEX.
77: emms | vzeroupper | vzeroall
-78: VMREAD Ey,Gy
-79: VMWRITE Gy,Ey
-7a:
-7b:
+78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
+79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
+7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
+7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
-7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
# 0x0f 0x80-0x8f
# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
80: JO Jz (f64)
@@ -485,16 +490,16 @@ AVXcode: 1
8e: JLE/JNG Jz (f64)
8f: JNLE/JG Jz (f64)
# 0x0f 0x90-0x9f
-90: SETO Eb
-91: SETNO Eb
-92: SETB/C/NAE Eb
-93: SETAE/NB/NC Eb
+90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
+91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
+92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
+93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
94: SETE/Z Eb
95: SETNE/NZ Eb
96: SETBE/NA Eb
97: SETA/NBE Eb
-98: SETS Eb
-99: SETNS Eb
+98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
+99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
9a: SETP/PE Eb
9b: SETNP/PO Eb
9c: SETL/NGE Eb
@@ -564,11 +569,11 @@ d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
-db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
-df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
# 0x0f 0xe0-0xef
e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
@@ -576,16 +581,16 @@ e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
-e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
-eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
-ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
# 0x0f 0xf0-0xff
f0: vlddqu Vx,Mx (F2)
f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
@@ -626,81 +631,105 @@ AVXcode: 2
0e: vtestps Vx,Wx (66),(v)
0f: vtestpd Vx,Wx (66),(v)
# 0x0f 0x38 0x10-0x1f
-10: pblendvb Vdq,Wdq (66)
-11:
-12:
-13: vcvtph2ps Vx,Wx,Ib (66),(v)
-14: blendvps Vdq,Wdq (66)
-15: blendvpd Vdq,Wdq (66)
-16: vpermps Vqq,Hqq,Wqq (66),(v)
+10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
+11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
+12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
+13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
+14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
+15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
+16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
17: vptest Vx,Wx (66)
18: vbroadcastss Vx,Wd (66),(v)
-19: vbroadcastsd Vqq,Wq (66),(v)
-1a: vbroadcastf128 Vqq,Mdq (66),(v)
-1b:
+19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
+1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
+1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
-1f:
+1f: vpabsq Vx,Wx (66),(ev)
# 0x0f 0x38 0x20-0x2f
-20: vpmovsxbw Vx,Ux/Mq (66),(v1)
-21: vpmovsxbd Vx,Ux/Md (66),(v1)
-22: vpmovsxbq Vx,Ux/Mw (66),(v1)
-23: vpmovsxwd Vx,Ux/Mq (66),(v1)
-24: vpmovsxwq Vx,Ux/Md (66),(v1)
-25: vpmovsxdq Vx,Ux/Mq (66),(v1)
-26:
-27:
-28: vpmuldq Vx,Hx,Wx (66),(v1)
-29: vpcmpeqq Vx,Hx,Wx (66),(v1)
-2a: vmovntdqa Vx,Mx (66),(v1)
+20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
+21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
+24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
+26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
+27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
+28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
+2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
2b: vpackusdw Vx,Hx,Wx (66),(v1)
-2c: vmaskmovps Vx,Hx,Mx (66),(v)
-2d: vmaskmovpd Vx,Hx,Mx (66),(v)
+2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
2e: vmaskmovps Mx,Hx,Vx (66),(v)
2f: vmaskmovpd Mx,Hx,Vx (66),(v)
# 0x0f 0x38 0x30-0x3f
-30: vpmovzxbw Vx,Ux/Mq (66),(v1)
-31: vpmovzxbd Vx,Ux/Md (66),(v1)
-32: vpmovzxbq Vx,Ux/Mw (66),(v1)
-33: vpmovzxwd Vx,Ux/Mq (66),(v1)
-34: vpmovzxwq Vx,Ux/Md (66),(v1)
-35: vpmovzxdq Vx,Ux/Mq (66),(v1)
-36: vpermd Vqq,Hqq,Wqq (66),(v)
+30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
+31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
+34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
+36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
37: vpcmpgtq Vx,Hx,Wx (66),(v1)
-38: vpminsb Vx,Hx,Wx (66),(v1)
-39: vpminsd Vx,Hx,Wx (66),(v1)
-3a: vpminuw Vx,Hx,Wx (66),(v1)
-3b: vpminud Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
+39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
+3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
+3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
3c: vpmaxsb Vx,Hx,Wx (66),(v1)
-3d: vpmaxsd Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
3e: vpmaxuw Vx,Hx,Wx (66),(v1)
-3f: vpmaxud Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
# 0x0f 0x38 0x40-0x8f
-40: vpmulld Vx,Hx,Wx (66),(v1)
+40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
41: vphminposuw Vdq,Wdq (66),(v1)
-42:
-43:
-44:
+42: vgetexpps/d Vx,Wx (66),(ev)
+43: vgetexpss/d Vx,Hx,Wx (66),(ev)
+44: vplzcntd/q Vx,Wx (66),(ev)
45: vpsrlvd/q Vx,Hx,Wx (66),(v)
-46: vpsravd Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
47: vpsllvd/q Vx,Hx,Wx (66),(v)
-# Skip 0x48-0x57
+# Skip 0x48-0x4b
+4c: vrcp14ps/d Vpd,Wpd (66),(ev)
+4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+# Skip 0x50-0x57
58: vpbroadcastd Vx,Wx (66),(v)
-59: vpbroadcastq Vx,Wx (66),(v)
-5a: vbroadcasti128 Vqq,Mdq (66),(v)
-# Skip 0x5b-0x77
+59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
+5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
+5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
+# Skip 0x5c-0x63
+64: vpblendmd/q Vx,Hx,Wx (66),(ev)
+65: vblendmps/d Vx,Hx,Wx (66),(ev)
+66: vpblendmb/w Vx,Hx,Wx (66),(ev)
+# Skip 0x67-0x74
+75: vpermi2b/w Vx,Hx,Wx (66),(ev)
+76: vpermi2d/q Vx,Hx,Wx (66),(ev)
+77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
78: vpbroadcastb Vx,Wx (66),(v)
79: vpbroadcastw Vx,Wx (66),(v)
-# Skip 0x7a-0x7f
+7a: vpbroadcastb Vx,Rv (66),(ev)
+7b: vpbroadcastw Vx,Rv (66),(ev)
+7c: vpbroadcastd/q Vx,Rv (66),(ev)
+7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
+7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
+7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
80: INVEPT Gy,Mdq (66)
81: INVVPID Gy,Mdq (66)
82: INVPCID Gy,Mdq (66)
+83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
+88: vexpandps/d Vpd,Wpd (66),(ev)
+89: vpexpandd/q Vx,Wx (66),(ev)
+8a: vcompressps/d Wx,Vx (66),(ev)
+8b: vpcompressd/q Wx,Vx (66),(ev)
8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8d: vpermb/w Vx,Hx,Wx (66),(ev)
8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
# 0x0f 0x38 0x90-0xbf (FMA)
-90: vgatherdd/q Vx,Hx,Wx (66),(v)
-91: vgatherqd/q Vx,Hx,Wx (66),(v)
+90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
+91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
92: vgatherdps/d Vx,Hx,Wx (66),(v)
93: vgatherqps/d Vx,Hx,Wx (66),(v)
94:
@@ -715,6 +744,10 @@ AVXcode: 2
9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a0: vpscatterdd/q Wx,Vx (66),(ev)
+a1: vpscatterqd/q Wx,Vx (66),(ev)
+a2: vscatterdps/d Wx,Vx (66),(ev)
+a3: vscatterqps/d Wx,Vx (66),(ev)
a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
@@ -725,6 +758,8 @@ ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
+b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
@@ -736,12 +771,15 @@ bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
# 0x0f 0x38 0xc0-0xff
-c8: sha1nexte Vdq,Wdq
+c4: vpconflictd/q Vx,Wx (66),(ev)
+c6: Grp18 (1A)
+c7: Grp19 (1A)
+c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
c9: sha1msg1 Vdq,Wdq
-ca: sha1msg2 Vdq,Wdq
-cb: sha256rnds2 Vdq,Wdq
-cc: sha256msg1 Vdq,Wdq
-cd: sha256msg2 Vdq,Wdq
+ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
+cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
+cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
+cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
db: VAESIMC Vdq,Wdq (66),(v1)
dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
@@ -763,15 +801,15 @@ AVXcode: 3
00: vpermq Vqq,Wqq,Ib (66),(v)
01: vpermpd Vqq,Wqq,Ib (66),(v)
02: vpblendd Vx,Hx,Wx,Ib (66),(v)
-03:
+03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
04: vpermilps Vx,Wx,Ib (66),(v)
05: vpermilpd Vx,Wx,Ib (66),(v)
06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
07:
-08: vroundps Vx,Wx,Ib (66)
-09: vroundpd Vx,Wx,Ib (66)
-0a: vroundss Vss,Wss,Ib (66),(v1)
-0b: vroundsd Vsd,Wsd,Ib (66),(v1)
+08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
+09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
+0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
0c: vblendps Vx,Hx,Wx,Ib (66)
0d: vblendpd Vx,Hx,Wx,Ib (66)
0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
@@ -780,26 +818,51 @@ AVXcode: 3
15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
16: vpextrd/q Ey,Vdq,Ib (66),(v1)
17: vextractps Ed,Vdq,Ib (66),(v1)
-18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v)
-19: vextractf128 Wdq,Vqq,Ib (66),(v)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
+1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
-38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v)
-39: vextracti128 Wdq,Vqq,Ib (66),(v)
+23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
+26: vgetmantps/d Vx,Wx,Ib (66),(ev)
+27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
+30: kshiftrb/w Vk,Uk,Ib (66),(v)
+31: kshiftrd/q Vk,Uk,Ib (66),(v)
+32: kshiftlb/w Vk,Uk,Ib (66),(v)
+33: kshiftld/q Vk,Uk,Ib (66),(v)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
+3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
40: vdpps Vx,Hx,Wx,Ib (66)
41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
-42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
+43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
+51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
+54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
+55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
+56: vreduceps/d Vx,Wx,Ib (66),(ev)
+57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+66: vfpclassps/d Vk,Wx,Ib (66),(ev)
+67: vfpclassss/d Vk,Wx,Ib (66),(ev)
cc: sha1rnds4 Vdq,Wdq,Ib
df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
f0: RORX Gy,Ey,Ib (F2),(v)
@@ -927,8 +990,10 @@ GrpTable: Grp12
EndTable
GrpTable: Grp13
+0: vprord/q Hx,Wx,Ib (66),(ev)
+1: vprold/q Hx,Wx,Ib (66),(ev)
2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
-4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
EndTable
@@ -947,7 +1012,7 @@ GrpTable: Grp15
4: XSAVE
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
-7: clflush | clflushopt (66) | sfence (11B) | pcommit (66),(11B)
+7: clflush | clflushopt (66) | sfence (11B)
EndTable
GrpTable: Grp16
@@ -963,6 +1028,20 @@ GrpTable: Grp17
3: BLSI By,Ey (v)
EndTable
+GrpTable: Grp18
+1: vgatherpf0dps/d Wx (66),(ev)
+2: vgatherpf1dps/d Wx (66),(ev)
+5: vscatterpf0dps/d Wx (66),(ev)
+6: vscatterpf1dps/d Wx (66),(ev)
+EndTable
+
+GrpTable: Grp19
+1: vgatherpf0qps/d Wx (66),(ev)
+2: vgatherpf1qps/d Wx (66),(ev)
+5: vscatterpf0qps/d Wx (66),(ev)
+6: vscatterpf1qps/d Wx (66),(ev)
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
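
The rows above gain AVX-512 forms tagged (ev)/(evo): encodings that exist only under EVEX, or whose meaning differs under EVEX. Which column of a row applies is selected by the mandatory prefix (none/66/F3/F2) and the escape map, both carried in the EVEX prefix payload. A minimal standalone sketch of pulling those selectors out of the documented 4-byte EVEX prefix; this is illustration only, not the in-kernel decoder, which is table-driven with its lookup tables generated from this map file at build time:

/*
 * Hedged sketch: decode the map and mandatory-prefix selectors from a
 * 4-byte EVEX prefix (0x62 P0 P1 P2), per the documented EVEX layout.
 * The example bytes encode vpxord %zmm0,%zmm1,%zmm2 (row ef, "(66),(evo)").
 */
#include <stdio.h>
#include <stdint.h>

static const char * const map_name[] = { "?", "0F", "0F38", "0F3A" };
static const char * const pp_name[]  = { "none", "66", "F3", "F2" };

int main(void)
{
	const uint8_t insn[] = { 0x62, 0xf1, 0x75, 0x48, 0xef, 0xd0 };

	if (insn[0] != 0x62)
		return 1;			/* not EVEX-encoded */

	unsigned int mm = insn[1] & 0x3;	/* P0[1:0]: escape map   */
	unsigned int pp = insn[2] & 0x3;	/* P1[1:0]: SIMD prefix  */
	uint8_t opc = insn[4];			/* opcode follows P0..P2 */

	printf("map %s, prefix %s, opcode %02x\n",
	       map_name[mm], pp_name[pp], opc);
	return 0;
}
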
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 62c0043a5fd5..96d2b847e09e 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -37,4 +37,5 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
+obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 2ca15b59fb3f..ba47524f56e8 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 99bfb192803f..ea9c49adaa1f 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -14,7 +14,7 @@
#include <linux/debugfs.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/pgtable.h>
@@ -72,9 +72,9 @@ static struct addr_marker address_markers[] = {
{ 0, "User Space" },
#ifdef CONFIG_X86_64
{ 0x8000000000000000UL, "Kernel Space" },
- { PAGE_OFFSET, "Low Kernel Mapping" },
- { VMALLOC_START, "vmalloc() Area" },
- { VMEMMAP_START, "Vmemmap" },
+ { 0/* PAGE_OFFSET */, "Low Kernel Mapping" },
+ { 0/* VMALLOC_START */, "vmalloc() Area" },
+ { 0/* VMEMMAP_START */, "Vmemmap" },
# ifdef CONFIG_X86_ESPFIX64
{ ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
# endif
@@ -434,8 +434,16 @@ void ptdump_walk_pgd_level_checkwx(void)
static int __init pt_dump_init(void)
{
+ /*
+ * Various markers are not compile-time constants, so assign them
+ * here.
+ */
+#ifdef CONFIG_X86_64
+ address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
+ address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+ address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
+#endif
#ifdef CONFIG_X86_32
- /* Not a compile-time constant on x86-32 */
address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
@@ -446,8 +454,4 @@ static int __init pt_dump_init(void)
return 0;
}
-
__initcall(pt_dump_init);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
-MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
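
With CONFIG_RANDOMIZE_MEMORY, PAGE_OFFSET, VMALLOC_START and VMEMMAP_START become runtime variables, so they can no longer appear in the static initializer of address_markers[]; the patch zeroes those initializers and fills them in at init time. A minimal user-space sketch of the same pattern, with illustrative names and values:

/*
 * Sketch: a table field that is no longer a compile-time constant is
 * assigned by an init-time hook instead of a static initializer.
 */
#include <stdio.h>

unsigned long page_offset_base;		/* set at "boot" (runtime)          */
#define PAGE_OFFSET page_offset_base	/* no longer a compile-time constant */

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

/* { PAGE_OFFSET, "Low Kernel Mapping" } would not compile here. */
static struct addr_marker markers[] = {
	{ 0, "Low Kernel Mapping" },
};

static void __attribute__((constructor)) markers_init(void)
{
	page_offset_base = 0xffff880000000000UL;	/* illustrative, 64-bit host */
	markers[0].start_address = PAGE_OFFSET;
}

int main(void)
{
	printf("%s starts at %#lx\n", markers[0].name,
	       markers[0].start_address);
	return 0;
}
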
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 4bb53b89f3c5..832b98f822be 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,6 +1,7 @@
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/traps.h>
+#include <asm/kdebug.h>
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
struct pt_regs *, int);
@@ -37,7 +38,7 @@ bool ex_handler_ext(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
/* Special hack for uaccess_err */
- current_thread_info()->uaccess_err = 1;
+ current->thread.uaccess_err = 1;
regs->ip = ex_fixup_addr(fixup);
return true;
}
@@ -46,8 +47,9 @@ EXPORT_SYMBOL(ex_handler_ext);
bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
- WARN_ONCE(1, "unchecked MSR access error: RDMSR from 0x%x\n",
- (unsigned int)regs->cx);
+ if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
+ (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
+ show_stack_regs(regs);
/* Pretend that the read succeeded and returned 0. */
regs->ip = ex_fixup_addr(fixup);
@@ -60,9 +62,10 @@ EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
- WARN_ONCE(1, "unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x)\n",
- (unsigned int)regs->cx,
- (unsigned int)regs->dx, (unsigned int)regs->ax);
+ if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
+ (unsigned int)regs->cx, (unsigned int)regs->dx,
+ (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
+ show_stack_regs(regs);
/* Pretend that the write succeeded. */
regs->ip = ex_fixup_addr(fixup);
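
The handlers above switch from WARN_ONCE() to pr_warn_once() and key show_stack_regs() off its return value, so the register/stack dump also happens only on the first occurrence; this assumes the _once printk helpers report whether they actually printed, which is what the new code relies on. A user-space sketch of that once-pattern:

#include <stdio.h>
#include <stdbool.h>

/* Sketch of the warn-once pattern: the message and the expensive state
 * dump fire only the first time the error path is hit. */
static bool report_once(const char *msg)
{
	static bool warned;

	if (warned)
		return false;
	warned = true;
	fprintf(stderr, "%s\n", msg);
	return true;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (report_once("unchecked MSR access error"))
			fprintf(stderr, "(register/stack dump would go here)\n");
	return 0;
}
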
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7d1fa7cd2374..dc8023060456 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -439,7 +439,7 @@ static noinline int vmalloc_fault(unsigned long address)
* happen within a race in page table update. In the later
* case just flush:
*/
- pgd = pgd_offset(current->active_mm, address);
+ pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
@@ -737,7 +737,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
* In this case we need to make sure we're not recursively
* faulting through the emulate_vsyscall() logic.
*/
- if (current_thread_info()->sig_on_uaccess_error && signal) {
+ if (current->thread.sig_on_uaccess_err && signal) {
tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | PF_USER;
tsk->thread.cr2 = address;
@@ -1353,7 +1353,7 @@ good_area:
* the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
* we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
major |= fault & VM_FAULT_MAJOR;
/*
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index a6d739258137..6d18b70ed5a9 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,5 +1,5 @@
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 372aad2b3291..fb4c1b42fc7e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -17,6 +17,7 @@
#include <asm/proto.h>
#include <asm/dma.h> /* for MAX_DMA_PFN */
#include <asm/microcode.h>
+#include <asm/kaslr.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -590,6 +591,9 @@ void __init init_mem_mapping(void)
/* the ISA range is always mapped regardless of memory holes */
init_memory_mapping(0, ISA_END_ADDRESS);
+ /* Init the trampoline, possibly with KASLR memory offset */
+ init_trampoline();
+
/*
* If the allocation is in bottom-up direction, we setup direct mapping
* in bottom-up, otherwise we setup direct mapping in top-down.
@@ -696,13 +700,6 @@ void free_initmem(void)
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
/*
- * Remember, initrd memory may contain microcode or other useful things.
- * Before we lose initrd mem, we need to find a place to hold them
- * now that normal virtual memory is enabled.
- */
- save_microcode_in_initrd();
-
- /*
* end could be not aligned, and We can not align that,
* decompresser could be confused by aligned initrd_end
* We already reserve the end partial page before in
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 84df150ee77e..cf8059016ec8 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -5,7 +5,6 @@
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
-#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bce2e5d9edd4..14b9dd71d9e8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -27,7 +27,6 @@
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
-#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
@@ -328,22 +327,30 @@ void __init cleanup_highmap(void)
}
}
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
pgprot_t prot)
{
- unsigned long pages = 0, next;
- unsigned long last_map_addr = end;
+ unsigned long pages = 0, paddr_next;
+ unsigned long paddr_last = paddr_end;
+ pte_t *pte;
int i;
- pte_t *pte = pte_page + pte_index(addr);
+ pte = pte_page + pte_index(paddr);
+ i = pte_index(paddr);
- for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
- next = (addr & PAGE_MASK) + PAGE_SIZE;
- if (addr >= end) {
+ for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+ paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+ if (paddr >= paddr_end) {
if (!after_bootmem &&
- !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
- !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+ !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+ E820_RAM) &&
+ !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+ E820_RESERVED_KERN))
set_pte(pte, __pte(0));
continue;
}
@@ -354,54 +361,61 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
* pagetable pages as RO. So assume someone who pre-setup
* these mappings are more intelligent.
*/
- if (pte_val(*pte)) {
+ if (!pte_none(*pte)) {
if (!after_bootmem)
pages++;
continue;
}
if (0)
- printk(" pte=%p addr=%lx pte=%016lx\n",
- pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+ pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+ pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
pages++;
- set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
- last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+ set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+ paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
}
update_page_count(PG_LEVEL_4K, pages);
- return last_map_addr;
+ return paddr_last;
}
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
unsigned long page_size_mask, pgprot_t prot)
{
- unsigned long pages = 0, next;
- unsigned long last_map_addr = end;
+ unsigned long pages = 0, paddr_next;
+ unsigned long paddr_last = paddr_end;
- int i = pmd_index(address);
+ int i = pmd_index(paddr);
- for (; i < PTRS_PER_PMD; i++, address = next) {
- pmd_t *pmd = pmd_page + pmd_index(address);
+ for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+ pmd_t *pmd = pmd_page + pmd_index(paddr);
pte_t *pte;
pgprot_t new_prot = prot;
- next = (address & PMD_MASK) + PMD_SIZE;
- if (address >= end) {
+ paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+ if (paddr >= paddr_end) {
if (!after_bootmem &&
- !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
- !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+ !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+ E820_RAM) &&
+ !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+ E820_RESERVED_KERN))
set_pmd(pmd, __pmd(0));
continue;
}
- if (pmd_val(*pmd)) {
+ if (!pmd_none(*pmd)) {
if (!pmd_large(*pmd)) {
spin_lock(&init_mm.page_table_lock);
pte = (pte_t *)pmd_page_vaddr(*pmd);
- last_map_addr = phys_pte_init(pte, address,
- end, prot);
+ paddr_last = phys_pte_init(pte, paddr,
+ paddr_end, prot);
spin_unlock(&init_mm.page_table_lock);
continue;
}
@@ -420,7 +434,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
if (page_size_mask & (1 << PG_LEVEL_2M)) {
if (!after_bootmem)
pages++;
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -430,51 +444,65 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
pages++;
spin_lock(&init_mm.page_table_lock);
set_pte((pte_t *)pmd,
- pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+ pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
__pgprot(pgprot_val(prot) | _PAGE_PSE)));
spin_unlock(&init_mm.page_table_lock);
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
pte = alloc_low_page();
- last_map_addr = phys_pte_init(pte, address, end, new_prot);
+ paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
spin_lock(&init_mm.page_table_lock);
pmd_populate_kernel(&init_mm, pmd, pte);
spin_unlock(&init_mm.page_table_lock);
}
update_page_count(PG_LEVEL_2M, pages);
- return last_map_addr;
+ return paddr_last;
}
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical address do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
+ * It returns the last physical address mapped.
+ */
static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
- unsigned long page_size_mask)
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+ unsigned long page_size_mask)
{
- unsigned long pages = 0, next;
- unsigned long last_map_addr = end;
- int i = pud_index(addr);
+ unsigned long pages = 0, paddr_next;
+ unsigned long paddr_last = paddr_end;
+ unsigned long vaddr = (unsigned long)__va(paddr);
+ int i = pud_index(vaddr);
- for (; i < PTRS_PER_PUD; i++, addr = next) {
- pud_t *pud = pud_page + pud_index(addr);
+ for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+ pud_t *pud;
pmd_t *pmd;
pgprot_t prot = PAGE_KERNEL;
- next = (addr & PUD_MASK) + PUD_SIZE;
- if (addr >= end) {
+ vaddr = (unsigned long)__va(paddr);
+ pud = pud_page + pud_index(vaddr);
+ paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+ if (paddr >= paddr_end) {
if (!after_bootmem &&
- !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
- !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+ !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+ E820_RAM) &&
+ !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+ E820_RESERVED_KERN))
set_pud(pud, __pud(0));
continue;
}
- if (pud_val(*pud)) {
+ if (!pud_none(*pud)) {
if (!pud_large(*pud)) {
pmd = pmd_offset(pud, 0);
- last_map_addr = phys_pmd_init(pmd, addr, end,
- page_size_mask, prot);
+ paddr_last = phys_pmd_init(pmd, paddr,
+ paddr_end,
+ page_size_mask,
+ prot);
__flush_tlb_all();
continue;
}
@@ -493,7 +521,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
if (page_size_mask & (1 << PG_LEVEL_1G)) {
if (!after_bootmem)
pages++;
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -503,16 +531,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
pages++;
spin_lock(&init_mm.page_table_lock);
set_pte((pte_t *)pud,
- pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+ pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
PAGE_KERNEL_LARGE));
spin_unlock(&init_mm.page_table_lock);
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
pmd = alloc_low_page();
- last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
- prot);
+ paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+ page_size_mask, prot);
spin_lock(&init_mm.page_table_lock);
pud_populate(&init_mm, pud, pmd);
@@ -522,38 +550,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
update_page_count(PG_LEVEL_1G, pages);
- return last_map_addr;
+ return paddr_last;
}
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. The virtual and physical addresses have to be aligned on PMD level
+ * down. It returns the last physical address mapped.
+ */
unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
- unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+ unsigned long paddr_end,
unsigned long page_size_mask)
{
bool pgd_changed = false;
- unsigned long next, last_map_addr = end;
- unsigned long addr;
+ unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
- start = (unsigned long)__va(start);
- end = (unsigned long)__va(end);
- addr = start;
+ paddr_last = paddr_end;
+ vaddr = (unsigned long)__va(paddr_start);
+ vaddr_end = (unsigned long)__va(paddr_end);
+ vaddr_start = vaddr;
- for (; start < end; start = next) {
- pgd_t *pgd = pgd_offset_k(start);
+ for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+ pgd_t *pgd = pgd_offset_k(vaddr);
pud_t *pud;
- next = (start & PGDIR_MASK) + PGDIR_SIZE;
+ vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
if (pgd_val(*pgd)) {
pud = (pud_t *)pgd_page_vaddr(*pgd);
- last_map_addr = phys_pud_init(pud, __pa(start),
- __pa(end), page_size_mask);
+ paddr_last = phys_pud_init(pud, __pa(vaddr),
+ __pa(vaddr_end),
+ page_size_mask);
continue;
}
pud = alloc_low_page();
- last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
- page_size_mask);
+ paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+ page_size_mask);
spin_lock(&init_mm.page_table_lock);
pgd_populate(&init_mm, pgd, pud);
@@ -562,11 +596,11 @@ kernel_physical_mapping_init(unsigned long start,
}
if (pgd_changed)
- sync_global_pgds(addr, end - 1, 0);
+ sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
__flush_tlb_all();
- return last_map_addr;
+ return paddr_last;
}
#ifndef CONFIG_NUMA
@@ -673,7 +707,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
for (i = 0; i < PTRS_PER_PTE; i++) {
pte = pte_start + i;
- if (pte_val(*pte))
+ if (!pte_none(*pte))
return;
}
@@ -691,7 +725,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
for (i = 0; i < PTRS_PER_PMD; i++) {
pmd = pmd_start + i;
- if (pmd_val(*pmd))
+ if (!pmd_none(*pmd))
return;
}
@@ -702,27 +736,6 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
spin_unlock(&init_mm.page_table_lock);
}
-/* Return true if pgd is changed, otherwise return false. */
-static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
-{
- pud_t *pud;
- int i;
-
- for (i = 0; i < PTRS_PER_PUD; i++) {
- pud = pud_start + i;
- if (pud_val(*pud))
- return false;
- }
-
- /* free a pud table */
- free_pagetable(pgd_page(*pgd), 0);
- spin_lock(&init_mm.page_table_lock);
- pgd_clear(pgd);
- spin_unlock(&init_mm.page_table_lock);
-
- return true;
-}
-
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
bool direct)
@@ -913,7 +926,6 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
unsigned long addr;
pgd_t *pgd;
pud_t *pud;
- bool pgd_changed = false;
for (addr = start; addr < end; addr = next) {
next = pgd_addr_end(addr, end);
@@ -924,13 +936,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
pud = (pud_t *)pgd_page_vaddr(*pgd);
remove_pud_table(pud, addr, next, direct);
- if (free_pud_table(pud, pgd))
- pgd_changed = true;
}
- if (pgd_changed)
- sync_global_pgds(start, end - 1, 1);
-
flush_tlb_all();
}
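
Most of the init_64.c churn is the addr/paddr rename, but the functional change in phys_pud_init() is that the PUD index is now computed from __va(paddr) rather than from the physical address: once KASLR shifts the direct mapping by a PUD-aligned (but not PGD-aligned) amount, physical and virtual addresses stop sharing PUD indices. A small sketch with illustrative constants:

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of why phys_pud_init() indexes the PUD page with the *virtual*
 * address. Constants mirror 4-level x86-64 paging; the randomized base
 * (shifted by 37 GiB here) is an illustrative value.
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512UL

static unsigned long pud_index(uint64_t addr)
{
	return (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

int main(void)
{
	uint64_t page_offset_base = 0xffff880000000000ULL + (37ULL << PUD_SHIFT);
	uint64_t paddr = 0x100000000ULL;		/* 4 GiB */
	uint64_t vaddr = page_offset_base + paddr;	/* __va(paddr) */

	printf("pud_index(paddr) = %lu, pud_index(__va(paddr)) = %lu\n",
	       pud_index(paddr), pud_index(vaddr));
	return 0;
}
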
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 9c0ff045fdd4..ada98b39b8ad 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -18,7 +18,7 @@
#include <asm/iomap.h>
#include <asm/pat.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/highmem.h>
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index f0894910bdd7..7aaa2635862d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -9,7 +9,6 @@
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 1b1110fa0057..0493c17b8a51 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -54,8 +54,8 @@ static int kasan_die_handler(struct notifier_block *self,
void *data)
{
if (val == DIE_GPF) {
- pr_emerg("CONFIG_KASAN_INLINE enabled");
- pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+ pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+ pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
}
return NOTIFY_OK;
}
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
new file mode 100644
index 000000000000..26dccd6c0df1
--- /dev/null
+++ b/arch/x86/mm/kaslr.c
@@ -0,0 +1,172 @@
+/*
+ * This file implements KASLR memory randomization for x86_64. It randomizes
+ * the virtual address space of kernel memory regions (physical memory
+ * mapping, vmalloc & vmemmap) for x86_64. This security feature mitigates
+ * exploits relying on predictable kernel addresses.
+ *
+ * Entropy is generated using the KASLR early boot functions now shared in
+ * the lib directory (originally written by Kees Cook). Randomization is
+ * done on PGD & PUD page table levels to increase possible addresses. The
+ * physical memory mapping code was adapted to support PUD level virtual
+ * addresses. On the best configuration, this implementation provides about
+ * 30,000 possible virtual addresses on average for each memory region. An additional
+ * low memory page is used to ensure each CPU can start with a PGD aligned
+ * virtual address (for realmode).
+ *
+ * The order of each memory region is not changed. The feature looks at
+ * the available space for the regions based on different configuration
+ * options and randomizes the base and space between each. The size of the
+ * physical memory mapping is the available physical memory.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/random.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/kaslr.h>
+
+#include "mm_internal.h"
+
+#define TB_SHIFT 40
+
+/*
+ * Virtual address start and end range for randomization. The end changes based
+ * on the configuration to have the highest amount of space for randomization.
+ * It increases the possible random position for each randomized region.
+ *
+ * You need to add an if/def entry if you introduce a new memory region
+ * compatible with KASLR. Your entry must be in logical order with memory
+ * layout. For example, ESPFIX is before EFI because its virtual address is
+ * before. You also need to add a BUILD_BUG_ON in kernel_randomize_memory to
+ * ensure that this order is correct and won't be changed.
+ */
+static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
+static const unsigned long vaddr_end = VMEMMAP_START;
+
+/* Default values */
+unsigned long page_offset_base = __PAGE_OFFSET_BASE;
+EXPORT_SYMBOL(page_offset_base);
+unsigned long vmalloc_base = __VMALLOC_BASE;
+EXPORT_SYMBOL(vmalloc_base);
+
+/*
+ * Memory regions randomized by KASLR (except modules that use a separate logic
+ * earlier during boot). The list is ordered based on virtual addresses. This
+ * order is kept after randomization.
+ */
+static __initdata struct kaslr_memory_region {
+ unsigned long *base;
+ unsigned long size_tb;
+} kaslr_regions[] = {
+ { &page_offset_base, 64/* Maximum */ },
+ { &vmalloc_base, VMALLOC_SIZE_TB },
+};
+
+/* Get size in bytes used by the memory region */
+static inline unsigned long get_padding(struct kaslr_memory_region *region)
+{
+ return (region->size_tb << TB_SHIFT);
+}
+
+/*
+ * Apply no randomization if KASLR was disabled at boot or if KASAN
+ * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
+ */
+static inline bool kaslr_memory_enabled(void)
+{
+ return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+}
+
+/* Initialize base and padding for each memory region randomized with KASLR */
+void __init kernel_randomize_memory(void)
+{
+ size_t i;
+ unsigned long vaddr = vaddr_start;
+ unsigned long rand, memory_tb;
+ struct rnd_state rand_state;
+ unsigned long remain_entropy;
+
+ if (!kaslr_memory_enabled())
+ return;
+
+ /*
+	 * Update the physical memory mapping to the available memory and
+	 * add padding if needed (especially for memory hotplug support).
+ */
+ BUG_ON(kaslr_regions[0].base != &page_offset_base);
+ memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+ CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
+
+	/* Adapt physical memory region size based on available memory */
+ if (memory_tb < kaslr_regions[0].size_tb)
+ kaslr_regions[0].size_tb = memory_tb;
+
+ /* Calculate entropy available between regions */
+ remain_entropy = vaddr_end - vaddr_start;
+ for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
+ remain_entropy -= get_padding(&kaslr_regions[i]);
+
+ prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));
+
+ for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
+ unsigned long entropy;
+
+ /*
+ * Select a random virtual address using the extra entropy
+ * available.
+ */
+ entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
+ prandom_bytes_state(&rand_state, &rand, sizeof(rand));
+ entropy = (rand % (entropy + 1)) & PUD_MASK;
+ vaddr += entropy;
+ *kaslr_regions[i].base = vaddr;
+
+ /*
+ * Jump the region and add a minimum padding based on
+ * randomization alignment.
+ */
+ vaddr += get_padding(&kaslr_regions[i]);
+ vaddr = round_up(vaddr + 1, PUD_SIZE);
+ remain_entropy -= entropy;
+ }
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. It consumes only one low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+ unsigned long paddr, paddr_next;
+ pgd_t *pgd;
+ pud_t *pud_page, *pud_page_tramp;
+ int i;
+
+ if (!kaslr_memory_enabled()) {
+ init_trampoline_default();
+ return;
+ }
+
+ pud_page_tramp = alloc_low_page();
+
+ paddr = 0;
+ pgd = pgd_offset_k((unsigned long)__va(paddr));
+ pud_page = (pud_t *) pgd_page_vaddr(*pgd);
+
+ for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+ pud_t *pud, *pud_tramp;
+ unsigned long vaddr = (unsigned long)__va(paddr);
+
+ pud_tramp = pud_page_tramp + pud_index(paddr);
+ pud = pud_page + pud_index(vaddr);
+ paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+ *pud_tramp = *pud;
+ }
+
+ set_pgd(&trampoline_pgd_entry,
+ __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+}
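
A user-space sketch of the walk in kernel_randomize_memory() above: subtract each region's own size from the total virtual span, then hand every region a random, PUD-aligned slice of the remaining entropy as its base offset before stepping past it. Sizes and the RNG are stand-ins, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch of the region randomization walk; sizes are illustrative and
 * rand() stands in for the boot-time prandom state (64-bit host assumed).
 */
#define TB		(1UL << 40)
#define PUD_SIZE	(1UL << 30)
#define PUD_MASK	(~(PUD_SIZE - 1))

struct region {
	const char *name;
	unsigned long size;
};

int main(void)
{
	struct region regions[] = {
		{ "physical mapping", 64 * TB },
		{ "vmalloc",          32 * TB },
	};
	unsigned long span = 120 * TB;	/* vaddr_end - vaddr_start */
	unsigned long vaddr = 0;	/* offset from vaddr_start */
	unsigned long remain = span;
	int i, n = sizeof(regions) / sizeof(regions[0]);

	for (i = 0; i < n; i++)
		remain -= regions[i].size;	/* entropy left to spread */

	srand(1);
	for (i = 0; i < n; i++) {
		unsigned long entropy = remain / (n - i);
		unsigned long r = ((unsigned long)rand() << 31) | (unsigned long)rand();

		entropy = (r % (entropy + 1)) & PUD_MASK;	/* PUD-aligned slice */
		vaddr += entropy;
		printf("%-16s base = start + %4lu GiB\n",
		       regions[i].name, vaddr >> 30);

		vaddr += regions[i].size;		/* step past the region     */
		vaddr = (vaddr + PUD_SIZE) & PUD_MASK;	/* round_up(vaddr+1, PUD)   */
		remain -= entropy;
	}
	return 0;
}
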
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index b4f2e7e9e907..4515bae36bbe 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
index aec124214d97..c2638a7d2c10 100644
--- a/arch/x86/mm/kmemcheck/shadow.c
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -1,5 +1,5 @@
#include <linux/kmemcheck.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index ddb2244b06a1..afc47f5c9531 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -11,7 +11,7 @@
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 0057a7accfb1..bef36622e408 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -24,7 +24,7 @@
#define DEBUG 1
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 9c086c57105c..fb682108f4dc 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -1,4 +1,5 @@
/* Common code for 32 and 64-bit NUMA */
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
@@ -7,7 +8,6 @@
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
-#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
@@ -15,7 +15,6 @@
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
-#include <asm/acpi.h>
#include <asm/amd_nb.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 47b6436e41c2..6b7ce6279133 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,7 +24,7 @@
#include <linux/bootmem.h>
#include <linux/memblock.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7a1f7bbf4105..849dc09fa4f0 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -101,7 +101,8 @@ static inline unsigned long highmap_start_pfn(void)
static inline unsigned long highmap_end_pfn(void)
{
- return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
+ /* Do not reference physical address outside the kernel. */
+ return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}
#endif
@@ -112,6 +113,12 @@ within(unsigned long addr, unsigned long start, unsigned long end)
return addr >= start && addr < end;
}
+static inline int
+within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
+{
+ return addr >= start && addr <= end;
+}
+
/*
* Flushing functions
*/
@@ -746,18 +753,6 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
return true;
}
-static bool try_to_free_pud_page(pud_t *pud)
-{
- int i;
-
- for (i = 0; i < PTRS_PER_PUD; i++)
- if (!pud_none(pud[i]))
- return false;
-
- free_page((unsigned long)pud);
- return true;
-}
-
static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
pte_t *pte = pte_offset_kernel(pmd, start);
@@ -871,16 +866,6 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
*/
}
-static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
-{
- pgd_t *pgd_entry = root + pgd_index(addr);
-
- unmap_pud_range(pgd_entry, addr, end);
-
- if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
- pgd_clear(pgd_entry);
-}
-
static int alloc_pte_page(pmd_t *pmd)
{
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -1113,7 +1098,12 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
ret = populate_pud(cpa, addr, pgd_entry, pgprot);
if (ret < 0) {
- unmap_pgd_range(cpa->pgd, addr,
+ /*
+ * Leave the PUD page in place in case some other CPU or thread
+ * already found it, but remove any useless entries we just
+ * added to it.
+ */
+ unmap_pud_range(pgd_entry, addr,
addr + (cpa->numpages << PAGE_SHIFT));
return ret;
}
@@ -1185,7 +1175,7 @@ repeat:
return __cpa_process_fault(cpa, address, primary);
old_pte = *kpte;
- if (!pte_val(old_pte))
+ if (pte_none(old_pte))
return __cpa_process_fault(cpa, address, primary);
if (level == PG_LEVEL_4K) {
@@ -1316,7 +1306,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
* to touch the high mapped kernel as well:
*/
if (!within(vaddr, (unsigned long)_text, _brk_end) &&
- within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+ within_inclusive(cpa->pfn, highmap_start_pfn(),
+ highmap_end_pfn())) {
unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
__START_KERNEL_map - phys_base;
alias_cpa = *cpa;
@@ -1991,12 +1982,6 @@ out:
return retval;
}
-void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
- unsigned numpages)
-{
- unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
-}
-
/*
* The testcases use internal knowledge of the implementation that shouldn't
* be exposed to the rest of the kernel. Include these directly here.
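
On the highmap_end_pfn()/within_inclusive() change above: the old code passed an address one byte past the kernel's high mapping to __pa_symbol() just to derive an exclusive end PFN; the new code takes the last mapped byte instead and compensates with an inclusive range check. A small arithmetic sketch with made-up numbers:

#include <stdio.h>

/* Sketch of the end-PFN arithmetic; _brk_end is an illustrative value. */
#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)

static unsigned long roundup_pmd(unsigned long x)
{
	return (x + PMD_SIZE - 1) & ~(PMD_SIZE - 1);
}

int main(void)
{
	unsigned long _brk_end = 0x31f4567;	/* illustrative */

	unsigned long old_end = roundup_pmd(_brk_end) >> PAGE_SHIFT;	    /* exclusive */
	unsigned long new_end = (roundup_pmd(_brk_end) - 1) >> PAGE_SHIFT; /* inclusive */

	printf("old (exclusive) end pfn: %#lx\n", old_end);
	printf("new (inclusive) end pfn: %#lx\n", new_end);
	return 0;
}
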
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index fb0604f11eec..ecb1b69c1651 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,7 +11,6 @@
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -755,11 +754,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
while (cursor < to) {
- if (!devmem_is_allowed(pfn)) {
- pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
- current->comm, from, to - 1);
+ if (!devmem_is_allowed(pfn))
return 0;
- }
cursor += PAGE_SIZE;
pfn++;
}
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 2f7702253ccf..de391b7bc19a 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -11,7 +11,6 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched.h>
#include <linux/gfp.h>
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index 9f0614daea85..a235869532bc 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -26,7 +26,6 @@
* Bjorn Steinbrink (B.Steinbrink@gmx.de), 2007
*/
-#include <linux/module.h>
#include <linux/ptrace.h> /* struct pt_regs */
#include "pf_in.h"
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 4eb287e25043..3feec5af4e67 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -6,7 +6,7 @@
#include <asm/fixmap.h>
#include <asm/mtrr.h>
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
@@ -18,7 +18,7 @@ gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- return (pte_t *)__get_free_page(PGALLOC_GFP);
+ return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -207,9 +207,13 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
int i;
bool failed = false;
+ gfp_t gfp = PGALLOC_GFP;
+
+ if (mm == &init_mm)
+ gfp &= ~__GFP_ACCOUNT;
for(i = 0; i < PREALLOCATED_PMDS; i++) {
- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
+ pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
if (!pmd)
failed = true;
if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 75cc0978d45d..9adce776852b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -8,7 +8,6 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -47,7 +46,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
return;
}
pte = pte_offset_kernel(pmd, vaddr);
- if (pte_val(pteval))
+ if (!pte_none(pteval))
set_pte_at(&init_mm, vaddr, pte, pteval);
else
pte_clear(&init_mm, vaddr, pte);
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index e666cbbb9261..cfc3b9121ce4 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -1,6 +1,6 @@
#include <linux/bootmem.h>
#include <linux/mmdebug.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index b5f821881465..35fe69529bc1 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -13,10 +13,8 @@
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/topology.h>
-#include <linux/bootmem.h>
-#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
@@ -24,51 +22,6 @@
#include <asm/apic.h>
#include <asm/uv/uv.h>
-int acpi_numa __initdata;
-
-static __init int setup_node(int pxm)
-{
- return acpi_map_pxm_to_node(pxm);
-}
-
-static __init void bad_srat(void)
-{
- printk(KERN_ERR "SRAT: SRAT not used.\n");
- acpi_numa = -1;
-}
-
-static __init inline int srat_disabled(void)
-{
- return acpi_numa < 0;
-}
-
-/*
- * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
- * I/O localities since SRAT does not list them. I/O localities are
- * not supported at this point.
- */
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
-{
- int i, j;
-
- for (i = 0; i < slit->locality_count; i++) {
- const int from_node = pxm_to_node(i);
-
- if (from_node == NUMA_NO_NODE)
- continue;
-
- for (j = 0; j < slit->locality_count; j++) {
- const int to_node = pxm_to_node(j);
-
- if (to_node == NUMA_NO_NODE)
- continue;
-
- numa_set_distance(from_node, to_node,
- slit->entry[slit->locality_count * i + j]);
- }
- }
-}
-
/* Callback for Proximity Domain -> x2APIC mapping */
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
@@ -91,7 +44,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
pxm, apic_id);
return;
}
- node = setup_node(pxm);
+ node = acpi_map_pxm_to_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
@@ -104,7 +57,6 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
}
set_apicid_to_node(apic_id, node);
node_set(node, numa_nodes_parsed);
- acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
pxm, apic_id, node);
}
@@ -127,7 +79,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
pxm = pa->proximity_domain_lo;
if (acpi_srat_revision >= 2)
pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
- node = setup_node(pxm);
+ node = acpi_map_pxm_to_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
@@ -146,74 +98,10 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
set_apicid_to_node(apic_id, node);
node_set(node, numa_nodes_parsed);
- acpi_numa = 1;
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
pxm, apic_id, node);
}
-#ifdef CONFIG_MEMORY_HOTPLUG
-static inline int save_add_info(void) {return 1;}
-#else
-static inline int save_add_info(void) {return 0;}
-#endif
-
-/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-int __init
-acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
-{
- u64 start, end;
- u32 hotpluggable;
- int node, pxm;
-
- if (srat_disabled())
- goto out_err;
- if (ma->header.length != sizeof(struct acpi_srat_mem_affinity))
- goto out_err_bad_srat;
- if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- goto out_err;
- hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
- if (hotpluggable && !save_add_info())
- goto out_err;
-
- start = ma->base_address;
- end = start + ma->length;
- pxm = ma->proximity_domain;
- if (acpi_srat_revision <= 1)
- pxm &= 0xff;
-
- node = setup_node(pxm);
- if (node < 0) {
- printk(KERN_ERR "SRAT: Too many proximity domains.\n");
- goto out_err_bad_srat;
- }
-
- if (numa_add_memblk(node, start, end) < 0)
- goto out_err_bad_srat;
-
- node_set(node, numa_nodes_parsed);
-
- pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
- node, pxm,
- (unsigned long long) start, (unsigned long long) end - 1,
- hotpluggable ? " hotplug" : "",
- ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");
-
- /* Mark hotplug range in memblock. */
- if (hotpluggable && memblock_mark_hotplug(start, ma->length))
- pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
- (unsigned long long)start, (unsigned long long)end - 1);
-
- max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
-
- return 0;
-out_err_bad_srat:
- bad_srat();
-out_err:
- return -1;
-}
-
-void __init acpi_numa_arch_fixup(void) {}
-
int __init x86_acpi_numa_init(void)
{
int ret;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5643fd0b1a7d..4dbe65622810 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -4,7 +4,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index b2a4e2a61f6b..3cd69832d7f4 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
return -ENODEV;
printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+ acpi_irq_penalty_init();
pcibios_enable_irq = acpi_pci_irq_enable;
pcibios_disable_irq = acpi_pci_irq_disable;
x86_init.pci.init_irq = x86_init_noop;
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 8b93e634af84..5a18aedcb341 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -36,7 +36,8 @@
#define PCIE_CAP_OFFSET 0x100
/* Quirks for the listed devices */
-#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_MRFLD_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_MRFLD_HSU 0x1191
/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
@@ -225,13 +226,20 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
/* Special treatment for IRQ0 */
if (dev->irq == 0) {
/*
+		 * Skip the HS UART common registers device since it has
+		 * IRQ0 assigned and is not used by the kernel.
+ */
+ if (dev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU)
+ return -EBUSY;
+ /*
* TNG has IRQ0 assigned to eMMC controller. But there
* are also other devices with bogus PCI configuration
* that have IRQ0 assigned. This check ensures that
- * eMMC gets it.
+		 * eMMC gets it. The rest of the devices can still be
+		 * enabled without an interrupt line being allocated.
*/
- if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
- return -EBUSY;
+ if (dev->device != PCI_DEVICE_ID_INTEL_MRFLD_MMC)
+ return 0;
}
break;
default:
@@ -308,14 +316,39 @@ static void pci_d3delay_fixup(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
-static void mrst_power_off_unused_dev(struct pci_dev *dev)
+static void mid_power_off_one_device(struct pci_dev *dev)
{
+ u16 pmcsr;
+
+ /*
+ * Update current state first, otherwise PCI core enforces PCI_D0 in
+	 * pci_set_power_state() for devices whose status was PCI_UNKNOWN.
+ */
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pci_power_t __force)(pmcsr & PCI_PM_CTRL_STATE_MASK);
+
pci_set_power_state(dev, PCI_D3hot);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
+
+static void mid_power_off_devices(struct pci_dev *dev)
+{
+ int id;
+
+ if (!pci_soc_mode)
+ return;
+
+ id = intel_mid_pwr_get_lss_id(dev);
+ if (id < 0)
+ return;
+
+ /*
+ * This sets only PMCSR bits. The actual power off will happen in
+ * arch/x86/platform/intel-mid/pwr.c.
+ */
+ mid_power_off_one_device(dev);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, mid_power_off_devices);
/*
* Langwell devices reside at fixed offsets, don't try to move them.
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
index 7792aba266df..613cac7395c4 100644
--- a/arch/x86/pci/vmd.c
+++ b/arch/x86/pci/vmd.c
@@ -195,7 +195,7 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
vmdirq->virq = virq;
irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
- vmdirq, handle_simple_irq, vmd, NULL);
+ vmdirq, handle_untracked_irq, vmd, NULL);
return 0;
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 99ddab79215e..3a483cb5ac81 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -9,7 +9,7 @@
* Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
* Stefano Stabellini <stefano.stabellini@eu.citrix.com>
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
index 81c769e80614..8ff7b9355416 100644
--- a/arch/x86/platform/atom/punit_atom_debug.c
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -23,10 +23,9 @@
#include <linux/seq_file.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
-/* Power gate status reg */
-#define PWRGT_STATUS 0x61
/* Subsystem config/status Video processor */
#define VED_SS_PM0 0x32
/* Subsystem config/status ISP (Image Signal Processor) */
@@ -35,12 +34,16 @@
#define MIO_SS_PM 0x3B
/* Shift bits for getting status for video, isp and i/o */
#define SSS_SHIFT 24
+
+/* Power gate status reg */
+#define PWRGT_STATUS 0x61
/* Shift bits for getting status for graphics rendering */
#define RENDER_POS 0
/* Shift bits for getting status for media control */
#define MEDIA_POS 2
/* Shift bits for getting status for Valley View/Baytrail display */
#define VLV_DISPLAY_POS 6
+
/* Subsystem config/status display for Cherry Trail SOC */
#define CHT_DSP_SSS 0x36
/* Shift bits for getting status for display */
@@ -52,6 +55,14 @@ struct punit_device {
int sss_pos;
};
+static const struct punit_device punit_device_tng[] = {
+ { "DISPLAY", CHT_DSP_SSS, SSS_SHIFT },
+ { "VED", VED_SS_PM0, SSS_SHIFT },
+ { "ISP", ISP_SS_PM0, SSS_SHIFT },
+ { "MIO", MIO_SS_PM, SSS_SHIFT },
+ { NULL }
+};
+
static const struct punit_device punit_device_byt[] = {
{ "GFX RENDER", PWRGT_STATUS, RENDER_POS },
{ "GFX MEDIA", PWRGT_STATUS, MEDIA_POS },
@@ -143,8 +154,9 @@ static void punit_dbgfs_unregister(void)
(kernel_ulong_t)&drv_data }
static const struct x86_cpu_id intel_punit_cpu_ids[] = {
- ICPU(55, punit_device_byt), /* Valleyview, Bay Trail */
- ICPU(76, punit_device_cht), /* Braswell, Cherry Trail */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
+ ICPU(INTEL_FAM6_ATOM_MERRIFIELD1, punit_device_tng),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
{}
};
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 701fd5843c87..b27bccd4390f 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -11,11 +11,9 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irq.h>
-#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
-#include <linux/reboot.h>
#include <asm/ce4100.h>
#include <asm/prom.h>
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index f93545e7dc54..17c8bbd4e2f0 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -98,21 +98,6 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
return status;
}
-void efi_get_time(struct timespec *now)
-{
- efi_status_t status;
- efi_time_t eft;
- efi_time_cap_t cap;
-
- status = efi.get_time(&eft, &cap);
- if (status != EFI_SUCCESS)
- pr_err("Oops: efitime: can't read time!\n");
-
- now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour,
- eft.minute, eft.second);
- now->tv_nsec = 0;
-}
-
void __init efi_find_mirror(void)
{
efi_memory_desc_t *md;
@@ -978,8 +963,6 @@ static void __init __efi_enter_virtual_mode(void)
* EFI mixed mode we need all of memory to be accessible when
* we pass parameters to the EFI runtime services in the
* thunking code.
- *
- * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
*/
free_pages((unsigned long)new_memmap, pg_shift);
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 338402b91d2e..cef39b097649 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -49,9 +49,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
return 0;
}
-void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
-{
-}
void __init efi_map_region(efi_memory_desc_t *md)
{
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6e7242be1c87..04db6fbce96d 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -24,7 +24,7 @@
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
- gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+ gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
if (!efi_pgd)
return -ENOMEM;
@@ -285,11 +285,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
return 0;
}
-void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
-{
- kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
-}
-
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
unsigned long flags = _PAGE_RW;
@@ -466,22 +461,17 @@ extern efi_status_t efi64_thunk(u32, ...);
#define efi_thunk(f, ...) \
({ \
efi_status_t __s; \
- unsigned long flags; \
- u32 func; \
- \
- efi_sync_low_kernel_mappings(); \
- local_irq_save(flags); \
+ unsigned long __flags; \
+ u32 __func; \
\
- efi_scratch.prev_cr3 = read_cr3(); \
- write_cr3((unsigned long)efi_scratch.efi_pgt); \
- __flush_tlb_all(); \
+ local_irq_save(__flags); \
+ arch_efi_call_virt_setup(); \
\
- func = runtime_service32(f); \
- __s = efi64_thunk(func, __VA_ARGS__); \
+ __func = runtime_service32(f); \
+ __s = efi64_thunk(__func, __VA_ARGS__); \
\
- write_cr3(efi_scratch.prev_cr3); \
- __flush_tlb_all(); \
- local_irq_restore(flags); \
+ arch_efi_call_virt_teardown(); \
+ local_irq_restore(__flags); \
\
__s; \
})
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index 0ce1b1913673..fa021dfab088 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfld.o pwr.o
# SFI specific code
ifdef CONFIG_X86_INTEL_MID
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 91ec9f8704bf..fc135bf70511 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -1,3 +1,5 @@
+# Family-Level Interface Shim (FLIS)
+obj-$(subst m,y,$(CONFIG_PINCTRL_MERRIFIELD)) += platform_mrfld_pinctrl.o
# IPC Devices
obj-y += platform_ipc.o
obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o
@@ -8,14 +10,18 @@ obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
+# SPI Devices
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
# I2C Devices
obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
-obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o
obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
-obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
+# I2C GPIO Expanders
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c
new file mode 100644
index 000000000000..4de8a664e6a1
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c
@@ -0,0 +1,43 @@
+/*
+ * Intel Merrifield FLIS platform device initialization file
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel-mid.h>
+
+#define FLIS_BASE_ADDR 0xff0c0000
+#define FLIS_LENGTH 0x8000
+
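+/*
+ * The FLIS (Family-Level Interface Shim, i.e. pin control) block sits at a
+ * fixed MMIO address on Intel Tangier; describe it as a plain memory
+ * resource for the pinctrl-merrifield platform driver registered below.
+ */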
+static struct resource mrfld_pinctrl_mmio_resource = {
+ .start = FLIS_BASE_ADDR,
+ .end = FLIS_BASE_ADDR + FLIS_LENGTH - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device mrfld_pinctrl_device = {
+ .name = "pinctrl-merrifield",
+ .id = PLATFORM_DEVID_NONE,
+ .resource = &mrfld_pinctrl_mmio_resource,
+ .num_resources = 1,
+};
+
+static int __init mrfld_pinctrl_init(void)
+{
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+ return platform_device_register(&mrfld_pinctrl_device);
+
+ return -ENODEV;
+}
+arch_initcall(mrfld_pinctrl_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
new file mode 100644
index 000000000000..429a94192671
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
@@ -0,0 +1,99 @@
+/*
+ * PCAL9555a platform data initialization file
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/pca953x.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+
+#define PCAL9555A_NUM 4
+
+static struct pca953x_platform_data pcal9555a_pdata[PCAL9555A_NUM];
+static int nr;
+
+static void __init *pcal9555a_platform_data(void *info)
+{
+ struct i2c_board_info *i2c_info = info;
+ char *type = i2c_info->type;
+ struct pca953x_platform_data *pcal9555a;
+ char base_pin_name[SFI_NAME_LEN + 1];
+ char intr_pin_name[SFI_NAME_LEN + 1];
+ int gpio_base, intr;
+
+ snprintf(base_pin_name, sizeof(base_pin_name), "%s_base", type);
+ snprintf(intr_pin_name, sizeof(intr_pin_name), "%s_int", type);
+
+ gpio_base = get_gpio_by_name(base_pin_name);
+ intr = get_gpio_by_name(intr_pin_name);
+
+ /* Check if the SFI record is valid */
+ if (gpio_base == -1)
+ return NULL;
+
+ if (nr >= PCAL9555A_NUM) {
+ pr_err("%s: Too many instances, only %d supported\n", __func__,
+ PCAL9555A_NUM);
+ return NULL;
+ }
+
+ pcal9555a = &pcal9555a_pdata[nr++];
+ pcal9555a->gpio_base = gpio_base;
+
+ if (intr >= 0) {
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ pcal9555a->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+ } else {
+ i2c_info->irq = -1;
+ pcal9555a->irq_base = -1;
+ }
+
+ strcpy(type, "pcal9555a");
+ return pcal9555a;
+}
+
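+/*
+ * SFI may describe up to PCAL9555A_NUM expanders ("pcal9555a-1" ..
+ * "pcal9555a-4"); they all share the platform data callback above.
+ */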
+static const struct devs_id pcal9555a_1_dev_id __initconst = {
+ .name = "pcal9555a-1",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &pcal9555a_platform_data,
+};
+
+static const struct devs_id pcal9555a_2_dev_id __initconst = {
+ .name = "pcal9555a-2",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &pcal9555a_platform_data,
+};
+
+static const struct devs_id pcal9555a_3_dev_id __initconst = {
+ .name = "pcal9555a-3",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &pcal9555a_platform_data,
+};
+
+static const struct devs_id pcal9555a_4_dev_id __initconst = {
+ .name = "pcal9555a-4",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &pcal9555a_platform_data,
+};
+
+sfi_device(pcal9555a_1_dev_id);
+sfi_device(pcal9555a_2_dev_id);
+sfi_device(pcal9555a_3_dev_id);
+sfi_device(pcal9555a_4_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
new file mode 100644
index 000000000000..30c601b399ee
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
@@ -0,0 +1,50 @@
+/*
+ * spidev platform data initialization file
+ *
+ * (C) Copyright 2014, 2016 Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
+#include <asm/intel-mid.h>
+
+#define MRFLD_SPI_DEFAULT_DMA_BURST 8
+#define MRFLD_SPI_DEFAULT_TIMEOUT 500
+
+/* GPIO pin for spidev chipselect */
+#define MRFLD_SPIDEV_GPIO_CS 111
+
+static struct pxa2xx_spi_chip spidev_spi_chip = {
+ .dma_burst_size = MRFLD_SPI_DEFAULT_DMA_BURST,
+ .timeout = MRFLD_SPI_DEFAULT_TIMEOUT,
+ .gpio_cs = MRFLD_SPIDEV_GPIO_CS,
+};
+
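+/*
+ * SFI provides the SPI board info itself; only attach the PXA2xx chip data
+ * and force SPI mode 0 here, no separate platform data is returned.
+ */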
+static void __init *spidev_platform_data(void *info)
+{
+ struct spi_board_info *spi_info = info;
+
+ spi_info->mode = SPI_MODE_0;
+ spi_info->controller_data = &spidev_spi_chip;
+
+ return NULL;
+}
+
+static const struct devs_id spidev_dev_id __initconst = {
+ .name = "spidev",
+ .type = SFI_DEV_TYPE_SPI,
+ .delay = 0,
+ .get_platform_data = &spidev_platform_data,
+};
+
+sfi_device(spidev_dev_id);
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 90bb997ed0a2..ce119d2ba0d0 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -16,10 +16,11 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/regulator/machine.h>
#include <linux/scatterlist.h>
#include <linux/sfi.h>
#include <linux/irq.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/notifier.h>
#include <asm/setup.h>
@@ -144,6 +145,15 @@ static void intel_mid_arch_setup(void)
out:
if (intel_mid_ops->arch_setup)
intel_mid_ops->arch_setup();
+
+ /*
+ * Intel MID platforms use explicitly defined regulators.
+ *
+ * Let the regulator core know that we do not have any additional
+ * regulators left. This lets it substitute unprovided regulators with
+ * dummy ones.
+ */
+ regulator_has_full_constraints();
}
/* MID systems don't have i8042 controller */
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfld.c
index bd1adc621781..59253db41bbc 100644
--- a/arch/x86/platform/intel-mid/mrfl.c
+++ b/arch/x86/platform/intel-mid/mrfld.c
@@ -1,5 +1,5 @@
/*
- * mrfl.c: Intel Merrifield platform specific setup code
+ * Intel Merrifield platform specific setup code
*
* (C) Copyright 2013 Intel Corporation
*
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
new file mode 100644
index 000000000000..c901a3423772
--- /dev/null
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -0,0 +1,416 @@
+/*
+ * Intel MID Power Management Unit (PWRMU) device driver
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * The Intel MID Power Management Unit device driver handles the South Complex
+ * PCI devices such as GPDMA, SPI, I2C, PWM, and so on. By default the PCI core
+ * modifies bits in the PMCSR register in the PCI configuration space. This is
+ * not enough on some SoCs like Intel Tangier. In such cases the PCI core sets
+ * a new power state of the device in question through a PM hook registered in
+ * struct pci_platform_pm_ops (see drivers/pci/pci-mid.c).
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#include <asm/intel-mid.h>
+
+/* Registers */
+#define PM_STS 0x00
+#define PM_CMD 0x04
+#define PM_ICS 0x08
+#define PM_WKC(x) (0x10 + (x) * 4)
+#define PM_WKS(x) (0x18 + (x) * 4)
+#define PM_SSC(x) (0x20 + (x) * 4)
+#define PM_SSS(x) (0x30 + (x) * 4)
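+
+/*
+ * Register pairs: PM_WKC()/PM_WKS() carry the wake configuration/status,
+ * while PM_SSC()/PM_SSS() hold the per-LSS power state configuration/status
+ * (LSS_PWS_BITS bits per LSS); see mid_pwr_set_state()/mid_pwr_get_state().
+ */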
+
+/* Bits in PM_STS */
+#define PM_STS_BUSY (1 << 8)
+
+/* Bits in PM_CMD */
+#define PM_CMD_CMD(x) ((x) << 0)
+#define PM_CMD_IOC (1 << 8)
+#define PM_CMD_D3cold (1 << 21)
+
+/* List of commands */
+#define CMD_SET_CFG 0x01
+
+/* Bits in PM_ICS */
+#define PM_ICS_INT_STATUS(x) ((x) & 0xff)
+#define PM_ICS_IE (1 << 8)
+#define PM_ICS_IP (1 << 9)
+#define PM_ICS_SW_INT_STS (1 << 10)
+
+/* List of interrupts */
+#define INT_INVALID 0
+#define INT_CMD_COMPLETE 1
+#define INT_CMD_ERR 2
+#define INT_WAKE_EVENT 3
+#define INT_LSS_POWER_ERR 4
+#define INT_S0iX_MSG_ERR 5
+#define INT_NO_C6 6
+#define INT_TRIGGER_ERR 7
+#define INT_INACTIVITY 8
+
+/* South Complex devices */
+#define LSS_MAX_SHARED_DEVS 4
+#define LSS_MAX_DEVS 64
+
+#define LSS_WS_BITS 1 /* wake state width */
+#define LSS_PWS_BITS 2 /* power state width */
+
+/* Supported device IDs */
+#define PCI_DEVICE_ID_PENWELL 0x0828
+#define PCI_DEVICE_ID_TANGIER 0x11a1
+
+struct mid_pwr_dev {
+ struct pci_dev *pdev;
+ pci_power_t state;
+};
+
+struct mid_pwr {
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ bool available;
+
+ struct mutex lock;
+ struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS];
+};
+
+static struct mid_pwr *midpwr;
+
+static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg)
+{
+ return readl(pwr->regs + PM_SSS(reg));
+}
+
+static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value)
+{
+ writel(value, pwr->regs + PM_SSC(reg));
+}
+
+static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value)
+{
+ writel(value, pwr->regs + PM_WKC(reg));
+}
+
+static void mid_pwr_interrupt_disable(struct mid_pwr *pwr)
+{
+ writel(~PM_ICS_IE, pwr->regs + PM_ICS);
+}
+
+static bool mid_pwr_is_busy(struct mid_pwr *pwr)
+{
+ return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY);
+}
+
+/* Wait up to 500ms for the latest PWRMU command to finish */
+static int mid_pwr_wait(struct mid_pwr *pwr)
+{
+ unsigned int count = 500000;
+ bool busy;
+
+ do {
+ busy = mid_pwr_is_busy(pwr);
+ if (!busy)
+ return 0;
+ udelay(1);
+ } while (--count);
+
+ return -EBUSY;
+}
+
+static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd)
+{
+ writel(PM_CMD_CMD(cmd), pwr->regs + PM_CMD);
+ return mid_pwr_wait(pwr);
+}
+
+static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new)
+{
+ int curstate;
+ u32 power;
+ int ret;
+
+ /* Check if the device is already in the desired state */
+ power = mid_pwr_get_state(pwr, reg);
+ curstate = (power >> bit) & 3;
+ if (curstate == new)
+ return 0;
+
+ /* Update the power state */
+ mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit));
+
+ /* Send command to SCU */
+ ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
+ if (ret)
+ return ret;
+
+ /* Check if the device has reached the desired state */
+ power = mid_pwr_get_state(pwr, reg);
+ curstate = (power >> bit) & 3;
+ if (curstate != new)
+ return -EAGAIN;
+
+ return 0;
+}
+
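+/*
+ * Devices may share a single LSS. The LSS can only go as deep as the
+ * shallowest (weakest) power state requested by any device on it, so cache
+ * each request and return the weakest one.
+ */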
+static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss,
+ struct pci_dev *pdev,
+ pci_power_t state)
+{
+ pci_power_t weakest = PCI_D3hot;
+ unsigned int j;
+
+ /* Find device in cache or first free cell */
+ for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
+ if (lss[j].pdev == pdev || !lss[j].pdev)
+ break;
+ }
+
+ /* Store the desired state in cache */
+ if (j < LSS_MAX_SHARED_DEVS) {
+ lss[j].pdev = pdev;
+ lss[j].state = state;
+ } else {
+ dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n");
+ weakest = state;
+ }
+
+ /* Find the power state we may use */
+ for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
+ if (lss[j].state < weakest)
+ weakest = lss[j].state;
+ }
+
+ return weakest;
+}
+
+static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
+ pci_power_t state, int id, int reg, int bit)
+{
+ const char *name;
+ int ret;
+
+ state = __find_weakest_power_state(pwr->lss[id], pdev, state);
+ name = pci_power_name(state);
+
+ ret = __update_power_state(pwr, reg, bit, (__force int)state);
+ if (ret) {
+ dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret);
+ return ret;
+ }
+
+ dev_vdbg(&pdev->dev, "Set power state %s\n", name);
+ return 0;
+}
+
+static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
+ pci_power_t state)
+{
+ int id, reg, bit;
+ int ret;
+
+ id = intel_mid_pwr_get_lss_id(pdev);
+ if (id < 0)
+ return id;
+
+ reg = (id * LSS_PWS_BITS) / 32;
+ bit = (id * LSS_PWS_BITS) % 32;
+
+ /* We support states between PCI_D0 and PCI_D3hot */
+ if (state < PCI_D0)
+ state = PCI_D0;
+ if (state > PCI_D3hot)
+ state = PCI_D3hot;
+
+ mutex_lock(&pwr->lock);
+ ret = __set_power_state(pwr, pdev, state, id, reg, bit);
+ mutex_unlock(&pwr->lock);
+ return ret;
+}
+
+int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+ struct mid_pwr *pwr = midpwr;
+ int ret = 0;
+
+ might_sleep();
+
+ if (pwr && pwr->available)
+ ret = mid_pwr_set_power_state(pwr, pdev, state);
+ dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
+
+int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
+{
+ int vndr;
+ u8 id;
+
+ /*
+ * The mapping to the PWRMU index is kept in the Logical SubSystem ID byte
+ * of the Vendor capability.
+ */
+ vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+ if (!vndr)
+ return -EINVAL;
+
+ /* Read the Logical SubSystem ID byte */
+ pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
+ if (!(id & INTEL_MID_PWR_LSS_TYPE))
+ return -ENODEV;
+
+ id &= ~INTEL_MID_PWR_LSS_TYPE;
+ if (id >= LSS_MAX_DEVS)
+ return -ERANGE;
+
+ return id;
+}
+
+static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id)
+{
+ struct mid_pwr *pwr = dev_id;
+ u32 ics;
+
+ ics = readl(pwr->regs + PM_ICS);
+ if (!(ics & PM_ICS_IP))
+ return IRQ_NONE;
+
+ writel(ics | PM_ICS_IP, pwr->regs + PM_ICS);
+
+ dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics));
+ return IRQ_HANDLED;
+}
+
+struct mid_pwr_device_info {
+ int (*set_initial_state)(struct mid_pwr *pwr);
+};
+
+static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mid_pwr_device_info *info = (void *)id->driver_data;
+ struct device *dev = &pdev->dev;
+ struct mid_pwr *pwr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ return ret;
+ }
+
+ pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
+ if (!pwr)
+ return -ENOMEM;
+
+ pwr->dev = dev;
+ pwr->regs = pcim_iomap_table(pdev)[0];
+ pwr->irq = pdev->irq;
+
+ mutex_init(&pwr->lock);
+
+ /* Disable interrupts */
+ mid_pwr_interrupt_disable(pwr);
+
+ if (info && info->set_initial_state) {
+ ret = info->set_initial_state(pwr);
+ if (ret)
+ dev_warn(dev, "Can't set initial state: %d\n", ret);
+ }
+
+ ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler,
+ IRQF_NO_SUSPEND, pci_name(pdev), pwr);
+ if (ret)
+ return ret;
+
+ pwr->available = true;
+ midpwr = pwr;
+
+ pci_set_drvdata(pdev, pwr);
+ return 0;
+}
+
+static int mid_set_initial_state(struct mid_pwr *pwr)
+{
+ unsigned int i, j;
+ int ret;
+
+ /*
+ * Enable wake events.
+ *
+ * PWRMU supports up to 32 sources for waking up the system. Ungate them
+ * all here.
+ */
+ mid_pwr_set_wake(pwr, 0, 0xffffffff);
+ mid_pwr_set_wake(pwr, 1, 0xffffffff);
+
+ /*
+ * Power off South Complex devices.
+ *
+ * There is a map (see the note below) of 64 devices with 2 bits each
+ * spread over 32-bit HW registers. The following calls set all devices to one
+ * known initial state, i.e. PCI_D3hot. This is done in conjunction
+ * with PMCSR setting in arch/x86/pci/intel_mid_pci.c.
+ *
+ * NOTE: The actual device mapping is provided by a platform at run
+ * time using vendor capability of PCI configuration space.
+ */
+ mid_pwr_set_state(pwr, 0, 0xffffffff);
+ mid_pwr_set_state(pwr, 1, 0xffffffff);
+ mid_pwr_set_state(pwr, 2, 0xffffffff);
+ mid_pwr_set_state(pwr, 3, 0xffffffff);
+
+ /* Send command to SCU */
+ ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < LSS_MAX_DEVS; i++) {
+ for (j = 0; j < LSS_MAX_SHARED_DEVS; j++)
+ pwr->lss[i][j].state = PCI_D3hot;
+ }
+
+ return 0;
+}
+
+static const struct mid_pwr_device_info mid_info = {
+ .set_initial_state = mid_set_initial_state,
+};
+
+static const struct pci_device_id mid_pwr_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&mid_info },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&mid_info },
+ {}
+};
+
+static struct pci_driver mid_pwr_pci_driver = {
+ .name = "intel_mid_pwr",
+ .probe = mid_pwr_probe,
+ .id_table = mid_pwr_pci_ids,
+};
+
+builtin_pci_driver(mid_pwr_pci_driver);
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index 5ee360a951ce..051d264fce2e 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -24,7 +24,7 @@
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
@@ -407,6 +407,32 @@ static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
i2c_register_board_info(pentry->host_num, &i2c_info, 1);
}
+static void __init sfi_handle_sd_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct mid_sd_board_info sd_info;
+ void *pdata;
+
+ memset(&sd_info, 0, sizeof(sd_info));
+ strncpy(sd_info.name, pentry->name, SFI_NAME_LEN);
+ sd_info.bus_num = pentry->host_num;
+ sd_info.max_clk = pentry->max_freq;
+ sd_info.addr = pentry->addr;
+ pr_debug("SD bus = %d, name = %16.16s, max_clk = %d, addr = 0x%x\n",
+ sd_info.bus_num,
+ sd_info.name,
+ sd_info.max_clk,
+ sd_info.addr);
+ pdata = intel_mid_sfi_get_pdata(dev, &sd_info);
+ if (IS_ERR(pdata))
+ return;
+
+ /* Nothing we can do with this for now */
+ sd_info.platform_data = pdata;
+
+ pr_debug("Successfully registered %16.16s", sd_info.name);
+}
+
extern struct devs_id *const __x86_intel_mid_dev_start[],
*const __x86_intel_mid_dev_end[];
@@ -490,6 +516,9 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
case SFI_DEV_TYPE_I2C:
sfi_handle_i2c_dev(pentry, dev);
break;
+ case SFI_DEV_TYPE_SD:
+ sfi_handle_sd_dev(pentry, dev);
+ break;
case SFI_DEV_TYPE_UART:
case SFI_DEV_TYPE_HSI:
default:
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 27376081ddec..7c3077e58fa0 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/string.h>
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index e7604f62870d..f1aab8cdb33f 100644
--- a/arch/x86/platform/olpc/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -1,9 +1,12 @@
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/io.h>
+#include <asm/cpufeature.h>
+#include <asm/special_insns.h>
#include <asm/pgtable.h>
#include <asm/olpc_ofw.h>
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
index baf16e72e668..fd39301f25ac 100644
--- a/arch/x86/platform/ts5500/ts5500.c
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -23,7 +23,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/leds.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_data/gpio-ts5500.h>
#include <linux/platform_data/max197.h>
#include <linux/platform_device.h>
@@ -345,7 +345,3 @@ error:
return err;
}
device_initcall(ts5500_init);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
-MODULE_DESCRIPTION("Technologic Systems TS-5500 platform driver");
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 815fec6e05e2..66b2166ea4a1 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -40,8 +40,7 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
*/
return BIOS_STATUS_UNIMPLEMENTED;
- ret = efi_call((void *)__va(tab->function), (u64)which,
- a1, a2, a3, a4, a5);
+ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index e1c24631afbb..776c6592136c 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -8,7 +8,7 @@
* Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 8dd80050d705..cd5173a2733f 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -24,7 +24,7 @@
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index d5f64996394a..b12c26e2e309 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
+#include <linux/tboot.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
@@ -266,6 +267,35 @@ void notrace restore_processor_state(void)
EXPORT_SYMBOL(restore_processor_state);
#endif
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
+static void resume_play_dead(void)
+{
+ play_dead_common();
+ tboot_shutdown(TB_SHUTDOWN_WFS);
+ hlt_play_dead();
+}
+
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+ void (*play_dead)(void) = smp_ops.play_dead;
+ int ret;
+
+ /*
+ * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
+ * during hibernate image restoration, because it is likely that the
+ * monitored address will actually be written to at that time and the
+ * "dead" CPU will then attempt to execute instructions again, but the
+ * address in its instruction pointer may no longer be resolvable at that
+ * point (the page tables it used previously may have been overwritten by
+ * hibernate image data).
+ */
+ smp_ops.play_dead = resume_play_dead;
+ ret = disable_nonboot_cpus();
+ smp_ops.play_dead = play_dead;
+ return ret;
+}
+#endif
+
/*
* When bsp_check() is called in hibernate and suspend, cpu hotplug
* is disabled already. So it's unnessary to handle race condition between
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 009947d419a6..f2b5e6a5cf95 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -19,6 +19,7 @@
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
+#include <asm/tlbflush.h>
/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);
@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
* kernel's text (this value is passed in the image header).
*/
unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
/*
* Value of the cr3 register from before the hibernation (this value is passed
@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
pgd_t *temp_level4_pgt __visible;
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+ pmd_t *pmd;
+ pud_t *pud;
+
+ /*
+ * The new mapping only has to cover the page containing the image
+ * kernel's entry point (jump_address_phys), because the switch over to
+ * it is carried out by relocated code running from a page allocated
+ * specifically for this purpose and covered by the identity mapping, so
+ * the temporary kernel text mapping is only needed for the final jump.
+ * Moreover, in that mapping the virtual address of the image kernel's
+ * entry point must be the same as its virtual address in the image
+ * kernel (restore_jump_address), so the image kernel's
+ * restore_registers() code doesn't find itself in a different area of
+ * the virtual address space after switching over to the original page
+ * tables used by the image kernel.
+ */
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
+
+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
+
+ set_pmd(pmd + pmd_index(restore_jump_address),
+ __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+ set_pud(pud + pud_index(restore_jump_address),
+ __pud(__pa(pmd) | _KERNPG_TABLE));
+ set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+ __pgd(__pa(pud) | _KERNPG_TABLE));
+
+ return 0;
+}
static void *alloc_pgt_page(void *context)
{
@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
if (!temp_level4_pgt)
return -ENOMEM;
- /* It is safe to reuse the original kernel mapping */
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+ /* Prepare a temporary mapping for the kernel text */
+ result = set_up_temporary_text_mapping();
+ if (result)
+ return result;
/* Set up the direct mapping from scratch */
for (i = 0; i < nr_pfn_mapped; i++) {
@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
return 0;
}
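+/*
+ * Copy core_restore_code to a safe page and clear _PAGE_NX on whatever
+ * mapping level (PUD/PMD/PTE) covers that page so it can be executed.
+ */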
+static int relocate_restore_code(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ relocated_restore_code = get_safe_page(GFP_ATOMIC);
+ if (!relocated_restore_code)
+ return -ENOMEM;
+
+ memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+ /* Make the page containing the relocated code executable */
+ pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+ pud = pud_offset(pgd, relocated_restore_code);
+ if (pud_large(*pud)) {
+ set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+ } else {
+ pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+ if (pmd_large(*pmd)) {
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+ } else {
+ pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+ }
+ }
+ __flush_tlb_all();
+
+ return 0;
+}
+
int swsusp_arch_resume(void)
{
int error;
/* We have got enough memory and from now on we cannot recover */
- if ((error = set_up_temporary_mappings()))
+ error = set_up_temporary_mappings();
+ if (error)
return error;
- relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
- if (!relocated_restore_code)
- return -ENOMEM;
- memcpy(relocated_restore_code, &core_restore_code,
- &restore_registers - &core_restore_code);
+ error = relocate_restore_code();
+ if (error)
+ return error;
restore_image();
return 0;
@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
struct restore_data_record {
unsigned long jump_address;
+ unsigned long jump_address_phys;
unsigned long cr3;
unsigned long magic;
};
-#define RESTORE_MAGIC 0x0123456789ABCDEFUL
+#define RESTORE_MAGIC 0x123456789ABCDEF0UL
/**
* arch_hibernation_header_save - populate the architecture specific part
@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
if (max_size < sizeof(struct restore_data_record))
return -EOVERFLOW;
- rdr->jump_address = restore_jump_address;
+ rdr->jump_address = (unsigned long)&restore_registers;
+ rdr->jump_address_phys = __pa_symbol(&restore_registers);
rdr->cr3 = restore_cr3;
rdr->magic = RESTORE_MAGIC;
return 0;
@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
struct restore_data_record *rdr = addr;
restore_jump_address = rdr->jump_address;
+ jump_address_phys = rdr->jump_address_phys;
restore_cr3 = rdr->cr3;
return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 4400a43b9e28..8eee0e9c93f0 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -24,7 +24,6 @@
#include <asm/frame.h>
ENTRY(swsusp_arch_suspend)
- FRAME_BEGIN
movq $saved_context, %rax
movq %rsp, pt_regs_sp(%rax)
movq %rbp, pt_regs_bp(%rax)
@@ -44,44 +43,45 @@ ENTRY(swsusp_arch_suspend)
pushfq
popq pt_regs_flags(%rax)
- /* save the address of restore_registers */
- movq $restore_registers, %rax
- movq %rax, restore_jump_address(%rip)
/* save cr3 */
movq %cr3, %rax
movq %rax, restore_cr3(%rip)
+ FRAME_BEGIN
call swsusp_save
FRAME_END
ret
ENDPROC(swsusp_arch_suspend)
ENTRY(restore_image)
- /* switch to temporary page tables */
- movq $__PAGE_OFFSET, %rdx
- movq temp_level4_pgt(%rip), %rax
- subq %rdx, %rax
- movq %rax, %cr3
- /* Flush TLB */
- movq mmu_cr4_features(%rip), %rax
- movq %rax, %rdx
- andq $~(X86_CR4_PGE), %rdx
- movq %rdx, %cr4; # turn off PGE
- movq %cr3, %rcx; # flush TLB
- movq %rcx, %cr3;
- movq %rax, %cr4; # turn PGE back on
-
/* prepare to jump to the image kernel */
- movq restore_jump_address(%rip), %rax
- movq restore_cr3(%rip), %rbx
+ movq restore_jump_address(%rip), %r8
+ movq restore_cr3(%rip), %r9
+
+ /* prepare to switch to temporary page tables */
+ movq temp_level4_pgt(%rip), %rax
+ movq mmu_cr4_features(%rip), %rbx
/* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
+
+ /* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
jmpq *%rcx
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
+ /* switch to temporary page tables */
+ movq $__PAGE_OFFSET, %rcx
+ subq %rcx, %rax
+ movq %rax, %cr3
+ /* flush TLB */
+ movq %rbx, %rcx
+ andq $~(X86_CR4_PGE), %rcx
+ movq %rcx, %cr4; # turn off PGE
+ movq %cr3, %rcx; # flush TLB
+ movq %rcx, %cr3;
+ movq %rbx, %cr4; # turn PGE back on
.Lloop:
testq %rdx, %rdx
jz .Ldone
@@ -96,24 +96,16 @@ ENTRY(core_restore_code)
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
jmp .Lloop
+
.Ldone:
/* jump to the restore_registers address from the image header */
- jmpq *%rax
- /*
- * NOTE: This assumes that the boot kernel's text mapping covers the
- * image kernel's page containing restore_registers and the address of
- * this page is the same as in the image kernel's text mapping (it
- * should always be true, because the text mapping is linear, starting
- * from 0, and is supposed to cover the entire kernel text for every
- * kernel).
- *
- * code below belongs to the image kernel
- */
+ jmpq *%r8
+ /* code below belongs to the image kernel */
+ .align PAGE_SIZE
ENTRY(restore_registers)
- FRAME_BEGIN
/* go back to the original page tables */
- movq %rbx, %cr3
+ movq %r9, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movq mmu_cr4_features(%rip), %rax
@@ -152,6 +144,5 @@ ENTRY(restore_registers)
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)
- FRAME_END
ret
ENDPROC(restore_registers)
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index e69f4701a076..1104515d5ad2 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -241,6 +241,31 @@ static void toggle_nb_mca_mst_cpu(u16 nid)
__func__, PCI_FUNC(F3->devfn), NBCFG);
}
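+/* Runs on the target CPU via smp_call_function_single() from do_inject(). */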
+static void prepare_msrs(void *info)
+{
+ struct mce i_mce = *(struct mce *)info;
+ u8 b = i_mce.bank;
+
+ wrmsrl(MSR_IA32_MCG_STATUS, i_mce.mcgstatus);
+
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
+ if (i_mce.inject_flags == DFR_INT_INJ) {
+ wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), i_mce.status);
+ wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), i_mce.addr);
+ } else {
+ wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), i_mce.status);
+ wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), i_mce.addr);
+ }
+
+ wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), i_mce.misc);
+ } else {
+ wrmsrl(MSR_IA32_MCx_STATUS(b), i_mce.status);
+ wrmsrl(MSR_IA32_MCx_ADDR(b), i_mce.addr);
+ wrmsrl(MSR_IA32_MCx_MISC(b), i_mce.misc);
+ }
+
+}
+
static void do_inject(void)
{
u64 mcg_status = 0;
@@ -287,36 +312,9 @@ static void do_inject(void)
toggle_hw_mce_inject(cpu, true);
- wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
- (u32)mcg_status, (u32)(mcg_status >> 32));
-
- if (boot_cpu_has(X86_FEATURE_SMCA)) {
- if (inj_type == DFR_INT_INJ) {
- wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DESTAT(b),
- (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
- wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DEADDR(b),
- (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
- } else {
- wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_STATUS(b),
- (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
- wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_ADDR(b),
- (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
- }
-
- wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(b),
- (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
- } else {
- wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
- (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
- wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
- (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-
- wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
- (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
- }
+ i_mce.mcgstatus = mcg_status;
+ i_mce.inject_flags = inj_type;
+ smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
toggle_hw_mce_inject(cpu, false);
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 0b7a63d98440..705e3fffb4a1 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -8,6 +8,9 @@
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
+/* Hold the pgd entry used on booting additional CPUs */
+pgd_t trampoline_pgd_entry;
+
void __init reserve_real_mode(void)
{
phys_addr_t mem;
@@ -84,7 +87,7 @@ void __init setup_real_mode(void)
*trampoline_cr4_features = __read_cr4();
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+ trampoline_pgd[0] = trampoline_pgd_entry.pgd;
trampoline_pgd[511] = init_level4_pgt[511].pgd;
#endif
}
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 093a892026f9..a3d2c62fd805 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -72,12 +72,14 @@ BEGIN {
lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
- # All opcodes starting with lower-case 'v' or with (v1) superscript
+ # All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
# accepts VEX prefix
- vexok_opcode_expr = "^v.*"
+ vexok_opcode_expr = "^[vk].*"
vexok_expr = "\\(v1\\)"
# All opcodes with (v) superscript supports *only* VEX prefix
vexonly_expr = "\\(v\\)"
+ # All opcodes with (ev) superscript supports *only* EVEX prefix
+ evexonly_expr = "\\(ev\\)"
prefix_expr = "\\(Prefix\\)"
prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
@@ -95,6 +97,7 @@ BEGIN {
prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
+ prefix_num["EVEX"] = "INAT_PFX_EVEX"
clear_vars()
}
@@ -319,7 +322,9 @@ function convert_operands(count,opnd, i,j,imm,mod)
flags = add_flags(flags, "INAT_MODRM")
# check VEX codes
- if (match(ext, vexonly_expr))
+ if (match(ext, evexonly_expr))
+ flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
+ else if (match(ext, vexonly_expr))
flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
flags = add_flags(flags, "INAT_VEXOK")
diff --git a/arch/x86/um/delay.c b/arch/x86/um/delay.c
index f3fe1a688f7e..a8fb7ca4822b 100644
--- a/arch/x86/um/delay.c
+++ b/arch/x86/um/delay.c
@@ -7,7 +7,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/param.h>
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index db52a7fafcc2..44c88ad1841a 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -177,7 +177,6 @@ static struct apic xen_pv_apic = {
.get_apic_id = xen_get_apic_id,
.set_apic_id = xen_set_apic_id, /* Can be NULL on 32-bit. */
- .apic_id_mask = 0xFF << 24, /* Used by verify_local_APIC. Match with what xen_get_apic_id does. */
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index c8377fb26cdf..1daff5545c0a 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -1,7 +1,6 @@
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include "debugfs.h"
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index be14cc3e48d5..3be012115853 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -20,10 +20,121 @@
#include <linux/init.h>
#include <linux/string.h>
+#include <xen/xen.h>
#include <xen/xen-ops.h>
+#include <xen/interface/platform.h>
#include <asm/page.h>
#include <asm/setup.h>
+#include <asm/xen/hypercall.h>
+
+static efi_char16_t vendor[100] __initdata;
+
+static efi_system_table_t efi_systab_xen __initdata = {
+ .hdr = {
+ .signature = EFI_SYSTEM_TABLE_SIGNATURE,
+ .revision = 0, /* Initialized later. */
+ .headersize = 0, /* Ignored by Linux Kernel. */
+ .crc32 = 0, /* Ignored by Linux Kernel. */
+ .reserved = 0
+ },
+ .fw_vendor = EFI_INVALID_TABLE_ADDR, /* Initialized later. */
+ .fw_revision = 0, /* Initialized later. */
+ .con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
+ .con_in = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
+ .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
+ .con_out = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
+ .stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
+ .stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
+ .runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
+ /* Not used under Xen. */
+ .boottime = (efi_boot_services_t *)EFI_INVALID_TABLE_ADDR,
+ /* Not used under Xen. */
+ .nr_tables = 0, /* Initialized later. */
+ .tables = EFI_INVALID_TABLE_ADDR /* Initialized later. */
+};
+
+static const struct efi efi_xen __initconst = {
+ .systab = NULL, /* Initialized later. */
+ .runtime_version = 0, /* Initialized later. */
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+ .acpi20 = EFI_INVALID_TABLE_ADDR,
+ .smbios = EFI_INVALID_TABLE_ADDR,
+ .smbios3 = EFI_INVALID_TABLE_ADDR,
+ .sal_systab = EFI_INVALID_TABLE_ADDR,
+ .boot_info = EFI_INVALID_TABLE_ADDR,
+ .hcdp = EFI_INVALID_TABLE_ADDR,
+ .uga = EFI_INVALID_TABLE_ADDR,
+ .uv_systab = EFI_INVALID_TABLE_ADDR,
+ .fw_vendor = EFI_INVALID_TABLE_ADDR,
+ .runtime = EFI_INVALID_TABLE_ADDR,
+ .config_table = EFI_INVALID_TABLE_ADDR,
+ .get_time = xen_efi_get_time,
+ .set_time = xen_efi_set_time,
+ .get_wakeup_time = xen_efi_get_wakeup_time,
+ .set_wakeup_time = xen_efi_set_wakeup_time,
+ .get_variable = xen_efi_get_variable,
+ .get_next_variable = xen_efi_get_next_variable,
+ .set_variable = xen_efi_set_variable,
+ .query_variable_info = xen_efi_query_variable_info,
+ .update_capsule = xen_efi_update_capsule,
+ .query_capsule_caps = xen_efi_query_capsule_caps,
+ .get_next_high_mono_count = xen_efi_get_next_high_mono_count,
+ .reset_system = NULL, /* Functionality provided by Xen. */
+ .set_virtual_address_map = NULL, /* Not used under Xen. */
+ .flags = 0 /* Initialized later. */
+};
+
+static efi_system_table_t __init *xen_efi_probe(void)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_firmware_info,
+ .u.firmware_info = {
+ .type = XEN_FW_EFI_INFO,
+ .index = XEN_FW_EFI_CONFIG_TABLE
+ }
+ };
+ union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;
+
+ if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
+ return NULL;
+
+ /* Here we know that Xen runs on an EFI platform. */
+
+ efi = efi_xen;
+
+ efi_systab_xen.tables = info->cfg.addr;
+ efi_systab_xen.nr_tables = info->cfg.nent;
+
+ op.cmd = XENPF_firmware_info;
+ op.u.firmware_info.type = XEN_FW_EFI_INFO;
+ op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
+ info->vendor.bufsz = sizeof(vendor);
+ set_xen_guest_handle(info->vendor.name, vendor);
+
+ if (HYPERVISOR_platform_op(&op) == 0) {
+ efi_systab_xen.fw_vendor = __pa_symbol(vendor);
+ efi_systab_xen.fw_revision = info->vendor.revision;
+ } else
+ efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");
+
+ op.cmd = XENPF_firmware_info;
+ op.u.firmware_info.type = XEN_FW_EFI_INFO;
+ op.u.firmware_info.index = XEN_FW_EFI_VERSION;
+
+ if (HYPERVISOR_platform_op(&op) == 0)
+ efi_systab_xen.hdr.revision = info->version;
+
+ op.cmd = XENPF_firmware_info;
+ op.u.firmware_info.type = XEN_FW_EFI_INFO;
+ op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;
+
+ if (HYPERVISOR_platform_op(&op) == 0)
+ efi.runtime_version = info->version;
+
+ return &efi_systab_xen;
+}
void __init xen_efi_init(void)
{
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 760789ae8562..cd993051aed7 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -23,7 +23,7 @@
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
@@ -59,6 +59,7 @@
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
+#include <asm/xen/cpuid.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
@@ -118,6 +119,10 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
*/
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+/* Linux <-> Xen vCPU id mapping */
+DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
+
enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);
@@ -179,7 +184,7 @@ static void clamp_max_cpus(void)
#endif
}
-static void xen_vcpu_setup(int cpu)
+void xen_vcpu_setup(int cpu)
{
struct vcpu_register_vcpu_info info;
int err;
@@ -202,8 +207,9 @@ static void xen_vcpu_setup(int cpu)
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
return;
}
- if (cpu < MAX_VIRT_CPUS)
- per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
+ per_cpu(xen_vcpu, cpu) =
+ &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
if (!have_vcpu_info_placement) {
if (cpu >= MAX_VIRT_CPUS)
@@ -223,7 +229,8 @@ static void xen_vcpu_setup(int cpu)
hypervisor has no unregister variant and this hypercall does not
allow to over-write info.mfn and info.offset.
*/
- err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
+ &info);
if (err) {
printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
@@ -247,10 +254,11 @@ void xen_vcpu_restore(void)
for_each_possible_cpu(cpu) {
bool other_cpu = (cpu != smp_processor_id());
- bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
+ bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
+ NULL);
if (other_cpu && is_up &&
- HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
+ HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
BUG();
xen_setup_runstate_info(cpu);
@@ -259,7 +267,7 @@ void xen_vcpu_restore(void)
xen_vcpu_setup(cpu);
if (other_cpu && is_up &&
- HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
+ HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
BUG();
}
}
@@ -521,9 +529,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
preempt_disable();
- pagefault_disable(); /* Avoid warnings due to being atomic. */
- __get_user(dummy, (unsigned char __user __force *)v);
- pagefault_enable();
+ probe_kernel_read(&dummy, v, 1);
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
@@ -590,7 +596,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
unsigned long frames[pages];
int f;
@@ -639,7 +645,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
unsigned long frames[pages];
int f;
@@ -1137,8 +1143,11 @@ void xen_setup_vcpu_info_placement(void)
{
int cpu;
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ /* Set up direct vCPU id mapping for PV guests. */
+ per_cpu(xen_vcpu_id, cpu) = cpu;
xen_vcpu_setup(cpu);
+ }
/* xen_vcpu_setup managed to place the vcpu_info within the
* percpu area for all cpus, so make use of it. Note that for
@@ -1729,6 +1738,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
#endif
xen_raw_console_write("about to get started...\n");
+ /* Let's presume PV guests always boot on vCPU with id 0. */
+ per_cpu(xen_vcpu_id, 0) = 0;
+
xen_setup_runstate_info(0);
xen_efi_init();
@@ -1770,9 +1782,10 @@ void __ref xen_hvm_init_shared_info(void)
* in that case multiple vcpus might be online. */
for_each_online_cpu(cpu) {
/* Leave it to be NULL. */
- if (cpu >= MAX_VIRT_CPUS)
+ if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
continue;
- per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ per_cpu(xen_vcpu, cpu) =
+ &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
}
}
@@ -1797,6 +1810,12 @@ static void __init init_hvm_pv_info(void)
xen_setup_features();
+ cpuid(base + 4, &eax, &ebx, &ecx, &edx);
+ if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
+ this_cpu_write(xen_vcpu_id, ebx);
+ else
+ this_cpu_write(xen_vcpu_id, smp_processor_id());
+
pv_info.name = "Xen HVM";
xen_domain_type = XEN_HVM_DOMAIN;
@@ -1808,6 +1827,10 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
int cpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
+ if (cpu_acpi_id(cpu) != U32_MAX)
+ per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
+ else
+ per_cpu(xen_vcpu_id, cpu) = cpu;
xen_vcpu_setup(cpu);
if (xen_have_vector_callback) {
if (xen_feature(XENFEAT_hvm_safe_pvclock))
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index e079500b17f3..de4144c24f1c 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -111,63 +111,18 @@ int arch_gnttab_init(unsigned long nr_shared)
}
#ifdef CONFIG_XEN_PVH
-#include <xen/balloon.h>
#include <xen/events.h>
-#include <linux/slab.h>
-static int __init xlated_setup_gnttab_pages(void)
-{
- struct page **pages;
- xen_pfn_t *pfns;
- void *vaddr;
- int rc;
- unsigned int i;
- unsigned long nr_grant_frames = gnttab_max_grant_frames();
-
- BUG_ON(nr_grant_frames == 0);
- pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
- if (!pfns) {
- kfree(pages);
- return -ENOMEM;
- }
- rc = alloc_xenballooned_pages(nr_grant_frames, pages);
- if (rc) {
- pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__,
- nr_grant_frames, rc);
- kfree(pages);
- kfree(pfns);
- return rc;
- }
- for (i = 0; i < nr_grant_frames; i++)
- pfns[i] = page_to_pfn(pages[i]);
-
- vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL);
- if (!vaddr) {
- pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
- nr_grant_frames, rc);
- free_xenballooned_pages(nr_grant_frames, pages);
- kfree(pages);
- kfree(pfns);
- return -ENOMEM;
- }
- kfree(pages);
-
- xen_auto_xlat_grant_frames.pfn = pfns;
- xen_auto_xlat_grant_frames.count = nr_grant_frames;
- xen_auto_xlat_grant_frames.vaddr = vaddr;
-
- return 0;
-}
-
+#include <xen/xen-ops.h>
static int __init xen_pvh_gnttab_setup(void)
{
if (!xen_pvh_domain())
return -ENODEV;
- return xlated_setup_gnttab_pages();
+ xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
+
+ return xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
+ &xen_auto_xlat_grant_frames.vaddr,
+ xen_auto_xlat_grant_frames.count);
}
/* Call it _before_ __gnttab_init as we need to initialize the
* xen_auto_xlat_grant_frames first. */
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index a1207cb6472a..33e92955e09d 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -109,7 +109,8 @@ static void xen_safe_halt(void)
static void xen_halt(void)
{
if (irqs_disabled())
- HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+ HYPERVISOR_vcpu_op(VCPUOP_down,
+ xen_vcpu_nr(smp_processor_id()), NULL);
else
xen_safe_halt();
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 478a2de543a5..7d5afdb417cc 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -43,7 +43,8 @@
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
@@ -1113,7 +1114,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
/* NOTE: The loop is more greedy than the cleanup_highmap variant.
* We include the PMD passed in on _both_ boundaries. */
- for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;
@@ -1551,41 +1552,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
- /* If there's an existing pte, then don't allow _PAGE_RW to be set */
- if (pte_val_ma(*ptep) & _PAGE_PRESENT)
- pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
- pte_val_ma(pte));
-
- return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
- unsigned long pfn;
-
- if (xen_feature(XENFEAT_writable_page_tables) ||
- xen_feature(XENFEAT_auto_translated_physmap) ||
- xen_start_info->mfn_list >= __START_KERNEL_map)
- return pte;
-
- /*
- * Pages belonging to the initial p2m list mapped outside the default
- * address range must be mapped read-only. This region contains the
- * page tables for mapping the p2m list, too, and page tables MUST be
- * mapped read-only.
- */
- pfn = pte_pfn(pte);
- if (pfn >= xen_start_info->first_p2m_pfn &&
- pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
- pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
- return pte;
-}
-#endif /* CONFIG_X86_64 */
-
/*
* Init-time set_pte while constructing initial pagetables, which
* doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1566,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
* so always write the PTE directly and rely on Xen trapping and
* emulating any updates as necessary.
*/
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
{
- if (pte_mfn(pte) != INVALID_P2M_ENTRY)
- pte = mask_rw_pte(ptep, pte);
- else
- pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+ unsigned long pfn;
+
+ /*
+ * Pages belonging to the initial p2m list mapped outside the default
+ * address range must be mapped read-only. This region contains the
+ * page tables for mapping the p2m list, too, and page tables MUST be
+ * mapped read-only.
+ */
+ pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+ if (xen_start_info->mfn_list < __START_KERNEL_map &&
+ pfn >= xen_start_info->first_p2m_pfn &&
+ pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+ pte &= ~_PAGE_RW;
+#endif
+ pte = pte_pfn_to_mfn(pte);
+ return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+ /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+ if (pte_mfn(pte) != INVALID_P2M_ENTRY
+ && pte_val_ma(*ptep) & _PAGE_PRESENT)
+ pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ pte_val_ma(pte));
+#endif
native_set_pte(ptep, pte);
}
@@ -2407,6 +2397,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.alloc_pud = xen_alloc_pud;
pv_mmu_ops.release_pud = xen_release_pud;
#endif
+ pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
#ifdef CONFIG_X86_64
pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2446,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pte_val = PV_CALLEE_SAVE(xen_pte_val),
.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
- .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+ .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
#ifdef CONFIG_X86_PAE
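
The mask_rw_pte() helpers are folded into a dedicated boot-time make_pte: xen_make_pte_init() applies the read-only restriction for pages of the initial p2m list while the early page tables are being built, and xen_post_allocator_init() later switches pv_mmu_ops.make_pte back to the regular xen_make_pte. In outline (derived from the hunks above, illustration only):

	/*
	 * boot:   xen_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_init);
	 *             -> early PTEs covering the p2m list lose _PAGE_RW
	 * later:  xen_post_allocator_init():
	 *             pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
	 *             -> normal PTE construction once the initial tables are final
	 */
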
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index cab9f766bb06..37129db76d33 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -60,7 +60,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
if (unlikely(!slab_is_available()))
return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
- return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ return (void *)__get_free_page(GFP_KERNEL);
}
static void __ref free_p2m_page(void *p)
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 9586ff32810c..d37a0c7f82cb 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <xen/platform_pci.h>
#include "xen-ops.h"
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 9466354d3e49..32bdc2c90297 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -547,7 +547,7 @@ void xen_pmu_init(int cpu)
return;
fail:
- pr_warn_once("Could not initialize VPMU for cpu %d, error %d\n",
+ pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
cpu, err);
free_pages((unsigned long)xenpmu_data, 0);
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e345891450c3..176425233e4d 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -4,7 +4,7 @@
* Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 719cf291dcdf..0b4d04c8ab4d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -322,6 +322,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
}
+
+ /*
+ * Setup vcpu_info for boot CPU.
+ */
+ if (xen_hvm_domain())
+ xen_vcpu_setup(0);
+
/*
* The alternative logic (which patches the unlock/lock) runs before
* the smp bootup up code is activated. Hence we need to set this up
@@ -454,7 +461,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
#endif
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
- if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
+ if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
BUG();
kfree(ctxt);
@@ -492,7 +499,7 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
if (rc)
return rc;
- rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+ rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
BUG_ON(rc);
while (cpu_report_state(cpu) != CPU_ONLINE)
@@ -520,7 +527,8 @@ static int xen_cpu_disable(void)
static void xen_cpu_die(unsigned int cpu)
{
- while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+ while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up,
+ xen_vcpu_nr(cpu), NULL)) {
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/10);
}
@@ -536,7 +544,7 @@ static void xen_cpu_die(unsigned int cpu)
static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
play_dead_common();
- HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+ HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
cpu_bringup();
/*
* commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
@@ -576,7 +584,7 @@ static void stop_self(void *v)
set_cpu_online(cpu, false);
- HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
+ HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
BUG();
}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 6deba5bc7e34..67356d29d74d 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -11,8 +11,6 @@
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
-#include <linux/kernel_stat.h>
-#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>
@@ -31,44 +29,6 @@
/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP 100000
-#define NS_PER_TICK (1000000000LL / HZ)
-
-/* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
-
-/* unused ns of stolen time */
-static DEFINE_PER_CPU(u64, xen_residual_stolen);
-
-static void do_stolen_accounting(void)
-{
- struct vcpu_runstate_info state;
- struct vcpu_runstate_info *snap;
- s64 runnable, offline, stolen;
- cputime_t ticks;
-
- xen_get_runstate_snapshot(&state);
-
- WARN_ON(state.state != RUNSTATE_running);
-
- snap = this_cpu_ptr(&xen_runstate_snapshot);
-
- /* work out how much time the VCPU has not been runn*ing* */
- runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
- offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
-
- *snap = state;
-
- /* Add the appropriate number of ticks of stolen time,
- including any left-overs from last time. */
- stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
-
- if (stolen < 0)
- stolen = 0;
-
- ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
- __this_cpu_write(xen_residual_stolen, stolen);
- account_steal_ticks(ticks);
-}
/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
@@ -263,8 +223,10 @@ static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
int cpu = smp_processor_id();
- if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
- HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
+ NULL) ||
+ HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
+ NULL))
BUG();
return 0;
@@ -274,7 +236,8 @@ static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
{
int cpu = smp_processor_id();
- if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
+ NULL))
BUG();
return 0;
@@ -293,7 +256,8 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
/* Get an event anyway, even if the timeout is already expired */
single.flags = 0;
- ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
+ ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
+ &single);
BUG_ON(ret != 0);
return ret;
@@ -335,8 +299,6 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
- do_stolen_accounting();
-
return ret;
}
@@ -394,13 +356,15 @@ void xen_timer_resume(void)
return;
for_each_online_cpu(cpu) {
- if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
+ xen_vcpu_nr(cpu), NULL))
BUG();
}
}
static const struct pv_time_ops xen_time_ops __initconst = {
.sched_clock = xen_clocksource_read,
+ .steal_clock = xen_steal_clock,
};
static void __init xen_time_init(void)
@@ -414,7 +378,8 @@ static void __init xen_time_init(void)
clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
- if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
+ NULL) == 0) {
/* Successfully turned off 100Hz tick, so we have the
vcpuop-based timer interface */
printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
@@ -431,6 +396,8 @@ static void __init xen_time_init(void)
xen_setup_timer(cpu);
xen_setup_cpu_clockevents();
+ xen_time_setup_guest();
+
if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
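
The open-coded do_stolen_accounting() goes away because stolen-time accounting now flows through the generic paravirt steal_clock hook: xen_time_setup_guest() plus the .steal_clock = xen_steal_clock assignment wire Xen into that path. Conceptually the common helper just reports accumulated not-running time from the same runstate data the removed code used; a sketch under that assumption (not the exact code added elsewhere in the series):

	static u64 xen_steal_clock_sketch(void)
	{
		struct vcpu_runstate_info state;

		xen_get_runstate_snapshot(&state);	/* current vCPU's runstate */
		return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
	}
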
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 4140b070f2e9..3cbce3b085e7 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -76,6 +76,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
bool xen_vcpu_stolen(int vcpu);
+void xen_vcpu_setup(int cpu);
void xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index fd8017ce298a..e7a23f2a519a 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -98,6 +98,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
return result; \
}
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
#else /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \
@@ -138,18 +158,42 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
return vval; \
}
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned int tmp, vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
+ " l32i %0, %3, 0\n" \
+ " " #op " %1, %0, %2\n" \
+ " s32i %1, %3, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
#endif /* XCHAL_HAVE_S32C1I */
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
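
The new ATOMIC_FETCH_OP() template gives xtensa the atomic_fetch_add/sub/and/or/xor family, which returns the value the variable held before the operation, while the existing atomic_*_return helpers keep returning the value after it. A small usage illustration (illustration only, not from the patch):

	static void atomic_fetch_example(void)
	{
		atomic_t v = ATOMIC_INIT(3);
		int before, after;

		before = atomic_fetch_add(2, &v);	/* before == 3, v now holds 5 */
		after  = atomic_add_return(2, &v);	/* after  == 7, v now holds 7 */
	}
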
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index d38eb9237e64..1065bc8bcae5 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
pte_t *ptep;
int i;
- ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ ptep = (pte_t *)__get_free_page(GFP_KERNEL);
if (!ptep)
return NULL;
for (i = 0; i < 1024; i++)
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 1d95fa5dcd10..a36221cf6363 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -11,6 +11,9 @@
#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
/*
* spinlock
*
@@ -29,8 +32,11 @@
*/
#define arch_spin_is_locked(x) ((x)->slock != 0)
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->slock, !VAL);
+}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
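
arch_spin_unlock_wait() is rewritten on top of smp_cond_load_acquire(), which spins with cpu_relax() until the loaded value satisfies the condition and then provides ACQUIRE ordering, so the waiter cannot observe memory accesses from before the lock was dropped. The asm-generic fallback looks roughly like this (shown for context only):

	#define smp_cond_load_acquire(ptr, cond_expr) ({		\
		typeof(ptr) __PTR = (ptr);				\
		typeof(*ptr) VAL;					\
		for (;;) {						\
			VAL = READ_ONCE(*__PTR);			\
			if (cond_expr)					\
				break;					\
			cpu_relax();					\
		}							\
		smp_acquire__after_ctrl_dep();				\
		VAL;							\
	})
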
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index ef90479e0397..0fecc8a2c0b5 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = {
.read = xtensa_pmu_read,
};
-static void xtensa_pmu_setup(void)
+static int xtensa_pmu_setup(int cpu)
{
unsigned i;
@@ -413,21 +413,7 @@ static void xtensa_pmu_setup(void)
set_er(0, XTENSA_PMU_PMCTRL(i));
set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
}
-}
-
-static int xtensa_pmu_notifier(struct notifier_block *self,
- unsigned long action, void *data)
-{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- xtensa_pmu_setup();
- break;
-
- default:
- break;
- }
-
- return NOTIFY_OK;
+ return 0;
}
static int __init xtensa_pmu_init(void)
@@ -435,7 +421,13 @@ static int __init xtensa_pmu_init(void)
int ret;
int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
- perf_cpu_notifier(xtensa_pmu_notifier);
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
+ "AP_PERF_XTENSA_STARTING", xtensa_pmu_setup,
+ NULL);
+ if (ret) {
+ pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
+ return ret;
+ }
#if XTENSA_FAKE_NMI
enable_irq(irq);
#else
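
The ad-hoc CPU notifier is replaced by the cpuhp state machine: cpuhp_setup_state() registers xtensa_pmu_setup() for a STARTING-class state, so it runs on each CPU as it is brought online (and on the CPUs already online at registration time), with the callback now taking the CPU number and returning int. The general pattern, as an illustration only:

	/*
	 *   static int my_pmu_starting(unsigned int cpu) { ...; return 0; }
	 *
	 *   ret = cpuhp_setup_state(CPUHP_AP_..._STARTING, "AP_..._STARTING",
	 *                           my_pmu_starting, NULL);   /- NULL: no teardown callback -/
	 */
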
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 9735691f37f1..143251ede897 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -24,8 +24,8 @@
#include <linux/percpu.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
# include <linux/console.h>
@@ -255,7 +255,6 @@ void __init early_init_devtree(void *params)
static int __init xtensa_device_probe(void)
{
of_clk_init(NULL);
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 7f4a1fdb1502..2725e08ef353 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -110,7 +110,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
index 5f4bd71971d6..4904c5c16918 100644
--- a/arch/xtensa/platforms/xt2000/setup.c
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -113,7 +113,6 @@ void platform_heartbeat(void)
}
//#define RS_TABLE_SIZE 2
-//#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF|UPF_SKIP_TEST)
#define _SERIAL_PORT(_base,_irq) \
{ \