Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/Kconfig | 1
-rw-r--r-- arch/alpha/include/asm/atomic.h | 88
-rw-r--r-- arch/alpha/include/asm/param.h | 8
-rw-r--r-- arch/alpha/include/asm/spinlock.h | 4
-rw-r--r-- arch/alpha/include/asm/unistd.h | 3
-rw-r--r-- arch/alpha/include/uapi/asm/param.h | 7
-rw-r--r-- arch/alpha/include/uapi/asm/unistd.h | 2
-rw-r--r-- arch/alpha/kernel/entry.S | 399
-rw-r--r-- arch/alpha/kernel/irq_alpha.c | 2
-rw-r--r-- arch/alpha/kernel/smp.c | 15
-rw-r--r-- arch/alpha/kernel/sys_dp264.c | 8
-rw-r--r-- arch/alpha/kernel/sys_marvel.c | 3
-rw-r--r-- arch/alpha/kernel/systbls.S | 2
-rw-r--r-- arch/alpha/kernel/time.c | 4
-rw-r--r-- arch/alpha/kernel/traps.c | 12
-rw-r--r-- arch/arc/include/asm/entry.h | 1
-rw-r--r-- arch/arm/Kconfig | 7
-rw-r--r-- arch/arm/Kconfig.debug | 14
-rw-r--r-- arch/arm/Makefile | 18
-rw-r--r-- arch/arm/boot/dts/atlas6.dtsi | 22
-rw-r--r-- arch/arm/boot/dts/imx28-apx4devkit.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx28-evk.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx28-m28evk.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx28.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/imx51-babbage.dts | 13
-rw-r--r-- arch/arm/boot/dts/imx53-mba53.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx53.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/prima2.dtsi | 16
-rw-r--r-- arch/arm/boot/dts/stih416-pinctrl.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/stih416.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/twl4030.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/vf610.dtsi | 8
-rw-r--r-- arch/arm/common/edma.c | 1
-rw-r--r-- arch/arm/common/mcpm_platsmp.c | 4
-rw-r--r-- arch/arm/configs/da8xx_omapl_defconfig | 2
-rw-r--r-- arch/arm/configs/davinci_all_defconfig | 2
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 6
-rw-r--r-- arch/arm/configs/nhk8815_defconfig | 7
-rw-r--r-- arch/arm/include/asm/a.out-core.h | 45
-rw-r--r-- arch/arm/include/asm/arch_timer.h | 2
-rw-r--r-- arch/arm/include/asm/cputype.h | 7
-rw-r--r-- arch/arm/include/asm/elf.h | 6
-rw-r--r-- arch/arm/include/asm/mmu.h | 3
-rw-r--r-- arch/arm/include/asm/mmu_context.h | 20
-rw-r--r-- arch/arm/include/asm/page.h | 2
-rw-r--r-- arch/arm/include/asm/processor.h | 4
-rw-r--r-- arch/arm/include/asm/thread_info.h | 1
-rw-r--r-- arch/arm/include/asm/tlbflush.h | 16
-rw-r--r-- arch/arm/include/asm/virt.h | 12
-rw-r--r-- arch/arm/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/arm/include/uapi/asm/a.out.h | 34
-rw-r--r-- arch/arm/kernel/entry-armv.S | 103
-rw-r--r-- arch/arm/kernel/entry-v7m.S | 2
-rw-r--r-- arch/arm/kernel/fiq.c | 19
-rw-r--r-- arch/arm/kernel/head-common.S | 1
-rw-r--r-- arch/arm/kernel/head-nommu.S | 2
-rw-r--r-- arch/arm/kernel/head.S | 2
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 4
-rw-r--r-- arch/arm/kernel/hyp-stub.S | 4
-rw-r--r-- arch/arm/kernel/perf_event_cpu.c | 6
-rw-r--r-- arch/arm/kernel/process.c | 49
-rw-r--r-- arch/arm/kernel/psci_smp.c | 3
-rw-r--r-- arch/arm/kernel/setup.c | 3
-rw-r--r-- arch/arm/kernel/signal.c | 56
-rw-r--r-- arch/arm/kernel/signal.h | 12
-rw-r--r-- arch/arm/kernel/smp.c | 18
-rw-r--r-- arch/arm/kernel/smp_tlb.c | 17
-rw-r--r-- arch/arm/kernel/smp_twd.c | 6
-rw-r--r-- arch/arm/kernel/traps.c | 46
-rw-r--r-- arch/arm/kernel/vmlinux.lds.S | 17
-rw-r--r-- arch/arm/lib/delay.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-dm365-evm.c | 2
-rw-r--r-- arch/arm/mach-davinci/dm355.c | 2
-rw-r--r-- arch/arm/mach-davinci/dm365.c | 2
-rw-r--r-- arch/arm/mach-exynos/Kconfig | 1
-rw-r--r-- arch/arm/mach-exynos/Makefile | 2
-rw-r--r-- arch/arm/mach-exynos/common.c | 26
-rw-r--r-- arch/arm/mach-exynos/common.h | 1
-rw-r--r-- arch/arm/mach-exynos/cpuidle.c | 1
-rw-r--r-- arch/arm/mach-exynos/headsmp.S | 2
-rw-r--r-- arch/arm/mach-exynos/include/mach/memory.h | 5
-rw-r--r-- arch/arm/mach-exynos/platsmp.c | 4
-rw-r--r-- arch/arm/mach-exynos/pm.c | 6
-rw-r--r-- arch/arm/mach-footbridge/dc21285.c | 2
-rw-r--r-- arch/arm/mach-highbank/highbank.c | 7
-rw-r--r-- arch/arm/mach-highbank/platsmp.c | 2
-rw-r--r-- arch/arm/mach-imx/clk-imx6q.c | 5
-rw-r--r-- arch/arm/mach-imx/clk-vf610.c | 2
-rw-r--r-- arch/arm/mach-imx/mx27.h | 2
-rw-r--r-- arch/arm/mach-imx/platsmp.c | 2
-rw-r--r-- arch/arm/mach-keystone/keystone.c | 2
-rw-r--r-- arch/arm/mach-keystone/platsmp.c | 2
-rw-r--r-- arch/arm/mach-msm/headsmp.S | 2
-rw-r--r-- arch/arm/mach-msm/platsmp.c | 6
-rw-r--r-- arch/arm/mach-msm/timer.c | 4
-rw-r--r-- arch/arm/mach-mvebu/coherency.c | 2
-rw-r--r-- arch/arm/mach-mvebu/headsmp.S | 2
-rw-r--r-- arch/arm/mach-mvebu/platsmp.c | 5
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 2
-rw-r--r-- arch/arm/mach-omap2/board-generic.c | 23
-rw-r--r-- arch/arm/mach-omap2/omap-headsmp.S | 2
-rw-r--r-- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 4
-rw-r--r-- arch/arm/mach-omap2/omap-wakeupgen.c | 4
-rw-r--r-- arch/arm/mach-prima2/headsmp.S | 2
-rw-r--r-- arch/arm/mach-prima2/platsmp.c | 4
-rw-r--r-- arch/arm/mach-pxa/em-x270.c | 17
-rw-r--r-- arch/arm/mach-pxa/mainstone.c | 3
-rw-r--r-- arch/arm/mach-pxa/pcm990-baseboard.c | 3
-rw-r--r-- arch/arm/mach-pxa/poodle.c | 4
-rw-r--r-- arch/arm/mach-pxa/spitz.c | 4
-rw-r--r-- arch/arm/mach-pxa/stargate2.c | 3
-rw-r--r-- arch/arm/mach-s3c24xx/Kconfig | 2
-rw-r--r-- arch/arm/mach-s3c24xx/clock-s3c2410.c | 161
-rw-r--r-- arch/arm/mach-s3c24xx/clock-s3c2440.c | 3
-rw-r--r-- arch/arm/mach-shmobile/headsmp-scu.S | 1
-rw-r--r-- arch/arm/mach-shmobile/headsmp.S | 2
-rw-r--r-- arch/arm/mach-shmobile/smp-emev2.c | 2
-rw-r--r-- arch/arm/mach-shmobile/smp-r8a7779.c | 2
-rw-r--r-- arch/arm/mach-shmobile/smp-sh73a0.c | 2
-rw-r--r-- arch/arm/mach-socfpga/headsmp.S | 1
-rw-r--r-- arch/arm/mach-socfpga/platsmp.c | 2
-rw-r--r-- arch/arm/mach-spear/generic.h | 2
-rw-r--r-- arch/arm/mach-spear/platsmp.c | 4
-rw-r--r-- arch/arm/mach-sti/Kconfig | 3
-rw-r--r-- arch/arm/mach-sti/platsmp.c | 6
-rw-r--r-- arch/arm/mach-tegra/platsmp.c | 4
-rw-r--r-- arch/arm/mach-tegra/pm.c | 2
-rw-r--r-- arch/arm/mach-ux500/platsmp.c | 4
-rw-r--r-- arch/arm/mach-zynq/common.c | 2
-rw-r--r-- arch/arm/mach-zynq/common.h | 2
-rw-r--r-- arch/arm/mach-zynq/headsmp.S | 2
-rw-r--r-- arch/arm/mach-zynq/platsmp.c | 6
-rw-r--r-- arch/arm/mm/Kconfig | 34
-rw-r--r-- arch/arm/mm/context.c | 3
-rw-r--r-- arch/arm/mm/mmu.c | 57
-rw-r--r-- arch/arm/mm/proc-arm1020.S | 2
-rw-r--r-- arch/arm/mm/proc-arm1020e.S | 2
-rw-r--r-- arch/arm/mm/proc-arm1022.S | 2
-rw-r--r-- arch/arm/mm/proc-arm1026.S | 3
-rw-r--r-- arch/arm/mm/proc-arm720.S | 2
-rw-r--r-- arch/arm/mm/proc-arm740.S | 2
-rw-r--r-- arch/arm/mm/proc-arm7tdmi.S | 2
-rw-r--r-- arch/arm/mm/proc-arm920.S | 2
-rw-r--r-- arch/arm/mm/proc-arm922.S | 2
-rw-r--r-- arch/arm/mm/proc-arm925.S | 2
-rw-r--r-- arch/arm/mm/proc-arm926.S | 2
-rw-r--r-- arch/arm/mm/proc-arm940.S | 2
-rw-r--r-- arch/arm/mm/proc-arm946.S | 2
-rw-r--r-- arch/arm/mm/proc-arm9tdmi.S | 2
-rw-r--r-- arch/arm/mm/proc-fa526.S | 2
-rw-r--r-- arch/arm/mm/proc-feroceon.S | 2
-rw-r--r-- arch/arm/mm/proc-mohawk.S | 2
-rw-r--r-- arch/arm/mm/proc-sa110.S | 2
-rw-r--r-- arch/arm/mm/proc-sa1100.S | 2
-rw-r--r-- arch/arm/mm/proc-v6.S | 2
-rw-r--r-- arch/arm/mm/proc-v7-2level.S | 6
-rw-r--r-- arch/arm/mm/proc-v7-3level.S | 6
-rw-r--r-- arch/arm/mm/proc-v7.S | 13
-rw-r--r-- arch/arm/mm/proc-xsc3.S | 2
-rw-r--r-- arch/arm/mm/proc-xscale.S | 2
-rw-r--r-- arch/arm/plat-samsung/Kconfig | 7
-rw-r--r-- arch/arm/plat-samsung/Makefile | 2
-rw-r--r-- arch/arm/plat-samsung/include/plat/clock.h | 5
-rw-r--r-- arch/arm/plat-samsung/include/plat/pm.h | 8
-rw-r--r-- arch/arm/plat-samsung/pm.c | 14
-rw-r--r-- arch/arm/plat-versatile/platsmp.c | 6
-rw-r--r-- arch/arm/xen/enlighten.c | 2
-rw-r--r-- arch/arm64/include/asm/arch_timer.h | 2
-rw-r--r-- arch/arm64/include/asm/debug-monitors.h | 7
-rw-r--r-- arch/arm64/include/asm/system_misc.h | 3
-rw-r--r-- arch/arm64/include/asm/thread_info.h | 4
-rw-r--r-- arch/arm64/include/asm/virt.h | 13
-rw-r--r-- arch/arm64/kernel/debug-monitors.c | 6
-rw-r--r-- arch/arm64/kernel/entry.S | 2
-rw-r--r-- arch/arm64/kernel/hw_breakpoint.c | 4
-rw-r--r-- arch/arm64/kernel/process.c | 4
-rw-r--r-- arch/arm64/kernel/smp.c | 23
-rw-r--r-- arch/arm64/mm/fault.c | 46
-rw-r--r-- arch/blackfin/kernel/perf_event.c | 2
-rw-r--r-- arch/blackfin/kernel/setup.c | 4
-rw-r--r-- arch/blackfin/mach-bf561/smp.c | 6
-rw-r--r-- arch/blackfin/mach-common/cache-c.c | 4
-rw-r--r-- arch/blackfin/mach-common/ints-priority.c | 2
-rw-r--r-- arch/blackfin/mach-common/smp.c | 18
-rw-r--r-- arch/cris/arch-v32/kernel/smp.c | 2
-rw-r--r-- arch/frv/kernel/setup.c | 2
-rw-r--r-- arch/hexagon/kernel/setup.c | 2
-rw-r--r-- arch/hexagon/kernel/smp.c | 4
-rw-r--r-- arch/ia64/configs/generic_defconfig | 2
-rw-r--r-- arch/ia64/configs/gensparse_defconfig | 2
-rw-r--r-- arch/ia64/configs/tiger_defconfig | 2
-rw-r--r-- arch/ia64/configs/xen_domu_defconfig | 2
-rw-r--r-- arch/m32r/kernel/smpboot.c | 2
-rw-r--r-- arch/metag/kernel/perf/perf_event.c | 6
-rw-r--r-- arch/metag/kernel/smp.c | 22
-rw-r--r-- arch/metag/kernel/traps.c | 2
-rw-r--r-- arch/mips/Kconfig | 2
-rw-r--r-- arch/mips/ath79/setup.c | 2
-rw-r--r-- arch/mips/bcm47xx/Kconfig | 1
-rw-r--r-- arch/mips/cavium-octeon/octeon-irq.c | 12
-rw-r--r-- arch/mips/cavium-octeon/octeon-platform.c | 5
-rw-r--r-- arch/mips/cavium-octeon/smp.c | 6
-rw-r--r-- arch/mips/include/asm/mach-generic/spaces.h | 4
-rw-r--r-- arch/mips/include/asm/uasm.h | 37
-rw-r--r-- arch/mips/include/uapi/asm/siginfo.h | 7
-rw-r--r-- arch/mips/kernel/bmips_vec.S | 10
-rw-r--r-- arch/mips/kernel/cevt-bcm1480.c | 2
-rw-r--r-- arch/mips/kernel/cevt-gic.c | 2
-rw-r--r-- arch/mips/kernel/cevt-r4k.c | 2
-rw-r--r-- arch/mips/kernel/cevt-sb1250.c | 2
-rw-r--r-- arch/mips/kernel/cevt-smtc.c | 2
-rw-r--r-- arch/mips/kernel/cpu-bugs64.c | 2
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 14
-rw-r--r-- arch/mips/kernel/head.S | 4
-rw-r--r-- arch/mips/kernel/smp-bmips.c | 26
-rw-r--r-- arch/mips/kernel/smp-mt.c | 6
-rw-r--r-- arch/mips/kernel/smp-up.c | 6
-rw-r--r-- arch/mips/kernel/smp.c | 6
-rw-r--r-- arch/mips/kernel/smtc.c | 2
-rw-r--r-- arch/mips/kernel/spram.c | 14
-rw-r--r-- arch/mips/kernel/sync-r4k.c | 12
-rw-r--r-- arch/mips/kernel/traps.c | 13
-rw-r--r-- arch/mips/kernel/watch.c | 2
-rw-r--r-- arch/mips/kvm/Kconfig | 1
-rw-r--r-- arch/mips/lantiq/irq.c | 2
-rw-r--r-- arch/mips/lib/uncached.c | 2
-rw-r--r-- arch/mips/mm/c-octeon.c | 6
-rw-r--r-- arch/mips/mm/c-r3k.c | 8
-rw-r--r-- arch/mips/mm/c-r4k.c | 34
-rw-r--r-- arch/mips/mm/c-tx39.c | 2
-rw-r--r-- arch/mips/mm/cache.c | 2
-rw-r--r-- arch/mips/mm/cex-sb1.S | 4
-rw-r--r-- arch/mips/mm/page.c | 40
-rw-r--r-- arch/mips/mm/sc-ip22.c | 2
-rw-r--r-- arch/mips/mm/sc-mips.c | 2
-rw-r--r-- arch/mips/mm/sc-r5k.c | 2
-rw-r--r-- arch/mips/mm/sc-rm7k.c | 12
-rw-r--r-- arch/mips/mm/tlb-r3k.c | 2
-rw-r--r-- arch/mips/mm/tlb-r4k.c | 4
-rw-r--r-- arch/mips/mm/tlb-r8k.c | 4
-rw-r--r-- arch/mips/mm/tlbex.c | 148
-rw-r--r-- arch/mips/mm/uasm-micromips.c | 10
-rw-r--r-- arch/mips/mm/uasm-mips.c | 10
-rw-r--r-- arch/mips/mm/uasm.c | 106
-rw-r--r-- arch/mips/mti-malta/malta-smtc.c | 6
-rw-r--r-- arch/mips/mti-malta/malta-time.c | 2
-rw-r--r-- arch/mips/mti-sead3/sead3-time.c | 2
-rw-r--r-- arch/mips/netlogic/common/irq.c | 68
-rw-r--r-- arch/mips/netlogic/common/smp.c | 4
-rw-r--r-- arch/mips/netlogic/common/smpboot.S | 4
-rw-r--r-- arch/mips/netlogic/common/time.c | 2
-rw-r--r-- arch/mips/netlogic/dts/xlp_evp.dts | 3
-rw-r--r-- arch/mips/netlogic/dts/xlp_svp.dts | 3
-rw-r--r-- arch/mips/netlogic/xlp/usb-init.c | 2
-rw-r--r-- arch/mips/netlogic/xlr/wakeup.c | 2
-rw-r--r-- arch/mips/pci/pci-ip27.c | 2
-rw-r--r-- arch/mips/pmcs-msp71xx/msp_smtc.c | 7
-rw-r--r-- arch/mips/pmcs-msp71xx/msp_time.c | 2
-rw-r--r-- arch/mips/pnx833x/common/interrupts.c | 2
-rw-r--r-- arch/mips/powertv/asic/asic_devices.c | 3
-rw-r--r-- arch/mips/powertv/time.c | 2
-rw-r--r-- arch/mips/ralink/irq.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-init.c | 4
-rw-r--r-- arch/mips/sgi-ip27/ip27-smp.c | 6
-rw-r--r-- arch/mips/sgi-ip27/ip27-timer.c | 6
-rw-r--r-- arch/mips/sgi-ip27/ip27-xtalk.c | 6
-rw-r--r-- arch/mips/sibyte/bcm1480/smp.c | 8
-rw-r--r-- arch/mips/sibyte/sb1250/smp.c | 8
-rw-r--r-- arch/openrisc/kernel/setup.c | 2
-rw-r--r-- arch/parisc/configs/c8000_defconfig | 279
-rw-r--r-- arch/parisc/include/asm/parisc-device.h | 3
-rw-r--r-- arch/parisc/kernel/cache.c | 135
-rw-r--r-- arch/parisc/kernel/firmware.c | 14
-rw-r--r-- arch/parisc/kernel/hardware.c | 2
-rw-r--r-- arch/parisc/kernel/inventory.c | 1
-rw-r--r-- arch/parisc/kernel/processor.c | 6
-rw-r--r-- arch/parisc/kernel/signal.c | 7
-rw-r--r-- arch/parisc/kernel/signal32.c | 1
-rw-r--r-- arch/parisc/kernel/smp.c | 8
-rw-r--r-- arch/parisc/kernel/sys32.h | 36
-rw-r--r-- arch/parisc/kernel/sys_parisc32.c | 2
-rw-r--r-- arch/powerpc/configs/ppc64_defconfig | 2
-rw-r--r-- arch/powerpc/configs/ppc64e_defconfig | 2
-rw-r--r-- arch/powerpc/configs/pseries_defconfig | 2
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 30
-rw-r--r-- arch/powerpc/include/asm/hw_irq.h | 7
-rw-r--r-- arch/powerpc/include/asm/module.h | 5
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 1
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h | 6
-rw-r--r-- arch/powerpc/include/asm/reg.h | 3
-rw-r--r-- arch/powerpc/include/asm/smp.h | 4
-rw-r--r-- arch/powerpc/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/powerpc/include/uapi/asm/perf_event.h | 18
-rw-r--r-- arch/powerpc/kernel/cputable.c | 20
-rw-r--r-- arch/powerpc/kernel/eeh.c | 70
-rw-r--r-- arch/powerpc/kernel/eeh_cache.c | 18
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 77
-rw-r--r-- arch/powerpc/kernel/eeh_pe.c | 58
-rw-r--r-- arch/powerpc/kernel/eeh_sysfs.c | 21
-rw-r--r-- arch/powerpc/kernel/irq.c | 2
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 2
-rw-r--r-- arch/powerpc/kernel/pci-hotplug.c | 49
-rw-r--r-- arch/powerpc/kernel/pci_of_scan.c | 56
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 5
-rw-r--r-- arch/powerpc/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/powerpc/mm/hash_native_64.c | 12
-rw-r--r-- arch/powerpc/mm/numa.c | 59
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 7
-rw-r--r-- arch/powerpc/perf/power8-pmu.c | 30
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-powernv.c | 17
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/pseries/eeh_pseries.c | 67
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/ras.c | 3
-rw-r--r-- arch/s390/include/asm/processor.h | 10
-rw-r--r-- arch/s390/include/asm/switch_to.h | 4
-rw-r--r-- arch/s390/include/uapi/asm/ptrace.h | 1
-rw-r--r-- arch/s390/kernel/cache.c | 15
-rw-r--r-- arch/s390/kernel/crash_dump.c | 51
-rw-r--r-- arch/s390/kernel/perf_cpum_cf.c | 4
-rw-r--r-- arch/s390/kernel/processor.c | 2
-rw-r--r-- arch/s390/kernel/ptrace.c | 50
-rw-r--r-- arch/s390/kernel/smp.c | 17
-rw-r--r-- arch/s390/kernel/sysinfo.c | 2
-rw-r--r-- arch/s390/kernel/vtime.c | 6
-rw-r--r-- arch/s390/mm/fault.c | 4
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 113
-rw-r--r-- arch/score/mm/tlb-score.c | 2
-rw-r--r-- arch/sh/configs/sh03_defconfig | 2
-rw-r--r-- arch/sh/kernel/cpu/init.c | 18
-rw-r--r-- arch/sh/kernel/cpu/sh2/probe.c | 2
-rw-r--r-- arch/sh/kernel/cpu/sh2a/probe.c | 2
-rw-r--r-- arch/sh/kernel/cpu/sh3/probe.c | 2
-rw-r--r-- arch/sh/kernel/cpu/sh4/probe.c | 2
-rw-r--r-- arch/sh/kernel/cpu/sh4a/smp-shx3.c | 6
-rw-r--r-- arch/sh/kernel/cpu/sh5/probe.c | 2
-rw-r--r-- arch/sh/kernel/perf_event.c | 4
-rw-r--r-- arch/sh/kernel/process.c | 2
-rw-r--r-- arch/sh/kernel/setup.c | 2
-rw-r--r-- arch/sh/kernel/smp.c | 8
-rw-r--r-- arch/sh/kernel/traps_32.c | 2
-rw-r--r-- arch/sh/kernel/traps_64.c | 2
-rw-r--r-- arch/sh/mm/tlb-sh5.c | 2
-rw-r--r-- arch/sparc/kernel/ds.c | 11
-rw-r--r-- arch/sparc/kernel/entry.h | 2
-rw-r--r-- arch/sparc/kernel/hvtramp.S | 1
-rw-r--r-- arch/sparc/kernel/irq_64.c | 5
-rw-r--r-- arch/sparc/kernel/leon_smp.c | 10
-rw-r--r-- arch/sparc/kernel/mdesc.c | 34
-rw-r--r-- arch/sparc/kernel/smp_32.c | 20
-rw-r--r-- arch/sparc/kernel/smp_64.c | 9
-rw-r--r-- arch/sparc/kernel/sun4d_smp.c | 6
-rw-r--r-- arch/sparc/kernel/sun4m_smp.c | 6
-rw-r--r-- arch/sparc/kernel/sysfs.c | 4
-rw-r--r-- arch/sparc/kernel/trampoline_32.S | 3
-rw-r--r-- arch/sparc/kernel/trampoline_64.S | 2
-rw-r--r-- arch/sparc/mm/init_64.c | 2
-rw-r--r-- arch/sparc/mm/srmmu.c | 12
-rw-r--r-- arch/tile/kernel/irq.c | 2
-rw-r--r-- arch/tile/kernel/messaging.c | 2
-rw-r--r-- arch/tile/kernel/setup.c | 12
-rw-r--r-- arch/tile/kernel/smpboot.c | 8
-rw-r--r-- arch/tile/kernel/time.c | 2
-rw-r--r-- arch/um/include/shared/frame_kern.h | 8
-rw-r--r-- arch/um/kernel/signal.c | 4
-rw-r--r-- arch/um/kernel/skas/mmu.c | 2
-rw-r--r-- arch/um/kernel/skas/uaccess.c | 2
-rw-r--r-- arch/um/os-Linux/mem.c | 230
-rw-r--r-- arch/um/os-Linux/signal.c | 8
-rw-r--r-- arch/um/os-Linux/skas/process.c | 19
-rw-r--r-- arch/x86/crypto/Makefile | 2
-rw-r--r-- arch/x86/crypto/crct10dif-pcl-asm_64.S | 643
-rw-r--r-- arch/x86/crypto/crct10dif-pclmul_glue.c | 151
-rw-r--r-- arch/x86/include/asm/cpu.h | 2
-rw-r--r-- arch/x86/include/asm/microcode.h | 4
-rw-r--r-- arch/x86/include/asm/microcode_amd.h | 4
-rw-r--r-- arch/x86/include/asm/microcode_intel.h | 4
-rw-r--r-- arch/x86/include/asm/mmconfig.h | 4
-rw-r--r-- arch/x86/include/asm/mpspec.h | 2
-rw-r--r-- arch/x86/include/asm/numa.h | 6
-rw-r--r-- arch/x86/include/asm/processor.h | 2
-rw-r--r-- arch/x86/include/asm/prom.h | 2
-rw-r--r-- arch/x86/include/asm/smp.h | 2
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 6
-rw-r--r-- arch/x86/kernel/acpi/sleep.c | 18
-rw-r--r-- arch/x86/kernel/apic/apic.c | 30
-rw-r--r-- arch/x86/kernel/apic/apic_numachip.c | 2
-rw-r--r-- arch/x86/kernel/apic/es7000_32.c | 2
-rw-r--r-- arch/x86/kernel/apic/numaq_32.c | 2
-rw-r--r-- arch/x86/kernel/apic/x2apic_cluster.c | 2
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 14
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 33
-rw-r--r-- arch/x86/kernel/cpu/centaur.c | 26
-rw-r--r-- arch/x86/kernel/cpu/common.c | 64
-rw-r--r-- arch/x86/kernel/cpu/cyrix.c | 40
-rw-r--r-- arch/x86/kernel/cpu/hypervisor.c | 2
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 30
-rw-r--r-- arch/x86/kernel/cpu/intel_cacheinfo.c | 55
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce-severity.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 23
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce_amd.c | 14
-rw-r--r-- arch/x86/kernel/cpu/mcheck/therm_throt.c | 9
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd_ibs.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd_uncore.c | 31
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 20
-rw-r--r-- arch/x86/kernel/cpu/rdrand.c | 2
-rw-r--r-- arch/x86/kernel/cpu/scattered.c | 4
-rw-r--r-- arch/x86/kernel/cpu/topology.c | 2
-rw-r--r-- arch/x86/kernel/cpu/transmeta.c | 6
-rw-r--r-- arch/x86/kernel/cpu/umc.c | 2
-rw-r--r-- arch/x86/kernel/cpu/vmware.c | 2
-rw-r--r-- arch/x86/kernel/cpuid.c | 7
-rw-r--r-- arch/x86/kernel/devicetree.c | 2
-rw-r--r-- arch/x86/kernel/head_32.S | 1
-rw-r--r-- arch/x86/kernel/head_64.S | 15
-rw-r--r-- arch/x86/kernel/i387.c | 10
-rw-r--r-- arch/x86/kernel/irq_32.c | 2
-rw-r--r-- arch/x86/kernel/kvm.c | 10
-rw-r--r-- arch/x86/kernel/kvmclock.c | 2
-rw-r--r-- arch/x86/kernel/microcode_amd_early.c | 8
-rw-r--r-- arch/x86/kernel/microcode_core.c | 2
-rw-r--r-- arch/x86/kernel/microcode_core_early.c | 6
-rw-r--r-- arch/x86/kernel/microcode_intel_early.c | 26
-rw-r--r-- arch/x86/kernel/mmconf-fam10h_64.c | 12
-rw-r--r-- arch/x86/kernel/msr.c | 6
-rw-r--r-- arch/x86/kernel/process.c | 2
-rw-r--r-- arch/x86/kernel/setup.c | 2
-rw-r--r-- arch/x86/kernel/smpboot.c | 28
-rw-r--r-- arch/x86/kernel/tboot.c | 6
-rw-r--r-- arch/x86/kernel/tracepoint.c | 6
-rw-r--r-- arch/x86/kernel/traps.c | 12
-rw-r--r-- arch/x86/kernel/tsc.c | 4
-rw-r--r-- arch/x86/kernel/tsc_sync.c | 18
-rw-r--r-- arch/x86/kernel/vsyscall_64.c | 6
-rw-r--r-- arch/x86/kernel/x86_init.c | 4
-rw-r--r-- arch/x86/kernel/xsave.c | 4
-rw-r--r-- arch/x86/kvm/mmu.c | 7
-rw-r--r-- arch/x86/mm/mmio-mod.c | 4
-rw-r--r-- arch/x86/mm/numa.c | 12
-rw-r--r-- arch/x86/mm/numa_emulation.c | 12
-rw-r--r-- arch/x86/mm/setup_nx.c | 4
-rw-r--r-- arch/x86/pci/amd_bus.c | 8
-rw-r--r-- arch/x86/platform/ce4100/ce4100.c | 4
-rw-r--r-- arch/x86/platform/efi/efi.c | 7
-rw-r--r-- arch/x86/platform/mrst/mrst.c | 4
-rw-r--r-- arch/x86/um/signal.c | 1
-rw-r--r-- arch/x86/xen/enlighten.c | 6
-rw-r--r-- arch/x86/xen/setup.c | 6
-rw-r--r-- arch/x86/xen/smp.c | 12
-rw-r--r-- arch/x86/xen/spinlock.c | 2
-rw-r--r-- arch/x86/xen/xen-ops.h | 2
-rw-r--r-- arch/xtensa/kernel/time.c | 2
455 files changed, 3359 insertions(+), 2961 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 837a1f2d8b96..082d9b4b5472 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -15,6 +15,7 @@ config ALPHA
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
select GENERIC_STRNCPY_FROM_USER
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4fc391c..78b03ef39f6f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
*/
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
+ int c, new, old;
+ smp_mb();
+ __asm__ __volatile__(
+ "1: ldl_l %[old],%[mem]\n"
+ " cmpeq %[old],%[u],%[c]\n"
+ " addl %[old],%[a],%[new]\n"
+ " bne %[c],2f\n"
+ " stl_c %[new],%[mem]\n"
+ " beq %[new],3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
+ : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
+ : "memory");
+ smp_mb();
+ return old;
}
@@ -207,21 +214,56 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns true iff @v was not @u.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
+ long c, tmp;
+ smp_mb();
+ __asm__ __volatile__(
+ "1: ldq_l %[tmp],%[mem]\n"
+ " cmpeq %[tmp],%[u],%[c]\n"
+ " addq %[tmp],%[a],%[tmp]\n"
+ " bne %[c],2f\n"
+ " stq_c %[tmp],%[mem]\n"
+ " beq %[tmp],3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : [tmp] "=&r"(tmp), [c] "=&r"(c)
+ : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
+ : "memory");
+ smp_mb();
+ return !c;
+}
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long old, tmp;
+ smp_mb();
+ __asm__ __volatile__(
+ "1: ldq_l %[old],%[mem]\n"
+ " subq %[old],1,%[tmp]\n"
+ " ble %[old],2f\n"
+ " stq_c %[tmp],%[mem]\n"
+ " beq %[tmp],3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : [old] "=&r"(old), [tmp] "=&r"(tmp)
+ : [mem] "m"(*v)
+ : "memory");
+ smp_mb();
+ return old - 1;
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h
index bf46af51941b..a5b68b268bcf 100644
--- a/arch/alpha/include/asm/param.h
+++ b/arch/alpha/include/asm/param.h
@@ -3,7 +3,9 @@
#include <uapi/asm/param.h>
-#define HZ CONFIG_HZ
-#define USER_HZ HZ
-# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
+# undef HZ
+# define HZ CONFIG_HZ
+# define USER_HZ 1024
+# define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */
+
#endif /* _ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 3bba21e41b81..37b570d01202 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -168,8 +168,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 43baee17acdf..f2c94402e2c8 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,8 +3,7 @@
#include <uapi/asm/unistd.h>
-
-#define NR_SYSCALLS 506
+#define NR_SYSCALLS 508
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/uapi/asm/param.h b/arch/alpha/include/uapi/asm/param.h
index 29daed819ebd..dbcd9834af6d 100644
--- a/arch/alpha/include/uapi/asm/param.h
+++ b/arch/alpha/include/uapi/asm/param.h
@@ -1,13 +1,7 @@
#ifndef _UAPI_ASM_ALPHA_PARAM_H
#define _UAPI_ASM_ALPHA_PARAM_H
-/* ??? Gross. I don't want to parameterize this, and supposedly the
- hardware ignores reprogramming. We also need userland buy-in to the
- change in HZ, since this is visible in the wait4 resources etc. */
-
-#ifndef __KERNEL__
#define HZ 1024
-#endif
#define EXEC_PAGESIZE 8192
@@ -17,5 +11,4 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
#endif /* _UAPI_ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index 801d28bcea51..53ae7bb1bfd1 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -467,5 +467,7 @@
#define __NR_sendmmsg 503
#define __NR_process_vm_readv 504
#define __NR_process_vm_writev 505
+#define __NR_kcmp 506
+#define __NR_finit_module 507
#endif /* _UAPI_ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index f62a994ef126..a969b95ee5ac 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -12,11 +12,32 @@
.text
.set noat
+ .cfi_sections .debug_frame
/* Stack offsets. */
#define SP_OFF 184
#define SWITCH_STACK_SIZE 320
+.macro CFI_START_OSF_FRAME func
+ .align 4
+ .globl \func
+ .type \func,@function
+\func:
+ .cfi_startproc simple
+ .cfi_return_column 64
+ .cfi_def_cfa $sp, 48
+ .cfi_rel_offset 64, 8
+ .cfi_rel_offset $gp, 16
+ .cfi_rel_offset $16, 24
+ .cfi_rel_offset $17, 32
+ .cfi_rel_offset $18, 40
+.endm
+
+.macro CFI_END_OSF_FRAME func
+ .cfi_endproc
+ .size \func, . - \func
+.endm
+
/*
* This defines the normal kernel pt-regs layout.
*
@@ -27,100 +48,158 @@
* the palcode-provided values are available to the signal handler.
*/
-#define SAVE_ALL \
- subq $sp, SP_OFF, $sp; \
- stq $0, 0($sp); \
- stq $1, 8($sp); \
- stq $2, 16($sp); \
- stq $3, 24($sp); \
- stq $4, 32($sp); \
- stq $28, 144($sp); \
- lda $2, alpha_mv; \
- stq $5, 40($sp); \
- stq $6, 48($sp); \
- stq $7, 56($sp); \
- stq $8, 64($sp); \
- stq $19, 72($sp); \
- stq $20, 80($sp); \
- stq $21, 88($sp); \
- ldq $2, HAE_CACHE($2); \
- stq $22, 96($sp); \
- stq $23, 104($sp); \
- stq $24, 112($sp); \
- stq $25, 120($sp); \
- stq $26, 128($sp); \
- stq $27, 136($sp); \
- stq $2, 152($sp); \
- stq $16, 160($sp); \
- stq $17, 168($sp); \
+.macro SAVE_ALL
+ subq $sp, SP_OFF, $sp
+ .cfi_adjust_cfa_offset SP_OFF
+ stq $0, 0($sp)
+ stq $1, 8($sp)
+ stq $2, 16($sp)
+ stq $3, 24($sp)
+ stq $4, 32($sp)
+ stq $28, 144($sp)
+ .cfi_rel_offset $0, 0
+ .cfi_rel_offset $1, 8
+ .cfi_rel_offset $2, 16
+ .cfi_rel_offset $3, 24
+ .cfi_rel_offset $4, 32
+ .cfi_rel_offset $28, 144
+ lda $2, alpha_mv
+ stq $5, 40($sp)
+ stq $6, 48($sp)
+ stq $7, 56($sp)
+ stq $8, 64($sp)
+ stq $19, 72($sp)
+ stq $20, 80($sp)
+ stq $21, 88($sp)
+ ldq $2, HAE_CACHE($2)
+ stq $22, 96($sp)
+ stq $23, 104($sp)
+ stq $24, 112($sp)
+ stq $25, 120($sp)
+ stq $26, 128($sp)
+ stq $27, 136($sp)
+ stq $2, 152($sp)
+ stq $16, 160($sp)
+ stq $17, 168($sp)
stq $18, 176($sp)
+ .cfi_rel_offset $5, 40
+ .cfi_rel_offset $6, 48
+ .cfi_rel_offset $7, 56
+ .cfi_rel_offset $8, 64
+ .cfi_rel_offset $19, 72
+ .cfi_rel_offset $20, 80
+ .cfi_rel_offset $21, 88
+ .cfi_rel_offset $22, 96
+ .cfi_rel_offset $23, 104
+ .cfi_rel_offset $24, 112
+ .cfi_rel_offset $25, 120
+ .cfi_rel_offset $26, 128
+ .cfi_rel_offset $27, 136
+.endm
-#define RESTORE_ALL \
- lda $19, alpha_mv; \
- ldq $0, 0($sp); \
- ldq $1, 8($sp); \
- ldq $2, 16($sp); \
- ldq $3, 24($sp); \
- ldq $21, 152($sp); \
- ldq $20, HAE_CACHE($19); \
- ldq $4, 32($sp); \
- ldq $5, 40($sp); \
- ldq $6, 48($sp); \
- ldq $7, 56($sp); \
- subq $20, $21, $20; \
- ldq $8, 64($sp); \
- beq $20, 99f; \
- ldq $20, HAE_REG($19); \
- stq $21, HAE_CACHE($19); \
- stq $21, 0($20); \
-99:; \
- ldq $19, 72($sp); \
- ldq $20, 80($sp); \
- ldq $21, 88($sp); \
- ldq $22, 96($sp); \
- ldq $23, 104($sp); \
- ldq $24, 112($sp); \
- ldq $25, 120($sp); \
- ldq $26, 128($sp); \
- ldq $27, 136($sp); \
- ldq $28, 144($sp); \
+.macro RESTORE_ALL
+ lda $19, alpha_mv
+ ldq $0, 0($sp)
+ ldq $1, 8($sp)
+ ldq $2, 16($sp)
+ ldq $3, 24($sp)
+ ldq $21, 152($sp)
+ ldq $20, HAE_CACHE($19)
+ ldq $4, 32($sp)
+ ldq $5, 40($sp)
+ ldq $6, 48($sp)
+ ldq $7, 56($sp)
+ subq $20, $21, $20
+ ldq $8, 64($sp)
+ beq $20, 99f
+ ldq $20, HAE_REG($19)
+ stq $21, HAE_CACHE($19)
+ stq $21, 0($20)
+99: ldq $19, 72($sp)
+ ldq $20, 80($sp)
+ ldq $21, 88($sp)
+ ldq $22, 96($sp)
+ ldq $23, 104($sp)
+ ldq $24, 112($sp)
+ ldq $25, 120($sp)
+ ldq $26, 128($sp)
+ ldq $27, 136($sp)
+ ldq $28, 144($sp)
addq $sp, SP_OFF, $sp
+ .cfi_restore $0
+ .cfi_restore $1
+ .cfi_restore $2
+ .cfi_restore $3
+ .cfi_restore $4
+ .cfi_restore $5
+ .cfi_restore $6
+ .cfi_restore $7
+ .cfi_restore $8
+ .cfi_restore $19
+ .cfi_restore $20
+ .cfi_restore $21
+ .cfi_restore $22
+ .cfi_restore $23
+ .cfi_restore $24
+ .cfi_restore $25
+ .cfi_restore $26
+ .cfi_restore $27
+ .cfi_restore $28
+ .cfi_adjust_cfa_offset -SP_OFF
+.endm
+
+.macro DO_SWITCH_STACK
+ bsr $1, do_switch_stack
+ .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
+ .cfi_rel_offset $9, 0
+ .cfi_rel_offset $10, 8
+ .cfi_rel_offset $11, 16
+ .cfi_rel_offset $12, 24
+ .cfi_rel_offset $13, 32
+ .cfi_rel_offset $14, 40
+ .cfi_rel_offset $15, 48
+ /* We don't really care about the FP registers for debugging. */
+.endm
+
+.macro UNDO_SWITCH_STACK
+ bsr $1, undo_switch_stack
+ .cfi_restore $9
+ .cfi_restore $10
+ .cfi_restore $11
+ .cfi_restore $12
+ .cfi_restore $13
+ .cfi_restore $14
+ .cfi_restore $15
+ .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
+.endm
/*
* Non-syscall kernel entry points.
*/
- .align 4
- .globl entInt
- .ent entInt
-entInt:
+CFI_START_OSF_FRAME entInt
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $19
jsr $31, do_entInt
-.end entInt
+CFI_END_OSF_FRAME entInt
- .align 4
- .globl entArith
- .ent entArith
-entArith:
+CFI_START_OSF_FRAME entArith
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $18
jsr $31, do_entArith
-.end entArith
+CFI_END_OSF_FRAME entArith
- .align 4
- .globl entMM
- .ent entMM
-entMM:
+CFI_START_OSF_FRAME entMM
SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them. */
subq $sp, 56, $sp
+ .cfi_adjust_cfa_offset 56
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@@ -128,6 +207,13 @@ entMM:
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
+ .cfi_rel_offset $9, 0
+ .cfi_rel_offset $10, 8
+ .cfi_rel_offset $11, 16
+ .cfi_rel_offset $12, 24
+ .cfi_rel_offset $13, 32
+ .cfi_rel_offset $14, 40
+ .cfi_rel_offset $15, 48
addq $sp, 56, $19
/* handle the fault */
lda $8, 0x3fff
@@ -142,28 +228,33 @@ entMM:
ldq $14, 40($sp)
ldq $15, 48($sp)
addq $sp, 56, $sp
+ .cfi_restore $9
+ .cfi_restore $10
+ .cfi_restore $11
+ .cfi_restore $12
+ .cfi_restore $13
+ .cfi_restore $14
+ .cfi_restore $15
+ .cfi_adjust_cfa_offset -56
/* finish up the syscall as normal. */
br ret_from_sys_call
-.end entMM
+CFI_END_OSF_FRAME entMM
- .align 4
- .globl entIF
- .ent entIF
-entIF:
+CFI_START_OSF_FRAME entIF
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $17
jsr $31, do_entIF
-.end entIF
+CFI_END_OSF_FRAME entIF
- .align 4
- .globl entUna
- .ent entUna
-entUna:
+CFI_START_OSF_FRAME entUna
lda $sp, -256($sp)
+ .cfi_adjust_cfa_offset 256
stq $0, 0($sp)
+ .cfi_rel_offset $0, 0
+ .cfi_remember_state
ldq $0, 256($sp) /* get PS */
stq $1, 8($sp)
stq $2, 16($sp)
@@ -195,6 +286,32 @@ entUna:
stq $28, 224($sp)
mov $sp, $19
stq $gp, 232($sp)
+ .cfi_rel_offset $1, 1*8
+ .cfi_rel_offset $2, 2*8
+ .cfi_rel_offset $3, 3*8
+ .cfi_rel_offset $4, 4*8
+ .cfi_rel_offset $5, 5*8
+ .cfi_rel_offset $6, 6*8
+ .cfi_rel_offset $7, 7*8
+ .cfi_rel_offset $8, 8*8
+ .cfi_rel_offset $9, 9*8
+ .cfi_rel_offset $10, 10*8
+ .cfi_rel_offset $11, 11*8
+ .cfi_rel_offset $12, 12*8
+ .cfi_rel_offset $13, 13*8
+ .cfi_rel_offset $14, 14*8
+ .cfi_rel_offset $15, 15*8
+ .cfi_rel_offset $19, 19*8
+ .cfi_rel_offset $20, 20*8
+ .cfi_rel_offset $21, 21*8
+ .cfi_rel_offset $22, 22*8
+ .cfi_rel_offset $23, 23*8
+ .cfi_rel_offset $24, 24*8
+ .cfi_rel_offset $25, 25*8
+ .cfi_rel_offset $26, 26*8
+ .cfi_rel_offset $27, 27*8
+ .cfi_rel_offset $28, 28*8
+ .cfi_rel_offset $29, 29*8
lda $8, 0x3fff
stq $31, 248($sp)
bic $sp, $8, $8
@@ -228,16 +345,45 @@ entUna:
ldq $28, 224($sp)
ldq $gp, 232($sp)
lda $sp, 256($sp)
+ .cfi_restore $1
+ .cfi_restore $2
+ .cfi_restore $3
+ .cfi_restore $4
+ .cfi_restore $5
+ .cfi_restore $6
+ .cfi_restore $7
+ .cfi_restore $8
+ .cfi_restore $9
+ .cfi_restore $10
+ .cfi_restore $11
+ .cfi_restore $12
+ .cfi_restore $13
+ .cfi_restore $14
+ .cfi_restore $15
+ .cfi_restore $19
+ .cfi_restore $20
+ .cfi_restore $21
+ .cfi_restore $22
+ .cfi_restore $23
+ .cfi_restore $24
+ .cfi_restore $25
+ .cfi_restore $26
+ .cfi_restore $27
+ .cfi_restore $28
+ .cfi_restore $29
+ .cfi_adjust_cfa_offset -256
call_pal PAL_rti
-.end entUna
.align 4
- .ent entUnaUser
entUnaUser:
+ .cfi_restore_state
ldq $0, 0($sp) /* restore original $0 */
lda $sp, 256($sp) /* pop entUna's stack frame */
+ .cfi_restore $0
+ .cfi_adjust_cfa_offset -256
SAVE_ALL /* setup normal kernel stack */
lda $sp, -56($sp)
+ .cfi_adjust_cfa_offset 56
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@@ -245,6 +391,13 @@ entUnaUser:
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
+ .cfi_rel_offset $9, 0
+ .cfi_rel_offset $10, 8
+ .cfi_rel_offset $11, 16
+ .cfi_rel_offset $12, 24
+ .cfi_rel_offset $13, 32
+ .cfi_rel_offset $14, 40
+ .cfi_rel_offset $15, 48
lda $8, 0x3fff
addq $sp, 56, $19
bic $sp, $8, $8
@@ -257,20 +410,25 @@ entUnaUser:
ldq $14, 40($sp)
ldq $15, 48($sp)
lda $sp, 56($sp)
+ .cfi_restore $9
+ .cfi_restore $10
+ .cfi_restore $11
+ .cfi_restore $12
+ .cfi_restore $13
+ .cfi_restore $14
+ .cfi_restore $15
+ .cfi_adjust_cfa_offset -56
br ret_from_sys_call
-.end entUnaUser
+CFI_END_OSF_FRAME entUna
- .align 4
- .globl entDbg
- .ent entDbg
-entDbg:
+CFI_START_OSF_FRAME entDbg
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $16
jsr $31, do_entDbg
-.end entDbg
+CFI_END_OSF_FRAME entDbg
/*
* The system call entry point is special. Most importantly, it looks
@@ -285,8 +443,12 @@ entDbg:
.align 4
.globl entSys
- .globl ret_from_sys_call
- .ent entSys
+ .type entSys, @function
+ .cfi_startproc simple
+ .cfi_return_column 64
+ .cfi_def_cfa $sp, 48
+ .cfi_rel_offset 64, 8
+ .cfi_rel_offset $gp, 16
entSys:
SAVE_ALL
lda $8, 0x3fff
@@ -300,6 +462,9 @@ entSys:
stq $17, SP_OFF+32($sp)
s8addq $0, $5, $5
stq $18, SP_OFF+40($sp)
+ .cfi_rel_offset $16, SP_OFF+24
+ .cfi_rel_offset $17, SP_OFF+32
+ .cfi_rel_offset $18, SP_OFF+40
blbs $3, strace
beq $4, 1f
ldq $27, 0($5)
@@ -310,6 +475,7 @@ entSys:
stq $31, 72($sp) /* a3=0 => no error */
.align 4
+ .globl ret_from_sys_call
ret_from_sys_call:
cmovne $26, 0, $18 /* $18 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
@@ -324,10 +490,12 @@ ret_to_user:
and $17, _TIF_WORK_MASK, $2
bne $2, work_pending
restore_all:
+ .cfi_remember_state
RESTORE_ALL
call_pal PAL_rti
ret_to_kernel:
+ .cfi_restore_state
lda $16, 7
call_pal PAL_swpipl
br restore_all
@@ -356,7 +524,6 @@ $ret_success:
stq $0, 0($sp)
stq $31, 72($sp) /* a3=0 => no error */
br ret_from_sys_call
-.end entSys
/*
* Do all cleanup when returning from all interrupts and system calls.
@@ -370,7 +537,7 @@ $ret_success:
*/
.align 4
- .ent work_pending
+ .type work_pending, @function
work_pending:
and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
bne $2, $work_notifysig
@@ -387,23 +554,22 @@ $work_resched:
$work_notifysig:
mov $sp, $16
- bsr $1, do_switch_stack
+ DO_SWITCH_STACK
jsr $26, do_work_pending
- bsr $1, undo_switch_stack
+ UNDO_SWITCH_STACK
br restore_all
-.end work_pending
/*
* PTRACE syscall handler
*/
.align 4
- .ent strace
+ .type strace, @function
strace:
/* set up signal stack, call syscall_trace */
- bsr $1, do_switch_stack
+ DO_SWITCH_STACK
jsr $26, syscall_trace_enter /* returns the syscall number */
- bsr $1, undo_switch_stack
+ UNDO_SWITCH_STACK
/* get the arguments back.. */
ldq $16, SP_OFF+24($sp)
@@ -431,9 +597,9 @@ ret_from_straced:
$strace_success:
stq $0, 0($sp) /* save return value */
- bsr $1, do_switch_stack
+ DO_SWITCH_STACK
jsr $26, syscall_trace_leave
- bsr $1, undo_switch_stack
+ UNDO_SWITCH_STACK
br $31, ret_from_sys_call
.align 3
@@ -447,26 +613,31 @@ $strace_error:
stq $0, 0($sp)
stq $1, 72($sp) /* a3 for return */
- bsr $1, do_switch_stack
+ DO_SWITCH_STACK
mov $18, $9 /* save old syscall number */
mov $19, $10 /* save old a3 */
jsr $26, syscall_trace_leave
mov $9, $18
mov $10, $19
- bsr $1, undo_switch_stack
+ UNDO_SWITCH_STACK
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
br ret_from_sys_call
-.end strace
+CFI_END_OSF_FRAME entSys
/*
* Save and restore the switch stack -- aka the balance of the user context.
*/
.align 4
- .ent do_switch_stack
+ .type do_switch_stack, @function
+ .cfi_startproc simple
+ .cfi_return_column 64
+ .cfi_def_cfa $sp, 0
+ .cfi_register 64, $1
do_switch_stack:
lda $sp, -SWITCH_STACK_SIZE($sp)
+ .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@@ -510,10 +681,14 @@ do_switch_stack:
stt $f0, 312($sp) # save fpcr in slot of $f31
ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state.
ret $31, ($1), 1
-.end do_switch_stack
+ .cfi_endproc
+ .size do_switch_stack, .-do_switch_stack
.align 4
- .ent undo_switch_stack
+ .type undo_switch_stack, @function
+ .cfi_startproc simple
+ .cfi_def_cfa $sp, 0
+ .cfi_register 64, $1
undo_switch_stack:
ldq $9, 0($sp)
ldq $10, 8($sp)
@@ -558,7 +733,8 @@ undo_switch_stack:
ldt $f30, 304($sp)
lda $sp, SWITCH_STACK_SIZE($sp)
ret $31, ($1), 1
-.end undo_switch_stack
+ .cfi_endproc
+ .size undo_switch_stack, .-undo_switch_stack
/*
* The meat of the context switch code.
@@ -566,17 +742,18 @@ undo_switch_stack:
.align 4
.globl alpha_switch_to
- .ent alpha_switch_to
+ .type alpha_switch_to, @function
+ .cfi_startproc
alpha_switch_to:
- .prologue 0
- bsr $1, do_switch_stack
+ DO_SWITCH_STACK
call_pal PAL_swpctx
lda $8, 0x3fff
- bsr $1, undo_switch_stack
+ UNDO_SWITCH_STACK
bic $sp, $8, $8
mov $17, $0
ret
-.end alpha_switch_to
+ .cfi_endproc
+ .size alpha_switch_to, .-alpha_switch_to
/*
* New processes begin life here.
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index f433fc11877a..28e4429596f3 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -236,7 +236,7 @@ void __init
init_rtc_irq(void)
{
irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
- handle_simple_irq, "RTC");
+ handle_percpu_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 7b60834fb4b2..9dbbcb3b9146 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -116,7 +116,7 @@ wait_boot_cpu_to_stop(int cpuid)
/*
* Where secondaries begin a life of C.
*/
-void __cpuinit
+void
smp_callin(void)
{
int cpuid = hard_smp_processor_id();
@@ -194,7 +194,7 @@ wait_for_txrdy (unsigned long cpumask)
* Send a message to a secondary's console. "START" is one such
* interesting message. ;-)
*/
-static void __cpuinit
+static void
send_secondary_console_msg(char *str, int cpuid)
{
struct percpu_struct *cpu;
@@ -264,9 +264,10 @@ recv_secondary_console_msg(void)
if (cnt <= 0 || cnt >= 80)
strcpy(buf, "<<< BOGUS MSG >>>");
else {
- cp1 = (char *) &cpu->ipc_buffer[11];
+ cp1 = (char *) &cpu->ipc_buffer[1];
cp2 = buf;
- strcpy(cp2, cp1);
+ memcpy(cp2, cp1, cnt);
+ cp2[cnt] = '\0';
while ((cp2 = strchr(cp2, '\r')) != 0) {
*cp2 = ' ';
@@ -285,7 +286,7 @@ recv_secondary_console_msg(void)
/*
* Convince the console to have a secondary cpu begin execution.
*/
-static int __cpuinit
+static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
struct percpu_struct *cpu;
@@ -356,7 +357,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
/*
* Bring one cpu online.
*/
-static int __cpuinit
+static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
unsigned long timeout;
@@ -472,7 +473,7 @@ smp_prepare_boot_cpu(void)
{
}
-int __cpuinit
+int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu, tidle);
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5bf401f7ea97..6c35159bc00e 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -190,9 +190,6 @@ static struct irq_chip clipper_irq_type = {
static void
dp264_device_interrupt(unsigned long vector)
{
-#if 1
- printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n");
-#else
unsigned long pld;
unsigned int i;
@@ -210,12 +207,7 @@ dp264_device_interrupt(unsigned long vector)
isa_device_interrupt(vector);
else
handle_irq(16 + i);
-#if 0
- TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
- tmp = TSUNAMI_cchip->dir0.csr;
-#endif
}
-#endif
}
static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 407accc80877..c92e389ff219 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -317,8 +317,9 @@ marvel_init_irq(void)
}
static int
-marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
{
+ struct pci_dev *dev = (struct pci_dev *)cdev;
struct pci_controller *hose = dev->sysdata;
struct io7_port *io7_port = hose->sysdata;
struct io7 *io7 = io7_port->io7;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 4284ec798ec9..dca9b3fb0071 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -524,6 +524,8 @@ sys_call_table:
.quad sys_sendmmsg
.quad sys_process_vm_readv
.quad sys_process_vm_writev /* 505 */
+ .quad sys_kcmp
+ .quad sys_finit_module
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index e336694ca042..ea3395036556 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -105,9 +105,7 @@ void arch_irq_work_raise(void)
static inline __u32 rpcc(void)
{
- __u32 result;
- asm volatile ("rpcc %0" : "=r"(result));
- return result;
+ return __builtin_alpha_rpcc();
}
int update_persistent_clock(struct timespec now)
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index affccb959a9e..bd0665cdc840 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -32,7 +32,7 @@
static int opDEC_fix;
-static void __cpuinit
+static void
opDEC_check(void)
{
__asm__ __volatile__ (
@@ -66,8 +66,8 @@ dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
regs->pc, regs->r26, regs->ps, print_tainted());
- print_symbol("pc is at %s\n", regs->pc);
- print_symbol("ra is at %s\n", regs->r26 );
+ printk("pc is at %pSR\n", (void *)regs->pc);
+ printk("ra is at %pSR\n", (void *)regs->r26);
printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
regs->r0, regs->r1, regs->r2);
printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
@@ -132,9 +132,7 @@ dik_show_trace(unsigned long *sp)
continue;
if (tmp >= (unsigned long) &_etext)
continue;
- printk("[<%lx>]", tmp);
- print_symbol(" %s", tmp);
- printk("\n");
+ printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
if (i > 40) {
printk(" ...");
break;
@@ -1059,7 +1057,7 @@ give_sigbus:
return;
}
-void __cpuinit
+void
trap_init(void)
{
/* Tell PAL-code what global pointer we want in the kernel. */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 8943c028d4bb..df57611652e5 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -38,6 +38,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/thread_info.h> /* For THREAD_SIZE */
+#include <asm/mmu.h>
/* Note on the LD/ST addr modes with addr reg wback
*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e02ec0c..43594d5116ef 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -20,7 +20,6 @@ config ARM
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HARDIRQS_SW_RESEND
- select HAVE_AOUT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
@@ -218,7 +217,8 @@ config VECTORS_BASE
default DRAM_BASE if REMAP_VECTORS_TO_RAM
default 0x00000000
help
- The base address of exception vectors.
+ The base address of exception vectors. This must be two pages
+ in size.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime" if EMBEDDED
@@ -1600,8 +1600,7 @@ config LOCAL_TIMERS
config ARCH_NR_GPIO
int
default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
- default 512 if SOC_OMAP5
- default 512 if ARCH_KEYSTONE
+ default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5
default 392 if ARCH_U8500
default 352 if ARCH_VT8500
default 288 if ARCH_SUNXI
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index e401a766c0bd..583f4a00ec32 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -804,9 +804,19 @@ config DEBUG_LL_INCLUDE
config DEBUG_UNCOMPRESS
bool
- default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
- !DEBUG_OMAP2PLUS_UART && \
+ depends on ARCH_MULTIPLATFORM
+ default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
!DEBUG_TEGRA_UART
+ help
+ This option influences the normal decompressor output for
+ multiplatform kernels. Normally, multiplatform kernels disable
+ decompressor output because it is not possible to know where to
+ send the decompressor output.
+
+ When this option is set, the selected DEBUG_LL output method
+ will be re-used for normal decompressor output on multiplatform
+ kernels.
+
config UNCOMPRESS_INCLUDE
string
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c0ac0f5e5e5c..6fd2ceae305a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DOVE) += dove
machine-$(CONFIG_ARCH_EBSA110) += ebsa110
machine-$(CONFIG_ARCH_EP93XX) += ep93xx
+machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_GEMINI) += gemini
machine-$(CONFIG_ARCH_HIGHBANK) += highbank
machine-$(CONFIG_ARCH_INTEGRATOR) += integrator
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX) += iop13xx
machine-$(CONFIG_ARCH_IOP32X) += iop32x
machine-$(CONFIG_ARCH_IOP33X) += iop33x
machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx
+machine-$(CONFIG_ARCH_KEYSTONE) += keystone
machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood
machine-$(CONFIG_ARCH_KS8695) += ks8695
machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx
machine-$(CONFIG_ARCH_MMP) += mmp
machine-$(CONFIG_ARCH_MSM) += msm
machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0
+machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_MXC) += imx
machine-$(CONFIG_ARCH_MXS) += mxs
-machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_NETX) += netx
machine-$(CONFIG_ARCH_NOMADIK) += nomadik
machine-$(CONFIG_ARCH_NSPIRE) += nspire
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1) += omap1
machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2
machine-$(CONFIG_ARCH_ORION5X) += orion5x
machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
-machine-$(CONFIG_ARCH_SIRF) += prima2
machine-$(CONFIG_ARCH_PXA) += pxa
machine-$(CONFIG_ARCH_REALVIEW) += realview
machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx
machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0
machine-$(CONFIG_ARCH_S5PC100) += s5pc100
machine-$(CONFIG_ARCH_S5PV210) += s5pv210
-machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_SA1100) += sa1100
machine-$(CONFIG_ARCH_SHARK) += shark
machine-$(CONFIG_ARCH_SHMOBILE) += shmobile
+machine-$(CONFIG_ARCH_SIRF) += prima2
+machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
+machine-$(CONFIG_ARCH_STI) += sti
+machine-$(CONFIG_ARCH_SUNXI) += sunxi
machine-$(CONFIG_ARCH_TEGRA) += tegra
machine-$(CONFIG_ARCH_U300) += u300
machine-$(CONFIG_ARCH_U8500) += ux500
machine-$(CONFIG_ARCH_VERSATILE) += versatile
machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
+machine-$(CONFIG_ARCH_VIRT) += virt
machine-$(CONFIG_ARCH_VT8500) += vt8500
machine-$(CONFIG_ARCH_W90X900) += w90x900
+machine-$(CONFIG_ARCH_ZYNQ) += zynq
machine-$(CONFIG_FOOTBRIDGE) += footbridge
-machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
machine-$(CONFIG_PLAT_SPEAR) += spear
-machine-$(CONFIG_ARCH_STI) += sti
-machine-$(CONFIG_ARCH_VIRT) += virt
-machine-$(CONFIG_ARCH_ZYNQ) += zynq
-machine-$(CONFIG_ARCH_SUNXI) += sunxi
-machine-$(CONFIG_ARCH_KEYSTONE) += keystone
# Platform directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 9866cd736dee..a0f2721ea583 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -485,6 +485,12 @@
sirf,function = "usp0";
};
};
+ usp0_uart_nostreamctrl_pins_a: usp0@1 {
+ usp0 {
+ sirf,pins = "usp0_uart_nostreamctrl_grp";
+ sirf,function = "usp0_uart_nostreamctrl";
+ };
+ };
usp1_pins_a: usp1@0 {
usp1 {
sirf,pins = "usp1grp";
@@ -515,16 +521,16 @@
sirf,function = "pulse_count";
};
};
- cko0_rst_pins_a: cko0_rst@0 {
- cko0_rst {
- sirf,pins = "cko0_rstgrp";
- sirf,function = "cko0_rst";
+ cko0_pins_a: cko0@0 {
+ cko0 {
+ sirf,pins = "cko0grp";
+ sirf,function = "cko0";
};
};
- cko1_rst_pins_a: cko1_rst@0 {
- cko1_rst {
- sirf,pins = "cko1_rstgrp";
- sirf,function = "cko1_rst";
+ cko1_pins_a: cko1@0 {
+ cko1 {
+ sirf,pins = "cko1grp";
+ sirf,function = "cko1";
};
};
};
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
index 43bf3c796cba..0e7fed47bd8d 100644
--- a/arch/arm/boot/dts/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -147,7 +147,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
-
+ clocks = <&saif0>;
};
pcf8563: rtc@51 {
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 1f0d38d7b16f..e035f4664b97 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -195,7 +195,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
-
+ clocks = <&saif0>;
};
at24@51 {
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 880df2f13be8..44d9da57736e 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -184,7 +184,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
-
+ clocks = <&saif0>;
};
eeprom: eeprom@51 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 6a8acb01b1d3..9524a0571281 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -837,6 +837,7 @@
compatible = "fsl,imx28-saif";
reg = <0x80042000 0x2000>;
interrupts = <59 80>;
+ #clock-cells = <0>;
clocks = <&clks 53>;
dmas = <&dma_apbx 4>;
dma-names = "rx-tx";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 6dd9486c755b..ad3471ca17c7 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -61,6 +61,16 @@
mux-int-port = <2>;
mux-ext-port = <3>;
};
+
+ clocks {
+ clk_26M: codec_clock {
+ compatible = "fixed-clock";
+ reg=<0>;
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ gpios = <&gpio4 26 1>;
+ };
+ };
};
&esdhc1 {
@@ -229,6 +239,7 @@
MX51_PAD_EIM_A27__GPIO2_21 0x5
MX51_PAD_CSPI1_SS0__GPIO4_24 0x85
MX51_PAD_CSPI1_SS1__GPIO4_25 0x85
+ MX51_PAD_CSPI1_RDY__GPIO4_26 0x80000000
>;
};
};
@@ -255,7 +266,7 @@
sgtl5000: codec@0a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
- clock-frequency = <26000000>;
+ clocks = <&clk_26M>;
VDDA-supply = <&vdig_reg>;
VDDIO-supply = <&vvideo_reg>;
};
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index aaa33bc99f78..a63090267941 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -27,7 +27,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm2 0 50000 0 0>;
+ pwms = <&pwm2 0 50000>;
brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>;
default-brightness-level = <10>;
enable-gpios = <&gpio7 7 0>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 3895fbba8fce..569aa9f2c4ed 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -725,15 +725,15 @@
uart1 {
pinctrl_uart1_1: uart1grp-1 {
fsl,pins = <
- MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1c5
- MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1c5
+ MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1e4
+ MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1e4
>;
};
pinctrl_uart1_2: uart1grp-2 {
fsl,pins = <
- MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1c5
- MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1c5
+ MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1e4
+ MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1e4
>;
};
@@ -748,8 +748,8 @@
uart2 {
pinctrl_uart2_1: uart2grp-1 {
fsl,pins = <
- MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1c5
- MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1c5
+ MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1e4
+ MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1e4
>;
};
@@ -766,17 +766,17 @@
uart3 {
pinctrl_uart3_1: uart3grp-1 {
fsl,pins = <
- MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5
- MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5
- MX53_PAD_PATA_DA_1__UART3_CTS 0x1c5
- MX53_PAD_PATA_DA_2__UART3_RTS 0x1c5
+ MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
+ MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
+ MX53_PAD_PATA_DA_1__UART3_CTS 0x1e4
+ MX53_PAD_PATA_DA_2__UART3_RTS 0x1e4
>;
};
pinctrl_uart3_2: uart3grp-2 {
fsl,pins = <
- MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5
- MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5
+ MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
+ MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
>;
};
@@ -785,8 +785,8 @@
uart4 {
pinctrl_uart4_1: uart4grp-1 {
fsl,pins = <
- MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1c5
- MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1c5
+ MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1e4
+ MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1e4
>;
};
};
@@ -794,8 +794,8 @@
uart5 {
pinctrl_uart5_1: uart5grp-1 {
fsl,pins = <
- MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1c5
- MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1c5
+ MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1e4
+ MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1e4
>;
};
};
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 05e9489cf95c..bbeb623fc2c6 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -515,16 +515,16 @@
sirf,function = "pulse_count";
};
};
- cko0_rst_pins_a: cko0_rst@0 {
- cko0_rst {
- sirf,pins = "cko0_rstgrp";
- sirf,function = "cko0_rst";
+ cko0_pins_a: cko0@0 {
+ cko0 {
+ sirf,pins = "cko0grp";
+ sirf,function = "cko0";
};
};
- cko1_rst_pins_a: cko1_rst@0 {
- cko1_rst {
- sirf,pins = "cko1_rstgrp";
- sirf,function = "cko1_rst";
+ cko1_pins_a: cko1@0 {
+ cko1 {
+ sirf,pins = "cko1grp";
+ sirf,function = "cko1";
};
};
};
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index 957b21a71b4b..0f246c979262 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
@@ -166,6 +166,15 @@
reg = <0x9000 0x100>;
st,bank-name = "PIO31";
};
+
+ serial2-oe {
+ pinctrl_serial2_oe: serial2-1 {
+ st,pins {
+ output-enable = <&PIO11 3 ALT2 OUT>;
+ };
+ };
+ };
+
};
pin-controller-rear {
@@ -218,7 +227,6 @@
st,pins {
tx = <&PIO17 4 ALT2 OUT>;
rx = <&PIO17 5 ALT2 IN>;
- output-enable = <&PIO11 3 ALT2 OUT>;
};
};
};
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 3cecd9689a49..1a0326ea7d07 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -79,7 +79,7 @@
interrupts = <0 197 0>;
clocks = <&CLK_S_ICN_REG_0>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_serial2>;
+ pinctrl-0 = <&pinctrl_serial2 &pinctrl_serial2_oe>;
};
/* SBC_UART1 */
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index b3034da00a37..ae6a17aed9ee 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -47,6 +47,12 @@
regulator-max-microvolt = <3150000>;
};
+ vmmc2: regulator-vmmc2 {
+ compatible = "ti,twl4030-vmmc2";
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <3150000>;
+ };
+
vusb1v5: regulator-vusb1v5 {
compatible = "ti,twl4030-vusb1v5";
};
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index e1eb7dadda80..67d929cf9804 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -442,8 +442,8 @@
compatible = "fsl,mvf600-fec";
reg = <0x400d0000 0x1000>;
interrupts = <0 78 0x04>;
- clocks = <&clks VF610_CLK_ENET>,
- <&clks VF610_CLK_ENET>,
+ clocks = <&clks VF610_CLK_ENET0>,
+ <&clks VF610_CLK_ENET0>,
<&clks VF610_CLK_ENET>;
clock-names = "ipg", "ahb", "ptp";
status = "disabled";
@@ -453,8 +453,8 @@
compatible = "fsl,mvf600-fec";
reg = <0x400d1000 0x1000>;
interrupts = <0 79 0x04>;
- clocks = <&clks VF610_CLK_ENET>,
- <&clks VF610_CLK_ENET>,
+ clocks = <&clks VF610_CLK_ENET1>,
+ <&clks VF610_CLK_ENET1>,
<&clks VF610_CLK_ENET>;
clock-names = "ipg", "ahb", "ptp";
status = "disabled";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index a432e6c1dac1..39ad030ac0c7 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -26,7 +26,6 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/edma.h>
-#include <linux/err.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 510e5b13aa2e..1bc34c7567fd 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,7 +19,7 @@
#include <asm/smp.h>
#include <asm/smp_plat.h>
-static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned int mpidr, pcpu, pcluster, ret;
extern void secondary_startup(void);
@@ -40,7 +40,7 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
return 0;
}
-static void __cpuinit mcpm_secondary_init(unsigned int cpu)
+static void mcpm_secondary_init(unsigned int cpu)
{
mcpm_cpu_powered_up();
}
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index 7c868139bdb0..1571bea48bed 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -102,6 +102,8 @@ CONFIG_SND_SOC=m
CONFIG_SND_DAVINCI_SOC=m
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_DMADEVICES=y
+CONFIG_TI_EDMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index c86fd75e181a..ab2f7378352c 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -162,6 +162,8 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_TI_EDMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index fe0bdc361d2c..6e572c64cf5a 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -53,6 +53,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_OMAP_OCP2SCP=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -61,6 +62,7 @@ CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_SUN4I_EMAC=y
CONFIG_NET_CALXEDA_XGMAC=y
+CONFIG_KS8851=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=y
CONFIG_MDIO_SUN4I=y
@@ -89,6 +91,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_SIRF=y
CONFIG_I2C_TEGRA=y
CONFIG_SPI=y
+CONFIG_SPI_OMAP24XX=y
CONFIG_SPI_PL022=y
CONFIG_SPI_SIRF=y
CONFIG_SPI_TEGRA114=y
@@ -111,11 +114,12 @@ CONFIG_FB_SIMPLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MXC=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_AB8500_USB=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_OMAP_USB2=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 35f8cf299fa2..263ae3869e32 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -1,6 +1,8 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -48,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND_ECC_SMC=y
CONFIG_MTD_NAND=y
@@ -94,8 +95,10 @@ CONFIG_I2C_GPIO=y
CONFIG_I2C_NOMADIK=y
CONFIG_DEBUG_GPIO=y
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
CONFIG_MMC=y
-CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70c..000000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
- struct task_struct *tsk = current;
-
- dump->magic = CMAGIC;
- dump->start_code = tsk->mm->start_code;
- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
- dump->u_ssize = 0;
-
- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
- if (dump->start_stack < 0x04000000)
- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
- dump->regs = *regs;
- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index accefe099182..e406d575c94f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -89,7 +89,7 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline void arch_counter_set_user_access(void)
{
u32 cntkctl;
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c25dc4e9851..9672e978d50d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -89,13 +89,18 @@ extern unsigned int processor_id;
__val; \
})
+/*
+ * The memory clobber prevents gcc 4.5 from reordering the mrc before
+ * any is_smp() tests, which can cause undefined instruction aborts on
+ * ARM1136 r0 due to the missing extended CP15 registers.
+ */
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, " ext_reg \
: "=r" (__val) \
: \
- : "cc"); \
+ : "memory"); \
__val; \
})
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c4800..56211f2084ef 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,4 +130,10 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
+
#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e755..6f18da09668b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,8 +6,11 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
atomic64_t id;
+#else
+ int switch_pending;
#endif
unsigned int vmalloc_seq;
+ unsigned long sigpage;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d3..9b32f76bb0dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
* on non-ASID CPUs, the old mm will remain valid until the
* finish_arch_post_lock_switch() call.
*/
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ mm->context.switch_pending = 1;
else
cpu_switch_mm(mm->pgd, mm);
}
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- cpu_switch_mm(mm->pgd, mm);
+ struct mm_struct *mm = current->mm;
+
+ if (mm && mm->context.switch_pending) {
+ /*
+ * Preemption must be disabled during cpu_switch_mm() as we
+ * have some stateful cache flush implementations. Check
+ * switch_pending again in case we were preempted and the
+ * switch to this mm was already done.
+ */
+ preempt_disable();
+ if (mm->context.switch_pending) {
+ mm->context.switch_pending = 0;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+ preempt_enable_no_resched();
}
}
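
The mmu_context.h hunk above replaces the per-thread TIF_SWITCH_MM flag with a
per-mm switch_pending flag and re-checks it after disabling preemption, in case
the deferred switch was already done. A standalone C sketch of that double-check
pattern (not part of the patch; the preempt_*() and cpu_switch_mm() stubs below
are placeholders, not the kernel APIs):

	#include <stdio.h>

	struct mm_ctx { int switch_pending; };

	static void preempt_disable(void) { }             /* stub */
	static void preempt_enable_no_resched(void) { }   /* stub */
	static void cpu_switch_mm(struct mm_ctx *mm)
	{
		printf("switched to mm %p\n", (void *)mm);
	}

	static void finish_post_lock_switch(struct mm_ctx *mm)
	{
		if (mm && mm->switch_pending) {
			preempt_disable();
			/* re-check: the switch may already have been done */
			if (mm->switch_pending) {
				mm->switch_pending = 0;
				cpu_switch_mm(mm);
			}
			preempt_enable_no_resched();
		}
	}

	int main(void)
	{
		struct mm_ctx mm = { .switch_pending = 1 };
		finish_post_lock_switch(&mm);  /* performs the deferred switch */
		finish_post_lock_switch(&mm);  /* nothing pending, no-op */
		return 0;
	}
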
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 6363f3d1d505..4355f0ec44d6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_KUSER_HELPERS
#define __HAVE_ARCH_GATE_AREA 1
+#endif
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaac..413f3876341c 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -54,7 +54,6 @@ struct thread_struct {
#define start_thread(regs,pc,sp) \
({ \
- unsigned long *stack = (unsigned long *)sp; \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +64,6 @@ struct thread_struct {
regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
nommu_start_thread(regs); \
})
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089a..2b8114fcba09 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
-#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index fdbb9e369745..f467e9b3f8d5 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void)
isb();
}
+#include <asm/cputype.h>
#ifdef CONFIG_ARM_ERRATA_798181
+static inline int erratum_a15_798181(void)
+{
+ unsigned int midr = read_cpuid_id();
+
+ /* Cortex-A15 r0p0..r3p2 affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+ return 0;
+ return 1;
+}
+
static inline void dummy_flush_tlb_a15_erratum(void)
{
/*
@@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void)
dsb();
}
#else
+static inline int erratum_a15_798181(void)
+{
+ return 0;
+}
+
static inline void dummy_flush_tlb_a15_erratum(void)
{
}
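
The tlbflush.h hunk above moves erratum_a15_798181() into the header so other
callers can share the same MIDR test. A standalone sketch of that test (not
part of the patch; the sample MIDR values are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	/* Same check as the patch: Cortex-A15 (part 0xC0F), r0p0..r3p2 */
	static int erratum_a15_798181(uint32_t midr)
	{
		if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
			return 0;
		return 1;
	}

	int main(void)
	{
		uint32_t samples[] = {
			0x410fc0f0,	/* Cortex-A15 r0p0: affected */
			0x412fc0f2,	/* Cortex-A15 r2p2: affected */
			0x413fc0f3,	/* Cortex-A15 r3p3: fixed    */
			0x410fc090,	/* Cortex-A9: different part */
		};

		for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("midr 0x%08x -> %saffected\n", (unsigned)samples[i],
			       erratum_a15_798181(samples[i]) ? "" : "not ");
		return 0;
	}
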
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 50af92bac737..4371f45c5784 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -29,6 +29,7 @@
#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
#ifdef CONFIG_ARM_VIRT_EXT
/*
@@ -41,10 +42,21 @@
*/
extern int __boot_cpu_mode;
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ sync_cache_r(&__boot_cpu_mode);
+}
+
void __hyp_set_vectors(unsigned long phys_vector_base);
unsigned long __hyp_get_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
+#define sync_boot_mode()
#endif
#ifndef ZIMAGE
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d254af..18d76fd5a2af 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += a.out.h
header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b2e3bc..000000000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ARM_A_OUT_H__
-#define __ARM_A_OUT_H__
-
-#include <linux/personality.h>
-#include <linux/types.h>
-
-struct exec
-{
- __u32 a_info; /* Use macros N_MAGIC, etc for access */
- __u32 a_text; /* length of text, in bytes */
- __u32 a_data; /* length of data, in bytes */
- __u32 a_bss; /* length of uninitialized data area for file, in bytes */
- __u32 a_syms; /* length of symbol table data in file, in bytes */
- __u32 a_entry; /* start address */
- __u32 a_trsize; /* length of relocation info for text, in bytes */
- __u32 a_drsize; /* length of relocation info for data, in bytes */
-};
-
-/*
- * This is always the same
- */
-#define N_TXTADDR(a) (0x00008000)
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#define M_ARM 103
-
-#ifndef LIBRARY_START_TEXT
-#define LIBRARY_START_TEXT (0x00c00000)
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f90..d40d0ef389db 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -742,6 +742,18 @@ ENDPROC(__switch_to)
#endif
.endm
+ .macro kuser_pad, sym, size
+ .if (. - \sym) & 3
+ .rept 4 - (. - \sym) & 3
+ .byte 0
+ .endr
+ .endif
+ .rept (\size - (. - \sym)) / 4
+ .word 0xe7fddef1
+ .endr
+ .endm
+
+#ifdef CONFIG_KUSER_HELPERS
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup:
#error "incoherent kernel configuration"
#endif
- /* pad to next slot */
- .rept (16 - (. - __kuser_cmpxchg64)/4)
- .word 0
- .endr
-
- .align 5
+ kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb arm
usr_ret lr
- .align 5
+ kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
@@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup:
#endif
- .align 5
+ kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
- .rep 4
+ kuser_pad __kuser_get_tls, 16
+ .rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
@@ -932,14 +940,16 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+#endif
+
THUMB( .thumb )
/*
* Vector stubs.
*
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +996,17 @@ ENDPROC(vector_\name)
1:
.endm
- .globl __stubs_start
+ .section .stubs, "ax", %progbits
__stubs_start:
+ @ This must be the first word
+ .word vector_swi
+
+vector_rst:
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ b vector_und
+
/*
* Interrupt dispatcher
*/
@@ -1082,6 +1101,16 @@ __stubs_start:
.align 5
/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1123,19 @@ __stubs_start:
vector_fiq:
subs pc, lr, #4
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
- b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
- .align 5
-
-.LCvswi:
- .word vector_swi
-
- .globl __stubs_end
-__stubs_end:
-
- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+ .globl vector_fiq_offset
+ .equ vector_fiq_offset, vector_fiq
- .globl __vectors_start
+ .section .vectors, "ax", %progbits
__vectors_start:
- ARM( swi SYS_ERROR0 )
- THUMB( svc #0 )
- THUMB( nop )
- W(b) vector_und + stubs_offset
- W(ldr) pc, .LCvswi + stubs_offset
- W(b) vector_pabt + stubs_offset
- W(b) vector_dabt + stubs_offset
- W(b) vector_addrexcptn + stubs_offset
- W(b) vector_irq + stubs_offset
- W(b) vector_fiq + stubs_offset
-
- .globl __vectors_end
-__vectors_end:
+ W(b) vector_rst
+ W(b) vector_und
+ W(ldr) pc, __vectors_start + 0x1000
+ W(b) vector_pabt
+ W(b) vector_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_irq
+ W(b) vector_fiq
.data
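
The new kuser_pad macro above first aligns a helper to a 4-byte boundary with
zero bytes, then fills the rest of its fixed slot with 0xe7fddef1 poison words.
A standalone sketch of the same arithmetic (not part of the patch; the byte
counts passed in are example values, not the real helper sizes):

	#include <stdio.h>

	static void kuser_pad(const char *sym, unsigned used, unsigned slot)
	{
		unsigned rounded   = (used + 3) & ~3u;      /* zero bytes up to a word */
		unsigned pad_words = (slot - rounded) / 4;  /* 0xe7fddef1 filler words */

		printf("%-24s %3u bytes used: %u align byte(s) + %u poison word(s) = %u byte slot\n",
		       sym, used, rounded - used, pad_words, rounded + pad_words * 4);
	}

	int main(void)
	{
		/* example sizes only */
		kuser_pad("__kuser_cmpxchg64", 50, 64);
		kuser_pad("__kuser_memory_barrier", 8, 32);
		kuser_pad("__kuser_get_tls", 12, 16);
		return 0;
	}
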
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index e00621f1403f..52b26432c9a9 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -49,7 +49,7 @@ __irq_entry:
mov r1, sp
stmdb sp!, {lr}
@ routine called with r0 = irq number, r1 = struct pt_regs *
- bl nvic_do_IRQ
+ bl nvic_handle_irq
pop {lr}
@
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
#include <asm/irq.h>
#include <asm/traps.h>
+#define FIQ_OFFSET ({ \
+ extern void *vector_fiq_offset; \
+ (unsigned)&vector_fiq_offset; \
+ })
+
static unsigned long no_fiq_insn;
/* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
- memcpy((void *)0xffff001c, start, length);
+ void *base = (void *)0xffff0000;
#else
- memcpy(vectors_page + 0x1c, start, length);
+ void *base = vectors_page;
#endif
- flush_icache_range(0xffff001c, 0xffff001c + length);
+ unsigned offset = FIQ_OFFSET;
+
+ memcpy(base + offset, start, length);
+ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
if (!vectors_high())
- flush_icache_range(0x1c, 0x1c + length);
+ flush_icache_range(offset, offset + length);
}
int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(int start)
{
- no_fiq_insn = *(unsigned long *)0xffff001c;
+ unsigned offset = FIQ_OFFSET;
+ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
fiq_start = start;
}
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 76ab5ca50610..47cd974e57ea 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -149,7 +149,6 @@ ENDPROC(lookup_processor_type)
* r5 = proc_info pointer in physical address space
* r9 = cpuid (preserved)
*/
- __CPUINIT
__lookup_processor_type:
adr r3, __lookup_processor_type_data
ldmia r3, {r4 - r6}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 75f14cc3e073..14235ba64a90 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,7 +87,7 @@ ENTRY(stext)
ENDPROC(stext)
#ifdef CONFIG_SMP
- __CPUINIT
+ .text
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 45e8935cae4e..2c7cc1e03473 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,7 +343,7 @@ __turn_mmu_on_loc:
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
- __CPUINIT
+ .text
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..7b95de601357 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1020,7 +1020,7 @@ out_mdbgen:
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}
-static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+static int dbg_reset_notify(struct notifier_block *self,
unsigned long action, void *cpu)
{
if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata dbg_reset_nb = {
+static struct notifier_block dbg_reset_nb = {
.notifier_call = dbg_reset_notify,
};
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 4910232c4833..797b1a6a4906 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
ldr \reg3, [\reg2]
ldr \reg1, [\reg2, \reg3]
cmp \mode, \reg1 @ matches primary CPU boot mode?
- orrne r7, r7, #BOOT_CPU_MODE_MISMATCH
- strne r7, [r5, r6] @ record what happened and give up
+ orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+ strne \reg1, [\reg2, \reg3] @ record what happened and give up
.endm
#else /* ZIMAGE */
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..aebe0e99c153 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -157,8 +157,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
-static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
{
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+static struct notifier_block cpu_pmu_hotplug_notifier = {
.notifier_call = cpu_pmu_notify,
};
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d3ca4f6915af..536c85fe72a8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -197,6 +197,7 @@ void machine_shutdown(void)
*/
void machine_halt(void)
{
+ local_irq_disable();
smp_send_stop();
local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
*/
void machine_power_off(void)
{
+ local_irq_disable();
smp_send_stop();
if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
*/
void machine_restart(char *cmd)
{
+ local_irq_disable();
smp_send_stop();
arm_pm_restart(reboot_mode, cmd);
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
*/
static struct vm_area_struct gate_vma = {
.vm_start = 0xffff0000,
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr)
{
return in_gate_area(NULL, addr);
}
+#define is_gate_vma(vma) ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma) 0
+#endif
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma == &gate_vma) ? "[vectors]" : NULL;
+ return is_gate_vma(vma) ? "[vectors]" :
+ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+ "[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ if (!signal_page)
+ signal_page = get_signal_page();
+ if (!signal_page)
+ return -ENOMEM;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &signal_page);
+
+ if (ret == 0)
+ mm->context.sigpage = addr;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
}
#endif
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 219f1d73572a..70ded3fb42d9 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -46,8 +46,7 @@
extern void secondary_startup(void);
-static int __cpuinit psci_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
+static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
if (psci_ops.cpu_on)
return psci_ops.cpu_on(cpu_logical_map(cpu),
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 63af9a7ae512..afc2489ee13b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
+ sync_boot_mode();
+
if (is_hyp_mode_available()) {
pr_info("CPU: All CPU(s) started in HYP mode.\n");
pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
"vfpv4",
"idiva",
"idivt",
+ "vfpd32",
"lpae",
NULL
};
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..ab3304225272 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
+#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
@@ -15,12 +16,11 @@
#include <asm/elf.h>
#include <asm/cacheflush.h>
+#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "signal.h"
-
/*
* For ARM syscalls, we encode the syscall number into the instruction.
*/
@@ -40,11 +40,13 @@
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
+static unsigned long signal_return_offset;
+
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
__put_user(sigreturn_codes[idx+1], rc+1))
return 1;
- if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+#ifdef CONFIG_MMU
+ if (cpsr & MODE32_BIT) {
+ struct mm_struct *mm = current->mm;
+
/*
- * 32-bit code can use the new high-page
- * signal return code support except when the MPU has
- * protected the vectors page from PL0
+ * 32-bit code can use the signal return page
+ * except when the MPU has protected the vectors
+ * page from PL0
*/
- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
- } else {
+ retcode = mm->context.sigpage + signal_return_offset +
+ (idx << 2) + thumb;
+ } else
+#endif
+ {
/*
* Ensure that the instruction cache sees
* the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
+
+struct page *get_signal_page(void)
+{
+ unsigned long ptr;
+ unsigned offset;
+ struct page *page;
+ void *addr;
+
+ page = alloc_pages(GFP_KERNEL, 0);
+
+ if (!page)
+ return NULL;
+
+ addr = page_address(page);
+
+ /* Give the signal return code some randomness */
+ offset = 0x200 + (get_random_int() & 0x7fc);
+ signal_return_offset = offset;
+
+ /*
+ * Copy signal return handlers into the vector page, and
+ * set sigreturn to be a pointer to these.
+ */
+ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+ ptr = (unsigned long)addr + offset;
+ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+ return page;
+}
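
get_signal_page() above places the sigreturn code at a randomized, word-aligned
offset inside the new signal page. A standalone sketch of the offset arithmetic
(not part of the patch; rand() stands in for the kernel's get_random_int(), and
the 7-word code size comes from sigreturn_codes[]):

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	int main(void)
	{
		srand((unsigned)time(NULL));

		for (int i = 0; i < 4; i++) {
			unsigned offset = 0x200 + ((unsigned)rand() & 0x7fc);
			unsigned end    = offset + 7 * 4;  /* sizeof(sigreturn_codes) */

			/* 0x7fc clears the low two bits (word aligned) and caps the
			 * random part below 0x800, so end never exceeds 0xa18 < 0x1000 */
			printf("offset 0x%03x (aligned: %s), codes end at 0x%03x\n",
			       offset, (offset & 3) ? "no" : "yes", end);
		}
		return 0;
	}
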
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * linux/arch/arm/kernel/signal.h
- *
- * Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c5fb5469054b..c2b4f8f0be9a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,7 +58,7 @@ struct secondary_data secondary_data;
* control for which core is the next to come out of the secondary
* boot "holding pen"
*/
-volatile int __cpuinitdata pen_release = -1;
+volatile int pen_release = -1;
enum ipi_msg_type {
IPI_WAKEUP,
@@ -86,7 +86,7 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
return pgdir >> ARCH_PGD_SHIFT;
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
@@ -138,7 +138,7 @@ void __init smp_init_cpus(void)
smp_ops.smp_init_cpus();
}
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
if (smp_ops.smp_boot_secondary)
return smp_ops.smp_boot_secondary(cpu, idle);
@@ -170,7 +170,7 @@ static int platform_cpu_disable(unsigned int cpu)
/*
* __cpu_disable runs on the processor to be shutdown.
*/
-int __cpuinit __cpu_disable(void)
+int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
int ret;
@@ -216,7 +216,7 @@ static DECLARE_COMPLETION(cpu_died);
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
-void __cpuinit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
{
if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -306,7 +306,7 @@ void __ref cpu_die(void)
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
-static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+static void smp_store_cpu_info(unsigned int cpuid)
{
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
@@ -322,7 +322,7 @@ static void percpu_timer_setup(void);
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu;
@@ -521,7 +521,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode,
{
}
-static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+static void broadcast_timer_setup(struct clock_event_device *evt)
{
evt->name = "dummy_timer";
evt->features = CLOCK_EVT_FEAT_ONESHOT |
@@ -550,7 +550,7 @@ int local_timer_register(struct local_timer_ops *ops)
}
#endif
-static void __cpuinit percpu_timer_setup(void)
+static void percpu_timer_setup(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index a98b62dca2fa..c2edfff573c2 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
- unsigned int midr = read_cpuid_id();
-
- /* Cortex-A15 r0p0..r3p2 affected */
- if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
- return 0;
- return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
- return 0;
-}
-#endif
-
static void ipi_flush_tlb_a15_erratum(void *arg)
{
dmb();
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index f6fd1d4398c6..25956204ef23 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -187,7 +187,7 @@ core_initcall(twd_cpufreq_init);
#endif
-static void __cpuinit twd_calibrate_rate(void)
+static void twd_calibrate_rate(void)
{
unsigned long count;
u64 waitjiffies;
@@ -265,7 +265,7 @@ static void twd_get_clock(struct device_node *np)
/*
* Setup the local clock events for a CPU.
*/
-static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
+static int twd_timer_setup(struct clock_event_device *clk)
{
struct clock_event_device **this_cpu_clk;
int cpu = smp_processor_id();
@@ -308,7 +308,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
return 0;
}
-static struct local_timer_ops twd_lt_ops __cpuinitdata = {
+static struct local_timer_ops twd_lt_ops = {
.setup = twd_timer_setup,
.stop = twd_timer_stop,
};
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..ab517fcce21b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
#include <asm/tls.h>
#include <asm/system_misc.h>
-#include "signal.h"
-
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
return;
}
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
void __init early_trap_init(void *vectors_base)
{
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
- extern char __kuser_helper_start[], __kuser_helper_end[];
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
vectors_page = vectors_base;
/*
+ * Poison the vectors page with an undefined instruction. This
+ * instruction is chosen to be undefined for both ARM and Thumb
+ * ISAs. The Thumb version is an undefined instruction with a
+ * branch back to the undefined instruction.
+ */
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+ /*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
- /*
- * Do processor specific fixups for the kuser helpers
- */
- kuser_get_tls_init(vectors);
-
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
- sigreturn_codes, sizeof(sigreturn_codes));
+ kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
/*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f6..7bcee5c9b604 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
#endif
+ /*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+ __vectors_start = .;
+ .vectors 0 : AT(__vectors_start) {
+ *(.vectors)
+ }
+ . = __vectors_start + SIZEOF(.vectors);
+ __vectors_end = .;
+
+ __stubs_start = .;
+ .stubs 0x1000 : AT(__stubs_start) {
+ *(.stubs)
+ }
+ . = __stubs_start + SIZEOF(.stubs);
+ __stubs_end = .;
INIT_TEXT_SECTION(8)
.exit.text : {
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 64dbfa57204a..5306de350133 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -86,7 +86,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
}
}
-unsigned long __cpuinit calibrate_delay_is_known(void)
+unsigned long calibrate_delay_is_known(void)
{
delay_calibrated = true;
return lpj_fine;
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index afbc439f11d4..4cdb61c54459 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -505,7 +505,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = {
/*
* Amplifiers on the board
*/
-struct ths7303_platform_data ths7303_pdata = {
+static struct ths7303_platform_data ths7303_pdata = {
.ch_1 = 3,
.ch_2 = 3,
.ch_3 = 3,
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 42ef53f62c6c..86100d179694 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -860,7 +860,7 @@ static struct platform_device dm355_vpbe_display = {
},
};
-struct venc_platform_data dm355_venc_pdata = {
+static struct venc_platform_data dm355_venc_pdata = {
.setup_pinmux = dm355_vpbe_setup_pinmux,
.setup_clock = dm355_venc_setup_clock,
};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index fa7af5eda52d..dad28029ba9b 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1349,7 +1349,7 @@ static struct platform_device dm365_vpbe_display = {
},
};
-struct venc_platform_data dm365_venc_pdata = {
+static struct venc_platform_data dm365_venc_pdata = {
.setup_pinmux = dm365_vpbe_setup_pinmux,
.setup_clock = dm365_venc_setup_clock,
};
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 855d4a7b462d..5952e68c76c4 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -92,6 +92,7 @@ config SOC_EXYNOS5440
bool "SAMSUNG EXYNOS5440"
default y
depends on ARCH_EXYNOS5
+ select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
select ARCH_HAS_OPP
select HAVE_ARM_ARCH_TIMER
select AUTO_ZRELADDR
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index e970a7a4e278..53696154aead 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -14,7 +14,7 @@ obj- :=
obj-$(CONFIG_ARCH_EXYNOS) += common.o
-obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_S5P_PM) += pm.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 164685bd25c8..ba95e5db2501 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -58,7 +58,6 @@ static const char name_exynos5440[] = "EXYNOS5440";
static void exynos4_map_io(void);
static void exynos5_map_io(void);
-static void exynos5440_map_io(void);
static int exynos_init(void);
static struct cpu_table cpu_ids[] __initdata = {
@@ -95,7 +94,6 @@ static struct cpu_table cpu_ids[] __initdata = {
}, {
.idcode = EXYNOS5440_SOC_ID,
.idmask = EXYNOS5_SOC_MASK,
- .map_io = exynos5440_map_io,
.init = exynos_init,
.name = name_exynos5440,
},
@@ -150,11 +148,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
.length = SZ_64K,
.type = MT_DEVICE,
}, {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_CMU,
.pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
.length = SZ_128K,
@@ -268,20 +261,6 @@ static struct map_desc exynos5_iodesc[] __initdata = {
.pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
.length = SZ_64K,
.type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(EXYNOS5_PA_UART),
- .length = SZ_512K,
- .type = MT_DEVICE,
- },
-};
-
-static struct map_desc exynos5440_iodesc0[] __initdata = {
- {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(EXYNOS5440_PA_UART0),
- .length = SZ_512K,
- .type = MT_DEVICE,
},
};
@@ -388,11 +367,6 @@ static void __init exynos5_map_io(void)
iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc));
}
-static void __init exynos5440_map_io(void)
-{
- iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
-}
-
void __init exynos_init_time(void)
{
of_clk_init(NULL);
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 3e156bcddcb4..972490fc09d6 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -97,6 +97,5 @@ struct exynos_pmu_conf {
};
extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
-extern void s3c_cpu_resume(void);
#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 17a18ff3d71e..225ee8431c72 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -25,6 +25,7 @@
#include <mach/regs-pmu.h>
#include <plat/cpu.h>
+#include <plat/pm.h>
#include "common.h"
diff --git a/arch/arm/mach-exynos/headsmp.S b/arch/arm/mach-exynos/headsmp.S
index 5364d4bfa8bc..cdd9d91e9933 100644
--- a/arch/arm/mach-exynos/headsmp.S
+++ b/arch/arm/mach-exynos/headsmp.S
@@ -13,8 +13,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __CPUINIT
-
/*
* exynos4 specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-exynos/include/mach/memory.h b/arch/arm/mach-exynos/include/mach/memory.h
index 374ef2cf7152..2a4cdb7cb326 100644
--- a/arch/arm/mach-exynos/include/mach/memory.h
+++ b/arch/arm/mach-exynos/include/mach/memory.h
@@ -15,8 +15,13 @@
#define PLAT_PHYS_OFFSET UL(0x40000000)
+#ifndef CONFIG_ARM_LPAE
/* Maximum of 256MiB in one bank */
#define MAX_PHYSMEM_BITS 32
#define SECTION_SIZE_BITS 28
+#else
+#define MAX_PHYSMEM_BITS 36
+#define SECTION_SIZE_BITS 31
+#endif
#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index deba1308ff16..58b43e6f9262 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -75,7 +75,7 @@ static void __iomem *scu_base_addr(void)
static DEFINE_SPINLOCK(boot_lock);
-static void __cpuinit exynos_secondary_init(unsigned int cpu)
+static void exynos_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
@@ -90,7 +90,7 @@ static void __cpuinit exynos_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
unsigned long phys_cpu = cpu_logical_map(cpu);
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 41c20692a13f..c679db577269 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -217,6 +217,9 @@ static __init int exynos_pm_drvinit(void)
struct clk *pll_base;
unsigned int tmp;
+ if (soc_is_exynos5440())
+ return 0;
+
s3c_pm_init();
/* All wakeup disable */
@@ -340,6 +343,9 @@ static struct syscore_ops exynos_pm_syscore_ops = {
static __init int exynos_pm_syscore_init(void)
{
+ if (soc_is_exynos5440())
+ return 0;
+
register_syscore_ops(&exynos_pm_syscore_ops);
return 0;
}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf5e08d..3490a24f969e 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -276,8 +276,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset = DC21285_PCI_MEM;
- pci_ioremap_io(0, DC21285_PCI_IO);
-
pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index dc5d6becd8c7..88815795fe26 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -115,6 +115,7 @@ static int highbank_platform_notifier(struct notifier_block *nb,
{
struct resource *res;
int reg = -1;
+ u32 val;
struct device *dev = __dev;
if (event != BUS_NOTIFY_ADD_DEVICE)
@@ -141,10 +142,10 @@ static int highbank_platform_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
if (of_property_read_bool(dev->of_node, "dma-coherent")) {
- writel(0xff31, sregs_base + reg);
+ val = readl(sregs_base + reg);
+ writel(val | 0xff01, sregs_base + reg);
set_dma_ops(dev, &arm_coherent_dma_ops);
- } else
- writel(0, sregs_base + reg);
+ }
return NOTIFY_OK;
}
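
The highbank hunk above replaces a blind writel() with a read-modify-write so
only the coherence bits change. A minimal standalone sketch of that pattern
(not part of the patch; reg is a plain variable standing in for the ioremapped
sregs word, and the example starting value is made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t reg = 0x00120000;	/* pretend other fields are already set */

		uint32_t val = reg;		/* readl(sregs_base + reg)        */
		reg = val | 0xff01;		/* writel(val | 0xff01, sregs...) */

		printf("reg = 0x%08x\n", (unsigned)reg);  /* 0x0012ff01: old bits kept */
		return 0;
	}
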
diff --git a/arch/arm/mach-highbank/platsmp.c b/arch/arm/mach-highbank/platsmp.c
index a984573e0d02..32d75cf55cbc 100644
--- a/arch/arm/mach-highbank/platsmp.c
+++ b/arch/arm/mach-highbank/platsmp.c
@@ -24,7 +24,7 @@
extern void secondary_startup(void);
-static int __cpuinit highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
highbank_set_cpu_jump(cpu, secondary_startup);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 4282e99f5ca1..86567d980b07 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -199,7 +199,8 @@ static const char *pcie_axi_sels[] = { "axi", "ahb", };
static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", };
static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
-static const char *emi_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
+static const char *emi_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *vdo_axi_sels[] = { "axi", "ahb", };
static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
@@ -392,7 +393,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels));
- clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_sels, ARRAY_SIZE(emi_sels));
+ clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels));
clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index d617c0b7c809..b169a396d93b 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -183,6 +183,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
clk[VF610_CLK_ENET_TS] = imx_clk_gate("enet_ts", "enet_ts_sel", CCM_CSCDR1, 23);
+ clk[VF610_CLK_ENET0] = imx_clk_gate2("enet0", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(0));
+ clk[VF610_CLK_ENET1] = imx_clk_gate2("enet1", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(1));
clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7));
diff --git a/arch/arm/mach-imx/mx27.h b/arch/arm/mach-imx/mx27.h
index e074616d54ca..8a65f192e7f3 100644
--- a/arch/arm/mach-imx/mx27.h
+++ b/arch/arm/mach-imx/mx27.h
@@ -135,7 +135,7 @@
#define MX27_INT_GPT4 (NR_IRQS_LEGACY + 4)
#define MX27_INT_RTIC (NR_IRQS_LEGACY + 5)
#define MX27_INT_CSPI3 (NR_IRQS_LEGACY + 6)
-#define MX27_INT_SDHC (NR_IRQS_LEGACY + 7)
+#define MX27_INT_MSHC (NR_IRQS_LEGACY + 7)
#define MX27_INT_GPIO (NR_IRQS_LEGACY + 8)
#define MX27_INT_SDHC3 (NR_IRQS_LEGACY + 9)
#define MX27_INT_SDHC2 (NR_IRQS_LEGACY + 10)
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index c6e1ab544882..1f24c1fdfea4 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -53,7 +53,7 @@ void imx_scu_standby_enable(void)
writel_relaxed(val, scu_base);
}
-static int __cpuinit imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
imx_set_cpu_jump(cpu, v7_secondary_startup);
imx_enable_cpu(cpu, true);
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index fe4d9ff93a7e..b661c5c2870a 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -49,7 +49,7 @@ static const char *keystone_match[] __initconst = {
NULL,
};
-void keystone_restart(char mode, const char *cmd)
+void keystone_restart(enum reboot_mode mode, const char *cmd)
{
u32 val;
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index 1d4181e1daf2..14378e3fef16 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -21,7 +21,7 @@
#include "keystone.h"
-static int __cpuinit keystone_smp_boot_secondary(unsigned int cpu,
+static int keystone_smp_boot_secondary(unsigned int cpu,
struct task_struct *idle)
{
unsigned long start = virt_to_phys(&secondary_startup);
diff --git a/arch/arm/mach-msm/headsmp.S b/arch/arm/mach-msm/headsmp.S
index bcd5af223dea..6c62c3f82fe6 100644
--- a/arch/arm/mach-msm/headsmp.S
+++ b/arch/arm/mach-msm/headsmp.S
@@ -11,8 +11,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __CPUINIT
-
/*
* MSM specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 00cdb0a5dac8..3f06edcdd0ce 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -38,7 +38,7 @@ static inline int get_core_count(void)
return ((read_cpuid_id() >> 4) & 3) + 1;
}
-static void __cpuinit msm_secondary_init(unsigned int cpu)
+static void msm_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
@@ -54,7 +54,7 @@ static void __cpuinit msm_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-static __cpuinit void prepare_cold_cpu(unsigned int cpu)
+static void prepare_cold_cpu(unsigned int cpu)
{
int ret;
ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
@@ -73,7 +73,7 @@ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
"address\n");
}
-static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
static int cold_boot_done;
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index b6418fd5fe0d..8697cfc0d0b6 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -139,7 +139,7 @@ static struct clocksource msm_clocksource = {
};
#ifdef CONFIG_LOCAL_TIMERS
-static int __cpuinit msm_local_timer_setup(struct clock_event_device *evt)
+static int msm_local_timer_setup(struct clock_event_device *evt)
{
/* Use existing clock_event for cpu 0 */
if (!smp_processor_id())
@@ -164,7 +164,7 @@ static void msm_local_timer_stop(struct clock_event_device *evt)
disable_percpu_irq(evt->irq);
}
-static struct local_timer_ops msm_local_timer_ops __cpuinitdata = {
+static struct local_timer_ops msm_local_timer_ops = {
.setup = msm_local_timer_setup,
.stop = msm_local_timer_stop,
};
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index be117591f7f2..4c24303ec481 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -28,7 +28,7 @@
#include <asm/cacheflush.h>
#include "armada-370-xp.h"
-unsigned long __cpuinitdata coherency_phys_base;
+unsigned long coherency_phys_base;
static void __iomem *coherency_base;
static void __iomem *coherency_cpu_base;
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index 7147300c8af2..8a1b0c96e9ec 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,8 +21,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __CPUINIT
-
/*
* Armada XP specific entry point for secondary CPUs.
* We add the CPU to the coherency fabric and then jump to secondary
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index 93f2f3ab45f1..ce81d3031405 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -71,13 +71,12 @@ void __init set_secondary_cpus_clock(void)
}
}
-static void __cpuinit armada_xp_secondary_init(unsigned int cpu)
+static void armada_xp_secondary_init(unsigned int cpu)
{
armada_xp_mpic_smp_cpu_init();
}
-static int __cpuinit armada_xp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
+static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
pr_info("Booting CPU %d\n", cpu);
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 627fa7e41fba..3eed0006d189 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,7 +62,7 @@ config SOC_OMAP5
select HAVE_SMP
select COMMON_CLK
select HAVE_ARM_ARCH_TIMER
- select ARM_ERRATA_798181
+ select ARM_ERRATA_798181 if SMP
config SOC_AM33XX
bool "AM33XX support"
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index e5fbfed69aa2..be5d005ebad2 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -15,6 +15,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
+#include <linux/clk.h>
#include <asm/mach/arch.h>
@@ -35,6 +36,21 @@ static struct of_device_id omap_dt_match_table[] __initdata = {
{ }
};
+/*
+ * Create alias for USB host PHY clock.
+ * Remove this when clock phandle can be provided via DT
+ */
+static void __init legacy_init_ehci_clk(char *clkname)
+{
+ int ret;
+
+ ret = clk_add_alias("main_clk", NULL, clkname, NULL);
+ if (ret) {
+ pr_err("%s:Failed to add main_clk alias to %s :%d\n",
+ __func__, clkname, ret);
+ }
+}
+
static void __init omap_generic_init(void)
{
omap_sdrc_init(NULL, NULL);
@@ -45,10 +61,15 @@ static void __init omap_generic_init(void)
* HACK: call display setup code for selected boards to enable omapdss.
* This will be removed when omapdss supports DT.
*/
- if (of_machine_is_compatible("ti,omap4-panda"))
+ if (of_machine_is_compatible("ti,omap4-panda")) {
omap4_panda_display_init_of();
+ legacy_init_ehci_clk("auxclk3_ck");
+
+ }
else if (of_machine_is_compatible("ti,omap4-sdp"))
omap_4430sdp_display_init_of();
+ else if (of_machine_is_compatible("ti,omap5-uevm"))
+ legacy_init_ehci_clk("auxclk1_ck");
}
#ifdef CONFIG_SOC_OMAP2420
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4ea308114165..75e92952c18e 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -20,8 +20,6 @@
#include "omap44xx.h"
- __CPUINIT
-
/* Physical address needed since MMU not enabled yet on secondary core */
#define AUX_CORE_BOOT0_PA 0x48281800
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index f993a4188701..f991016e2a6a 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -291,7 +291,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
* @cpu : CPU ID
* @power_state: CPU low power state.
*/
-int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
+int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
unsigned int cpu_state = 0;
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 98a11463a843..8708b2a9da45 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -51,7 +51,7 @@ void __iomem *omap4_get_scu_base(void)
return scu_base;
}
-static void __cpuinit omap4_secondary_init(unsigned int cpu)
+static void omap4_secondary_init(unsigned int cpu)
{
/*
* Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
@@ -72,7 +72,7 @@ static void __cpuinit omap4_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
static struct clockdomain *cpu1_clkdm;
static bool booted;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f8bb3b9b6a76..813c61558a5f 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -323,8 +323,8 @@ static void irq_save_secure_context(void)
#endif
#ifdef CONFIG_HOTPLUG_CPU
-static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int irq_cpu_hotplug_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)hcpu;
diff --git a/arch/arm/mach-prima2/headsmp.S b/arch/arm/mach-prima2/headsmp.S
index 5b8a408d8921..d86fe33c5f53 100644
--- a/arch/arm/mach-prima2/headsmp.S
+++ b/arch/arm/mach-prima2/headsmp.S
@@ -9,8 +9,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __CPUINIT
-
/*
* SIRFSOC specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 1c3de7bed841..3dbcb1ab6e37 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -44,7 +44,7 @@ void __init sirfsoc_map_scu(void)
scu_base = (void __iomem *)SIRFSOC_VA(base);
}
-static void __cpuinit sirfsoc_secondary_init(unsigned int cpu)
+static void sirfsoc_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
@@ -65,7 +65,7 @@ static struct of_device_id rsc_ids[] = {
{},
};
-static int __cpuinit sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
struct device_node *np;
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index f6726bb4eb95..3a3362fa793e 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -477,16 +477,24 @@ static int em_x270_usb_hub_init(void)
/* USB Hub power-on and reset */
gpio_direction_output(usb_hub_reset, 1);
gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
- regulator_enable(em_x270_usb_ldo);
+ err = regulator_enable(em_x270_usb_ldo);
+ if (err)
+ goto err_free_rst_gpio;
+
gpio_set_value(usb_hub_reset, 0);
gpio_set_value(usb_hub_reset, 1);
regulator_disable(em_x270_usb_ldo);
- regulator_enable(em_x270_usb_ldo);
+ err = regulator_enable(em_x270_usb_ldo);
+ if (err)
+ goto err_free_rst_gpio;
+
gpio_set_value(usb_hub_reset, 0);
gpio_set_value(GPIO9_USB_VBUS_EN, 1);
return 0;
+err_free_rst_gpio:
+ gpio_free(usb_hub_reset);
err_free_vbus_gpio:
gpio_free(GPIO9_USB_VBUS_EN);
err_free_usb_ldo:
@@ -592,7 +600,7 @@ err_irq:
return err;
}
-static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
+static int em_x270_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -600,10 +608,11 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000;
regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV);
- regulator_enable(em_x270_sdio_ldo);
+ return regulator_enable(em_x270_sdio_ldo);
} else {
regulator_disable(em_x270_sdio_ldo);
}
+ return 0;
}
static void em_x270_mci_exit(struct device *dev, void *data)
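Two related fixes appear above: regulator_enable() is __must_check, so em_x270_usb_hub_init() now unwinds through a new error label, and the MMC setpower callback (here and in the PXA boards that follow) changes to return int so the enable status can be propagated. A condensed sketch of the setpower shape, with a hypothetical name; as in the patch, the power-off path still ignores regulator_disable()'s return value:

#include <linux/regulator/consumer.h>
#include <linux/types.h>

static int sketch_setpower(struct regulator *reg, bool on)
{
	if (on)
		return regulator_enable(reg);	/* propagate failure to the caller */

	regulator_disable(reg);			/* power-off path keeps returning 0 */
	return 0;
}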
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d2c652318376..dd70343c8708 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -408,7 +408,7 @@ static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_in
return err;
}
-static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
+static int mainstone_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -420,6 +420,7 @@ static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
printk(KERN_DEBUG "%s: off\n", __func__);
MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON;
}
+ return 0;
}
static void mainstone_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index fb7f1d1627dc..13e5b00eae90 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -335,7 +335,7 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
return err;
}
-static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
+static int pcm990_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data *p_d = dev->platform_data;
u8 val;
@@ -348,6 +348,7 @@ static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
val &= ~PCM990_CTRL_MMC2PWR;
pcm990_cpld_writeb(PCM990_CTRL_MMC2PWR, PCM990_CTRL_REG5);
+ return 0;
}
static void pcm990_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 711d37e26bd8..aedf053a1de5 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -258,7 +258,7 @@ err_free_2:
return err;
}
-static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
+static int poodle_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -270,6 +270,8 @@ static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
gpio_set_value(POODLE_GPIO_SD_PWR1, 0);
gpio_set_value(POODLE_GPIO_SD_PWR, 0);
}
+
+ return 0;
}
static void poodle_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 2125df0444e7..4c29173026e8 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -598,7 +598,7 @@ static inline void spitz_spi_init(void) {}
* NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to
* give the card a chance to fully insert/eject.
*/
-static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
+static int spitz_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -606,6 +606,8 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V);
else
spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0);
+
+ return 0;
}
static struct pxamci_platform_data spitz_mci_platform_data = {
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 88fde43c948c..62aea3e835f3 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -734,9 +734,10 @@ static int stargate2_mci_init(struct device *dev,
*
* Very simple control. Either it is on or off and is controlled by
* a gpio pin */
-static void stargate2_mci_setpower(struct device *dev, unsigned int vdd)
+static int stargate2_mci_setpower(struct device *dev, unsigned int vdd)
{
gpio_set_value(SG2_SD_POWER_ENABLE, !!vdd);
+ return 0;
}
static void stargate2_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 6d9252e081ce..7791ac76f945 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -208,7 +208,7 @@ config S3C24XX_GPIO_EXTRA128
config S3C24XX_PLL
bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
- depends on ARM_S3C24XX
+ depends on ARM_S3C24XX_CPUFREQ
help
Compile in support for changing the PLL frequency from the
S3C24XX series CPUfreq driver. The PLL takes time to settle
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf6fc1d..564553694b54 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
}
};
-static struct clk init_clocks[] = {
- {
- .name = "lcd",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_LCDC,
- }, {
- .name = "gpio",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_GPIO,
- }, {
- .name = "usb-host",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBH,
- }, {
- .name = "usb-device",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBD,
- }, {
- .name = "timers",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_PWMT,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.0",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART0,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.1",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART1,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.2",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART2,
- }, {
- .name = "rtc",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_RTC,
- }, {
- .name = "watchdog",
- .parent = &clk_p,
- .ctrlbit = 0,
- }, {
- .name = "usb-bus-host",
- .parent = &clk_usb_bus,
- }, {
- .name = "usb-bus-gadget",
- .parent = &clk_usb_bus,
- },
+static struct clk clk_lcd = {
+ .name = "lcd",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_LCDC,
+};
+
+static struct clk clk_gpio = {
+ .name = "gpio",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_GPIO,
+};
+
+static struct clk clk_usb_host = {
+ .name = "usb-host",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBH,
+};
+
+static struct clk clk_usb_device = {
+ .name = "usb-device",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBD,
+};
+
+static struct clk clk_timers = {
+ .name = "timers",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_PWMT,
+};
+
+struct clk s3c24xx_clk_uart0 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.0",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART0,
+};
+
+struct clk s3c24xx_clk_uart1 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.1",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART1,
+};
+
+struct clk s3c24xx_clk_uart2 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.2",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART2,
+};
+
+static struct clk clk_rtc = {
+ .name = "rtc",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_RTC,
+};
+
+static struct clk clk_watchdog = {
+ .name = "watchdog",
+ .parent = &clk_p,
+ .ctrlbit = 0,
+};
+
+static struct clk clk_usb_bus_host = {
+ .name = "usb-bus-host",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk clk_usb_bus_gadget = {
+ .name = "usb-bus-gadget",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk *init_clocks[] = {
+ &clk_lcd,
+ &clk_gpio,
+ &clk_usb_host,
+ &clk_usb_device,
+ &clk_timers,
+ &s3c24xx_clk_uart0,
+ &s3c24xx_clk_uart1,
+ &s3c24xx_clk_uart2,
+ &clk_rtc,
+ &clk_watchdog,
+ &clk_usb_bus_host,
+ &clk_usb_bus_gadget,
};
/* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
{
unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
- struct clk *clkp;
struct clk *xtal;
int ret;
int ptr;
@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)
/* register clocks from clock array */
- clkp = init_clocks;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
+ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
+ struct clk *clkp = init_clocks[ptr];
+
/* ensure that we note the clock state */
clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b5680826..aaf006d1d6dc 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
+ CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
+ CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
+ CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
};
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index 6f9865467258..bfd920083a3b 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -23,7 +23,6 @@
#include <linux/init.h>
#include <asm/memory.h>
- __CPUINIT
/*
* Boot code for secondary CPUs.
*
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 559d1ce5f57e..a9d212498987 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -14,8 +14,6 @@
#include <linux/init.h>
#include <asm/memory.h>
- __CPUINIT
-
ENTRY(shmobile_invalidate_start)
bl v7_invalidate_l1
b secondary_startup
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 80991b35f4ac..22a05a869d25 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -30,7 +30,7 @@
#define EMEV2_SCU_BASE 0x1e000000
-static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
return 0;
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 526cfaae81c1..9bdf810f2a87 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -81,7 +81,7 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
return ret ? ret : 1;
}
-static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
struct r8a7779_pm_ch *ch = NULL;
int ret = -EIO;
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d613113a04bd..d5fc3ed4e315 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -48,7 +48,7 @@ void __init sh73a0_register_twd(void)
}
#endif
-static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
cpu = cpu_logical_map(cpu);
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index 9004bfb1756e..95c115d8b5ee 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -10,7 +10,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __CPUINIT
.arch armv7-a
ENTRY(secondary_trampoline)
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index b51ce8c7929d..5356a72bc8ce 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -29,7 +29,7 @@
#include "core.h"
-static int __cpuinit socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index 904f2c907b46..a99d90a4d09c 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -37,7 +37,7 @@ void __init spear13xx_l2x0_init(void);
void spear_restart(enum reboot_mode, const char *);
void spear13xx_secondary_startup(void);
-void __cpuinit spear13xx_cpu_die(unsigned int cpu);
+void spear13xx_cpu_die(unsigned int cpu);
extern struct smp_operations spear13xx_smp_ops;
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 9c4c722c954e..5c4a19887b2b 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -24,7 +24,7 @@ static DEFINE_SPINLOCK(boot_lock);
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
-static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
+static void spear13xx_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
@@ -40,7 +40,7 @@ static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index d04e3bfe1918..835833e3c4f8 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -11,8 +11,9 @@ menuconfig ARCH_STI
select HAVE_SMP
select HAVE_ARM_SCU if SMP
select ARCH_REQUIRE_GPIOLIB
- select ARM_ERRATA_720789
select ARM_ERRATA_754322
+ select ARM_ERRATA_764369
+ select ARM_ERRATA_775420
select PL310_ERRATA_753970 if CACHE_PL310
select PL310_ERRATA_769419 if CACHE_PL310
help
diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
index 977a863468fc..dce50d983a8e 100644
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -27,7 +27,7 @@
#include "smp.h"
-static void __cpuinit write_pen_release(int val)
+static void write_pen_release(int val)
{
pen_release = val;
smp_wmb();
@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release(int val)
static DEFINE_SPINLOCK(boot_lock);
-void __cpuinit sti_secondary_init(unsigned int cpu)
+void sti_secondary_init(unsigned int cpu)
{
trace_hardirqs_off();
@@ -54,7 +54,7 @@ void __cpuinit sti_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-int __cpuinit sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 24db4ac428ae..97b33a2a2d75 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -35,7 +35,7 @@
static cpumask_t tegra_cpu_init_mask;
-static void __cpuinit tegra_secondary_init(unsigned int cpu)
+static void tegra_secondary_init(unsigned int cpu)
{
cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
}
@@ -167,7 +167,7 @@ static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
return ret;
}
-static int __cpuinit tegra_boot_secondary(unsigned int cpu,
+static int tegra_boot_secondary(unsigned int cpu,
struct task_struct *idle)
{
if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20)
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 94e69bee3da5..261fec140c06 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -191,7 +191,7 @@ static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
[TEGRA_SUSPEND_LP0] = "LP0",
};
-static int __cpuinit tegra_suspend_enter(suspend_state_t state)
+static int tegra_suspend_enter(suspend_state_t state)
{
enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 14d90469392f..1f296e796a4f 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -54,7 +54,7 @@ static void __iomem *scu_base_addr(void)
static DEFINE_SPINLOCK(boot_lock);
-static void __cpuinit ux500_secondary_init(unsigned int cpu)
+static void ux500_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
@@ -69,7 +69,7 @@ static void __cpuinit ux500_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5b799c29886e..5f252569c689 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -91,7 +91,7 @@ static void __init zynq_map_io(void)
zynq_scu_map_io();
}
-static void zynq_system_reset(char mode, const char *cmd)
+static void zynq_system_reset(enum reboot_mode mode, const char *cmd)
{
zynq_slcr_system_reset();
}
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index fbbd0e21c404..3040d219570f 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -27,7 +27,7 @@ extern void secondary_startup(void);
extern char zynq_secondary_trampoline;
extern char zynq_secondary_trampoline_jump;
extern char zynq_secondary_trampoline_end;
-extern int __cpuinit zynq_cpun_start(u32 address, int cpu);
+extern int zynq_cpun_start(u32 address, int cpu);
extern struct smp_operations zynq_smp_ops __initdata;
#endif
diff --git a/arch/arm/mach-zynq/headsmp.S b/arch/arm/mach-zynq/headsmp.S
index d183cd234a9b..d4cd5f34fe5c 100644
--- a/arch/arm/mach-zynq/headsmp.S
+++ b/arch/arm/mach-zynq/headsmp.S
@@ -9,8 +9,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __CPUINIT
-
ENTRY(zynq_secondary_trampoline)
ldr r0, [pc]
bx r0
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index 023f225493f2..689fbbc3d9c8 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -30,11 +30,11 @@
/*
* Store number of cores in the system
* Because of scu_get_core_count() must be in __init section and can't
- * be called from zynq_cpun_start() because it is in __cpuinit section.
+ * be called from zynq_cpun_start() because it is not in __init section.
*/
static int ncores;
-int __cpuinit zynq_cpun_start(u32 address, int cpu)
+int zynq_cpun_start(u32 address, int cpu)
{
u32 trampoline_code_size = &zynq_secondary_trampoline_end -
&zynq_secondary_trampoline;
@@ -92,7 +92,7 @@ int __cpuinit zynq_cpun_start(u32 address, int cpu)
}
EXPORT_SYMBOL(zynq_cpun_start);
-static int __cpuinit zynq_boot_secondary(unsigned int cpu,
+static int zynq_boot_secondary(unsigned int cpu,
struct task_struct *idle)
{
return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..db5c2cab8fda 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v6
bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
config TLS_REG_EMUL
bool
+ select NEED_KUSER_HELPERS
help
An SMP system using a pre-ARMv6 processor (there are apparently
a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,40 @@ config TLS_REG_EMUL
config NEEDS_SYSCALL_FOR_CMPXCHG
bool
+ select NEED_KUSER_HELPERS
help
SMP on a pre-ARMv6 processor? Well OK then.
Forget about fast user space cmpxchg support.
It is just not possible.
+config NEED_KUSER_HELPERS
+ bool
+
+config KUSER_HELPERS
+ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+ default y
+ help
+ Warning: disabling this option may break user programs.
+
+ Provide kuser helpers in the vector page. The kernel provides
+ helper code to userspace in read only form at a fixed location
+ in the high vector page to allow userspace to be independent of
+ the CPU type fitted to the system. This permits binaries to be
+ run on ARMv4 through to ARMv7 without modification.
+
+ However, the fixed address nature of these helpers can be used
+ by ROP (return-oriented programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+ are built specifically for your platform, and make no use of
+ these helpers, then you can turn this option off. However,
+ when such a binary or library is run, it will receive a SIGILL
+ signal, which will terminate the program.
+
+ Say N here only if you are absolutely certain that you do not
+ need these helpers; otherwise, the safe option is to say Y.
+
config DMA_CACHE_RWFO
bool "Enable read/write for ownership DMA cache maintenance"
depends on CPU_V6K && SMP
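The help text above concerns helpers exported to userspace at fixed addresses near the top of the vector page (the ABI is documented in Documentation/arm/kernel_user_helpers.txt). As an illustration of why binaries may depend on them, and why the fixed addresses are attractive for ROP, a small userspace sketch of the calling convention; this is an example, not code from the patch, and it raises SIGILL when KUSER_HELPERS is disabled:

typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
typedef void *(__kuser_get_tls_t)(void);

#define __kuser_cmpxchg	(*(__kuser_cmpxchg_t *)0xffff0fc0)	/* returns 0 on success */
#define __kuser_get_tls	(*(__kuser_get_tls_t *)0xffff0fe0)

static int try_lock(volatile int *lock)
{
	/* atomically move *lock from 0 to 1; non-zero means it was already held */
	return __kuser_cmpxchg(0, 1, lock);
}

static void *tls_base(void)
{
	return __kuser_get_tls();	/* TLS pointer even on pre-ARMv6 CPUs */
}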
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724b..4a0544492f10 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
- dummy_flush_tlb_a15_erratum();
+ if (erratum_a15_798181())
+ dummy_flush_tlb_a15_erratum();
}
atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a2392..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void)
{
+ phys_addr_t memblock_limit = 0;
int i, j, highmem = 0;
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
bank->size = size_limit;
}
#endif
- if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
- arm_lowmem_limit = bank->start + bank->size;
+ if (!bank->highmem) {
+ phys_addr_t bank_end = bank->start + bank->size;
+ if (bank_end > arm_lowmem_limit)
+ arm_lowmem_limit = bank_end;
+
+ /*
+ * Find the first non-section-aligned page, and point
+ * memblock_limit at it. This relies on rounding the
+ * limit down to be section-aligned, which happens at
+ * the end of this function.
+ *
+ * With this algorithm, the start or end of almost any
+ * bank can be non-section-aligned. The only exception
+ * is that the start of bank 0 must be section-
+ * aligned, since otherwise memory would need to be
+ * allocated when mapping the start of bank 0, which
+ * occurs before any free memory is mapped.
+ */
+ if (!memblock_limit) {
+ if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+ memblock_limit = bank->start;
+ else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+ memblock_limit = bank_end;
+ }
+ }
j++;
}
#ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
#endif
meminfo.nr_banks = j;
high_memory = __va(arm_lowmem_limit - 1) + 1;
- memblock_set_current_limit(arm_lowmem_limit);
+
+ /*
+ * Round the memblock limit down to a section size. This
+ * helps to ensure that we will allocate memory from the
+ * last full section, which should be mapped.
+ */
+ if (memblock_limit)
+ memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
+ memblock_set_current_limit(memblock_limit);
}
static inline void prepare_page_table(void)
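The comments above describe the new memblock_limit heuristic: remember the first non-section-aligned bank boundary, then round it down to a section boundary so early allocations stay within fully mapped sections. A standalone toy of that arithmetic, assuming a 1 MiB section size and a made-up bank layout:

#include <stdint.h>
#include <stdio.h>

#define SECTION_SIZE		(1ULL << 20)		/* assumed 1 MiB sections */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define round_down(x, a)	((x) & ~((a) - 1))

int main(void)
{
	uint64_t bank_start = 0x80000000ULL;		/* hypothetical bank */
	uint64_t bank_end   = 0x9ff40000ULL;		/* ends mid-section */
	uint64_t limit = 0;

	if (!IS_ALIGNED(bank_start, SECTION_SIZE))
		limit = bank_start;
	else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
		limit = bank_end;

	if (limit)
		limit = round_down(limit, SECTION_SIZE);

	printf("memblock limit: %#llx\n", (unsigned long long)limit);	/* 0x9ff00000 */
	return 0;
}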
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors = early_alloc(PAGE_SIZE * 2);
early_trap_init(vectors);
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
map.type = MT_HIGH_VECTORS;
+#else
+ map.type = MT_LOW_VECTORS;
+#endif
create_mapping(&map);
if (!vectors_high()) {
map.virtual = 0;
+ map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS;
create_mapping(&map);
}
+ /* Now create a kernel read-only mapping */
+ map.pfn += 1;
+ map.virtual = 0xffff0000 + PAGE_SIZE;
+ map.length = PAGE_SIZE;
+ map.type = MT_LOW_VECTORS;
+ create_mapping(&map);
+
/*
* Ask the machine support to map in the statically mapped devices.
*/
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- memblock_set_current_limit(arm_lowmem_limit);
-
build_mem_type_table();
prepare_page_table();
map_lowmem();
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2bb61e703d6c..d1a2d05971e0 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -443,8 +443,6 @@ ENTRY(cpu_arm1020_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __CPUINIT
-
.type __arm1020_setup, #function
__arm1020_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 8f96aa40f510..9d89405c3d03 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -425,8 +425,6 @@ ENTRY(cpu_arm1020e_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __CPUINIT
-
.type __arm1020e_setup, #function
__arm1020e_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8ebe4a469a22..6f01a0ae3b30 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -407,8 +407,6 @@ ENTRY(cpu_arm1022_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __CPUINIT
-
.type __arm1022_setup, #function
__arm1022_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 093fc7e520c3..4799a24b43e6 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -396,9 +396,6 @@ ENTRY(cpu_arm1026_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
-
- __CPUINIT
-
.type __arm1026_setup, #function
__arm1026_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 0ac908c7ade1..d42c37f9f5bc 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -116,8 +116,6 @@ ENTRY(cpu_arm720_reset)
ENDPROC(cpu_arm720_reset)
.popsection
- __CPUINIT
-
.type __arm710_setup, #function
__arm710_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index fde2d2a794cf..9b0ae90cbf17 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -60,8 +60,6 @@ ENTRY(cpu_arm740_reset)
ENDPROC(cpu_arm740_reset)
.popsection
- __CPUINIT
-
.type __arm740_setup, #function
__arm740_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 6ddea3e464bd..f6cc3f63ce39 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -51,8 +51,6 @@ ENTRY(cpu_arm7tdmi_reset)
ENDPROC(cpu_arm7tdmi_reset)
.popsection
- __CPUINIT
-
.type __arm7tdmi_setup, #function
__arm7tdmi_setup:
mov pc, lr
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2556cf1c2da1..549557df6d57 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -410,8 +410,6 @@ ENTRY(cpu_arm920_do_resume)
ENDPROC(cpu_arm920_do_resume)
#endif
- __CPUINIT
-
.type __arm920_setup, #function
__arm920_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4464c49d7449..2a758b06c6f6 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -388,8 +388,6 @@ ENTRY(cpu_arm922_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __CPUINIT
-
.type __arm922_setup, #function
__arm922_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 281eb9b9c1d6..97448c3acf38 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -438,8 +438,6 @@ ENTRY(cpu_arm925_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __CPUINIT
-
.type __arm925_setup, #function
__arm925_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 344c8a548cc0..0f098f407c9f 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -425,8 +425,6 @@ ENTRY(cpu_arm926_do_resume)
ENDPROC(cpu_arm926_do_resume)
#endif
- __CPUINIT
-
.type __arm926_setup, #function
__arm926_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 8da189d4a402..1c39a704ff6e 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -273,8 +273,6 @@ ENDPROC(arm940_dma_unmap_area)
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm940
- __CPUINIT
-
.type __arm940_setup, #function
__arm940_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f666cf34075a..0289cd905e73 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -326,8 +326,6 @@ ENTRY(cpu_arm946_dcache_clean_area)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
- __CPUINIT
-
.type __arm946_setup, #function
__arm946_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 8881391dfb9e..f51197ba754a 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -51,8 +51,6 @@ ENTRY(cpu_arm9tdmi_reset)
ENDPROC(cpu_arm9tdmi_reset)
.popsection
- __CPUINIT
-
.type __arm9tdmi_setup, #function
__arm9tdmi_setup:
mov pc, lr
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index aaeb6c127c7a..2dfc0f1d3bfd 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -135,8 +135,6 @@ ENTRY(cpu_fa526_set_pte_ext)
#endif
mov pc, lr
- __CPUINIT
-
.type __fa526_setup, #function
__fa526_setup:
/* On return of this routine, r0 must carry correct flags for CFG register */
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 4106b09e0c29..d5146b98c8d1 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -514,8 +514,6 @@ ENTRY(cpu_feroceon_set_pte_ext)
#endif
mov pc, lr
- __CPUINIT
-
.type __feroceon_setup, #function
__feroceon_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 0b60dd3d742a..40acba595731 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -383,8 +383,6 @@ ENTRY(cpu_mohawk_do_resume)
ENDPROC(cpu_mohawk_do_resume)
#endif
- __CPUINIT
-
.type __mohawk_setup, #function
__mohawk_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 775d70fba937..c45319c8f1d9 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -159,8 +159,6 @@ ENTRY(cpu_sa110_set_pte_ext)
#endif
mov pc, lr
- __CPUINIT
-
.type __sa110_setup, #function
__sa110_setup:
mov r10, #0
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index d92dfd081429..09d241ae2dbe 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -198,8 +198,6 @@ ENTRY(cpu_sa1100_do_resume)
ENDPROC(cpu_sa1100_do_resume)
#endif
- __CPUINIT
-
.type __sa1100_setup, #function
__sa1100_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 2d1ef87328a1..1128064fddcb 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -180,8 +180,6 @@ ENDPROC(cpu_v6_do_resume)
.align
- __CPUINIT
-
/*
* __v6_setup
*
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 9704097c450e..bdd3be4be77a 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- ALT_SMP(mov pc,lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
@@ -160,8 +160,6 @@ ENDPROC(cpu_v7_set_pte_ext)
mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
.endm
- __CPUINIT
-
/* AT
* TFR EV X F I D LR S
* .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
@@ -172,5 +170,3 @@ ENDPROC(cpu_v7_set_pte_ext)
.type v7_crval, #object
v7_crval:
crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
-
- .previous
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 5ffe1956c6d9..01a719e18bb0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0]
- ALT_SMP(mov pc, lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
@@ -140,8 +140,6 @@ ENDPROC(cpu_v7_set_pte_ext)
mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0
.endm
- __CPUINIT
-
/*
* AT
* TFR EV X F IHD LR S
@@ -153,5 +151,3 @@ ENDPROC(cpu_v7_set_pte_ext)
.type v7_crval, #object
v7_crval:
crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
-
- .previous
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7ef3ad05df39..73398bcf9bd8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
- ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
- ALT_UP(W(nop))
- dcache_line_size r2, r3
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
+ ALT_UP_B(1f)
+ mov pc, lr
+1: dcache_line_size r2, r3
+2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, r2
subs r1, r1, r2
- bhi 1b
+ bhi 2b
dsb
mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
@@ -167,8 +168,6 @@ ENDPROC(cpu_pj4b_do_idle)
#endif
- __CPUINIT
-
/*
* __v7_setup
*
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index e8efd83b6f25..dc1645890042 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -446,8 +446,6 @@ ENTRY(cpu_xsc3_do_resume)
ENDPROC(cpu_xsc3_do_resume)
#endif
- __CPUINIT
-
.type __xsc3_setup, #function
__xsc3_setup:
mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index e766f889bfd6..d19b1cfcad91 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -558,8 +558,6 @@ ENTRY(cpu_xscale_do_resume)
ENDPROC(cpu_xscale_do_resume)
#endif
- __CPUINIT
-
.type __xscale_setup, #function
__xscale_setup:
mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 3dc5cbea86cc..a5b5ff6e68d2 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -29,6 +29,13 @@ config PLAT_S5P
help
Base platform code for Samsung's S5P series SoC.
+config SAMSUNG_PM
+ bool
+ depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || S5P_PM)
+ default y
+ help
+ Base platform power management code for Samsung SoCs.
+
if PLAT_SAMSUNG
# boot configurations
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 98d07d8fc7a7..199bbe304d02 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -51,7 +51,7 @@ obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o
# PM support
-obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_SAMSUNG_PM) += pm.o
obj-$(CONFIG_SAMSUNG_PM_GPIO) += pm-gpio.o
obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753dc15ba..df45d6edc98d 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -83,6 +83,11 @@ extern struct clk clk_ext;
extern struct clksrc_clk clk_epllref;
extern struct clksrc_clk clk_esysclk;
+/* S3C24XX UART clocks */
+extern struct clk s3c24xx_clk_uart0;
+extern struct clk s3c24xx_clk_uart1;
+extern struct clk s3c24xx_clk_uart2;
+
/* S3C64XX specific clocks */
extern struct clk clk_h2;
extern struct clk clk_27m;
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 5d47ca35cabd..6bc1a8f471e3 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -19,7 +19,7 @@
struct device;
-#ifdef CONFIG_PM
+#ifdef CONFIG_SAMSUNG_PM
extern __init int s3c_pm_init(void);
extern __init int s3c64xx_pm_init(void);
@@ -58,8 +58,6 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */
/* from sleep.S */
-extern void s3c_cpu_resume(void);
-
extern int s3c2410_cpu_suspend(unsigned long);
/* sleep save info */
@@ -106,12 +104,14 @@ extern void s3c_pm_do_save(struct sleep_save *ptr, int count);
extern void s3c_pm_do_restore(struct sleep_save *ptr, int count);
extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
-#ifdef CONFIG_PM
+#ifdef CONFIG_SAMSUNG_PM
extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
+extern void s3c_cpu_resume(void);
#else
#define s3c_irq_wake NULL
#define s3c_irqext_wake NULL
+#define s3c_cpu_resume NULL
#endif
/* PM debug functions */
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index ea3613642451..d0c23010b693 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -80,7 +80,7 @@ unsigned char pm_uart_udivslot;
#ifdef CONFIG_SAMSUNG_PM_DEBUG
-static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
+static struct pm_uart_save uart_save;
static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
{
@@ -101,11 +101,7 @@ static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
static void s3c_pm_save_uarts(void)
{
- struct pm_uart_save *save = uart_save;
- unsigned int uart;
-
- for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
- s3c_pm_save_uart(uart, save);
+ s3c_pm_save_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
}
static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
@@ -126,11 +122,7 @@ static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
static void s3c_pm_restore_uarts(void)
{
- struct pm_uart_save *save = uart_save;
- unsigned int uart;
-
- for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
- s3c_pm_restore_uart(uart, save);
+ s3c_pm_restore_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
}
#else
static void s3c_pm_save_uarts(void) { }
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 1e1b2d769748..39895d892c3b 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -23,7 +23,7 @@
* observers, irrespective of whether they're taking part in coherency
* or not. This is necessary for the hotplug code to work reliably.
*/
-static void __cpuinit write_pen_release(int val)
+static void write_pen_release(int val)
{
pen_release = val;
smp_wmb();
@@ -33,7 +33,7 @@ static void __cpuinit write_pen_release(int val)
static DEFINE_SPINLOCK(boot_lock);
-void __cpuinit versatile_secondary_init(unsigned int cpu)
+void versatile_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
@@ -48,7 +48,7 @@ void __cpuinit versatile_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f71c37edca26..c9770ba5c7df 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -172,7 +172,7 @@ static void __init xen_percpu_init(void *unused)
enable_percpu_irq(xen_events_irq, 0);
}
-static void xen_restart(char str, const char *cmd)
+static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
int rc;
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index d56ed11ba9a3..98abd476992d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -97,7 +97,7 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline void arch_counter_set_user_access(void)
{
u32 cntkctl;
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index ef8235c68c09..a2232d07be9d 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,14 +83,7 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
}
#endif
-#ifdef CONFIG_COMPAT
int aarch32_break_handler(struct pt_regs *regs);
-#else
-static int aarch32_break_handler(struct pt_regs *regs)
-{
- return -EFAULT;
-}
-#endif
#endif /* __ASSEMBLY */
#endif /* __KERNEL__ */
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index a6e1750369ef..7a18fabbe0f6 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,7 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <linux/reboot.h>
struct pt_regs;
@@ -41,7 +42,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(char str, const char *cmd);
+extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 3659e460071d..23a3c4791d86 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
#include <linux/compiler.h>
#ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE_ORDER 2
#endif
-#define THREAD_SIZE 8192
+#define THREAD_SIZE 16384
#define THREAD_START_SP (THREAD_SIZE - 16)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439827271e3d..26e310c54344 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -21,6 +21,7 @@
#define BOOT_CPU_MODE_EL2 (0x0e12b007)
#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
/*
* __boot_cpu_mode records what mode CPUs were booted in.
@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
void __hyp_set_vectors(phys_addr_t phys_vector_base);
phys_addr_t __hyp_get_vectors(void);
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
+}
+
/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
+ sync_boot_mode();
return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
}
@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
/* Check if the bootloader has booted CPUs in different modes */
static inline bool is_hyp_mode_mismatched(void)
{
+ sync_boot_mode();
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 08018e3df580..cbfacf7fb438 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -141,7 +141,7 @@ static void clear_os_lock(void *unused)
isb();
}
-static int __cpuinit os_lock_notify(struct notifier_block *self,
+static int os_lock_notify(struct notifier_block *self,
unsigned long action, void *data)
{
int cpu = (unsigned long)data;
@@ -150,11 +150,11 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata os_lock_nb = {
+static struct notifier_block os_lock_nb = {
.notifier_call = os_lock_notify,
};
-static int __cpuinit debug_monitors_init(void)
+static int debug_monitors_init(void)
{
/* Clear the OS lock. */
smp_call_function(clear_os_lock, NULL, 1);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a03..6ad781b21c08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
.macro get_thread_info, rd
mov \rd, sp
- and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
+ and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.endm
/*
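This get_thread_info change pairs with the THREAD_SIZE bump in thread_info.h earlier in the series: with 16 KiB stacks the macro can no longer hard-code the 8 KiB mask and derives it from THREAD_SIZE instead. The C equivalent of that masking, shown as a standalone toy with the new size assumed:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE	16384ULL	/* new value; must stay a power of two */

/* thread_info sits at the base of the kernel stack, so clearing the low
 * bits of any stack pointer inside it recovers its address */
static uint64_t thread_info_base(uint64_t sp)
{
	return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
	uint64_t sp = 0xffffffc000743e90ULL;	/* hypothetical kernel SP */

	printf("thread_info at %#llx\n", (unsigned long long)thread_info_base(sp));
	return 0;
}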
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c59db9..329218ca9ffb 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -821,7 +821,7 @@ static void reset_ctrl_regs(void *unused)
}
}
-static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
+static int hw_breakpoint_reset_notify(struct notifier_block *self,
unsigned long action,
void *hcpu)
{
@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
+static struct notifier_block hw_breakpoint_reset_nb = {
.notifier_call = hw_breakpoint_reset_notify,
};
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..57fb55c44c90 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -81,7 +81,7 @@ void soft_restart(unsigned long addr)
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
-void (*arm_pm_restart)(char str, const char *cmd);
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
EXPORT_SYMBOL_GPL(arm_pm_restart);
void arch_cpu_idle_prepare(void)
@@ -132,7 +132,7 @@ void machine_restart(char *cmd)
/* Now call the architecture specific reboot code. */
if (arm_pm_restart)
- arm_pm_restart('h', cmd);
+ arm_pm_restart(reboot_mode, cmd);
/*
* Whoops - the architecture was unable to reboot.
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5d54e3717bf8..fee5cce83450 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -71,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
* in coherency or not. This is necessary for the hotplug code to work
* reliably.
*/
-static void __cpuinit write_pen_release(u64 val)
+static void write_pen_release(u64 val)
{
void *start = (void *)&secondary_holding_pen_release;
unsigned long size = sizeof(secondary_holding_pen_release);
@@ -84,7 +84,7 @@ static void __cpuinit write_pen_release(u64 val)
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
-static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
@@ -122,7 +122,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
static DECLARE_COMPLETION(cpu_running);
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
@@ -162,7 +162,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
@@ -200,13 +200,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
raw_spin_unlock(&boot_lock);
/*
- * Enable local interrupts.
- */
- notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
-
- /*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
* before we continue.
@@ -215,6 +208,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
complete(&cpu_running);
/*
+ * Enable GIC and timers.
+ */
+ notify_cpu_starting(cpu);
+
+ local_irq_enable();
+ local_fiq_enable();
+
+ /*
* OK, it's off to the idle thread for us
*/
cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0ecac8980aae..6c8ba25bf6bb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
#define ESR_CM (1 << 8)
#define ESR_LNX_EXEC (1 << 24)
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
- unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
- if (esr & ESR_WRITE)
- mask = VM_WRITE;
- if (esr & ESR_LNX_EXEC)
- mask = VM_EXEC;
-
- return vma->vm_flags & mask ? false : true;
-}
-
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
- unsigned int esr, unsigned int flags,
+ unsigned int mm_flags, unsigned long vm_flags,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
* it.
*/
good_area:
- if (access_error(esr, vma)) {
+ /*
+ * Check that the permissions on the VMA allow for the fault which
+ * occurred. If we encountered a write or exec fault, we must have
+ * appropriate permissions, otherwise we allow any permission.
+ */
+ if (!(vma->vm_flags & vm_flags)) {
fault = VM_FAULT_BADACCESS;
goto out;
}
- return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+ return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
+ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+ unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ if (esr & ESR_LNX_EXEC) {
+ vm_flags = VM_EXEC;
+ } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+ vm_flags = VM_WRITE;
+ mm_flags |= FAULT_FLAG_WRITE;
+ }
tsk = current;
mm = tsk->mm;
@@ -248,7 +242,7 @@ retry:
#endif
}
- fault = __do_page_fault(mm, addr, esr, flags, tsk);
+ fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
/*
* If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
* starvation.
*/
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
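The rewritten arm64 fault path above folds the old access_error() helper into a single mask test: vm_flags starts as VM_READ | VM_WRITE | VM_EXEC and is narrowed to VM_EXEC or VM_WRITE for instruction-fetch and write faults, so "!(vma->vm_flags & vm_flags)" rejects exactly the disallowed cases. A tiny standalone check of that logic, using the kernel's flag values:

#include <stdio.h>

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004

/* mirrors the "!(vma->vm_flags & vm_flags)" test in __do_page_fault() */
static int bad_access(unsigned int vma_flags, unsigned int vm_flags)
{
	return !(vma_flags & vm_flags);
}

int main(void)
{
	unsigned int data_vma = VM_READ | VM_WRITE;	/* hypothetical rw- mapping */

	printf("exec fault on rw- mapping rejected: %d\n", bad_access(data_vma, VM_EXEC));
	printf("write fault on rw- mapping allowed:  %d\n", !bad_access(data_vma, VM_WRITE));
	return 0;
}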
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index e47d19ae3e06..974e55496db3 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -468,7 +468,7 @@ static void bfin_pmu_setup(int cpu)
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}
-static int __cpuinit
+static int
bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 107b306b06f1..19ad0637e8ff 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -99,7 +99,7 @@ void __init generate_cplb_tables(void)
}
#endif
-void __cpuinit bfin_setup_caches(unsigned int cpu)
+void bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
bfin_icache_init(icplb_tbl[cpu]);
@@ -165,7 +165,7 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
#endif
}
-void __cpuinit bfin_setup_cpudata(unsigned int cpu)
+void bfin_setup_cpudata(unsigned int cpu)
{
struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index c77a23bc9de3..11789beca75a 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -48,7 +48,7 @@ int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
return -EINVAL;
}
-void __cpuinit platform_secondary_init(unsigned int cpu)
+void platform_secondary_init(unsigned int cpu)
{
/* Clone setup for peripheral interrupt sources from CoreA. */
bfin_write_SICB_IMASK0(bfin_read_SIC_IMASK0());
@@ -73,7 +73,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
+int platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
@@ -154,7 +154,7 @@ void platform_clear_ipi(unsigned int cpu, int irq)
* Setup core B's local core timer.
* In SMP, core timer is used for clock event device.
*/
-void __cpuinit bfin_local_timer_setup(void)
+void bfin_local_timer_setup(void)
{
#if defined(CONFIG_TICKSOURCE_CORETMR)
struct irq_data *data = irq_get_irq_data(IRQ_CORETMR);
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index a60a24f5035d..0e1e451fd7d8 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -52,7 +52,7 @@ bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
}
#ifdef CONFIG_BFIN_ICACHE
-void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
+void bfin_icache_init(struct cplb_entry *icplb_tbl)
{
bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
(IMC | ENICPLB));
@@ -60,7 +60,7 @@ void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
#endif
#ifdef CONFIG_BFIN_DCACHE
-void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
+void bfin_dcache_init(struct cplb_entry *dcplb_tbl)
{
/*
* Anomaly notes:
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 6c0c6816a51a..d143fd8d2bc5 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1281,7 +1281,7 @@ static struct irq_chip bfin_gpio_irqchip = {
.irq_set_wake = bfin_gpio_set_wake,
};
-void __cpuinit init_exception_vectors(void)
+void init_exception_vectors(void)
{
/* cannot program in software:
* evt0 - emulation (jtag)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 961d8392e5e3..82f301c117a5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -46,7 +46,7 @@ struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif
-struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
+struct blackfin_initial_pda initial_pda_coreb;
enum ipi_message_type {
BFIN_IPI_NONE,
@@ -147,7 +147,7 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
platform_clear_ipi(cpu, IRQ_SUPPLE_1);
bfin_ipi_data = &__get_cpu_var(bfin_ipi);
- while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+ while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
msg = 0;
do {
msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
@@ -182,8 +182,8 @@ static void bfin_ipi_init(void)
struct ipi_data *bfin_ipi_data;
for_each_possible_cpu(cpu) {
bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
- bfin_ipi_data->bits = 0;
- bfin_ipi_data->count = 0;
+ atomic_set(&bfin_ipi_data->bits, 0);
+ atomic_set(&bfin_ipi_data->count, 0);
}
}
@@ -246,7 +246,7 @@ void smp_send_stop(void)
return;
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
@@ -259,7 +259,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
return ret;
}
-static void __cpuinit setup_secondary(unsigned int cpu)
+static void setup_secondary(unsigned int cpu)
{
unsigned long ilat;
@@ -277,7 +277,7 @@ static void __cpuinit setup_secondary(unsigned int cpu)
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}
-void __cpuinit secondary_start_kernel(void)
+void secondary_start_kernel(void)
{
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(resync_core_dcache);
#endif
#ifdef CONFIG_HOTPLUG_CPU
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
@@ -415,7 +415,7 @@ int __cpuexit __cpu_disable(void)
static DECLARE_COMPLETION(cpu_killed);
-int __cpuexit __cpu_die(unsigned int cpu)
+int __cpu_die(unsigned int cpu)
{
return wait_for_completion_timeout(&cpu_killed, 5000);
}
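
Besides the annotation removal, the Blackfin SMP hunks convert the per-CPU IPI bookkeeping from a plain word updated with xchg() to an atomic_t accessed through atomic_set()/atomic_xchg(). A minimal sketch of the sender/receiver pairing that pattern implies; the struct layout and function names here are illustrative, not the file's actual helpers:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

struct ipi_msgs {
	atomic_t bits;			/* pending message bitmask */
};

/* Sender: atomically mark message 'msg' pending for the target CPU. */
static void ipi_set_pending(struct ipi_msgs *ipi, unsigned int msg)
{
	int old, new;

	do {
		old = atomic_read(&ipi->bits);
		new = old | (1 << msg);
	} while (atomic_cmpxchg(&ipi->bits, old, new) != old);
}

/* Receiver: claim all pending bits in one shot, then dispatch each one. */
static void ipi_drain(struct ipi_msgs *ipi)
{
	unsigned long pending;
	unsigned int msg;

	while ((pending = atomic_xchg(&ipi->bits, 0)) != 0)
		for_each_set_bit(msg, &pending, BITS_PER_LONG)
			pr_debug("IPI message %u\n", msg);
}
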
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index cdd12028de0c..fe8e6039db2a 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -197,7 +197,7 @@ int setup_profiling_timer(unsigned int multiplier)
*/
unsigned long cache_decay_ticks = 1;
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index ae3a6706419b..9f3a7a62d787 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -709,7 +709,7 @@ static void __init reserve_dma_coherent(void)
/*
* calibrate the delay loop
*/
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index bfe13311d70d..29d1f1b00016 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -41,7 +41,7 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
int on_simulator;
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
}
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 0e364ca43198..9faaa940452b 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -146,7 +146,7 @@ void __init smp_prepare_boot_cpu(void)
* to point to current thread info
*/
-void __cpuinit start_secondary(void)
+void start_secondary(void)
{
unsigned int cpu;
unsigned long thread_ptr;
@@ -194,7 +194,7 @@ void __cpuinit start_secondary(void)
* maintains control until "cpu_online(cpu)" is set.
*/
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct thread_info *thread = (struct thread_info *)idle->stack;
void *stack_start;
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 7913695b2fcb..efbd2929aeb7 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index f8e913365423..f64980dd20c3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -25,7 +25,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index a5a9e02e60a0..0f4e9e41f130 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 37b9b422caad..b025acfde5c1 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -32,7 +32,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 0ac558adc605..bb21f4f63170 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -343,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
}
}
-int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
{
int timeout;
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 5b18888ee364..5cc4d4dcf3cf 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -813,8 +813,8 @@ static struct metag_pmu _metag_pmu = {
};
/* PMU CPU hotplug notifier */
-static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
+static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned int)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -828,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata metag_pmu_notifier = {
+static struct notifier_block metag_pmu_notifier = {
.notifier_call = metag_pmu_cpu_notify,
};
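
The metag PMU keeps its CPU hotplug notifier but loses the __cpuinit/__cpuinitdata markers, since the discardable sections those markers requested no longer exist. A condensed sketch of the notifier pattern of this era (the callback body and my_pmu_reset() are illustrative; register_cpu_notifier() was the pre-cpuhp-state-machine registration API):

#include <linux/cpu.h>
#include <linux/notifier.h>

static void my_pmu_reset(unsigned int cpu)
{
	/* A real driver would reset its per-CPU counters here. */
}

static int my_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
		my_pmu_reset(cpu);	/* CPU is coming online */

	return NOTIFY_OK;
}

static struct notifier_block my_pmu_notifier = {
	.notifier_call = my_pmu_cpu_notify,
};

/* Registered once from the driver's init path:
 *	register_cpu_notifier(&my_pmu_notifier);
 */
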
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index e413875cf6d2..7c0113142981 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running);
/*
* "thread" is assumed to be a valid Meta hardware thread ID.
*/
-int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
+int boot_secondary(unsigned int thread, struct task_struct *idle)
{
u32 val;
@@ -118,11 +118,9 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
* If the cache partition has changed, prints a message to the log describing
* those changes.
*/
-static __cpuinit void describe_cachepart_change(unsigned int thread,
- const char *label,
- unsigned int sz,
- unsigned int old,
- unsigned int new)
+static void describe_cachepart_change(unsigned int thread, const char *label,
+ unsigned int sz, unsigned int old,
+ unsigned int new)
{
unsigned int lor1, land1, gor1, gand1;
unsigned int lor2, land2, gor2, gand2;
@@ -170,7 +168,7 @@ static __cpuinit void describe_cachepart_change(unsigned int thread,
* Ensures that coherency is enabled and that the threads share the same cache
* partitions.
*/
-static __cpuinit void setup_smp_cache(unsigned int thread)
+static void setup_smp_cache(unsigned int thread)
{
unsigned int this_thread, lflags;
unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
@@ -215,7 +213,7 @@ static __cpuinit void setup_smp_cache(unsigned int thread)
icpart_old, icpart_new);
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
unsigned int thread = cpu_2_hwthread_id[cpu];
int ret;
@@ -268,7 +266,7 @@ static DECLARE_COMPLETION(cpu_killed);
/*
* __cpu_disable runs on the processor to be shutdown.
*/
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
@@ -299,7 +297,7 @@ int __cpuexit __cpu_disable(void)
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
{
if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
pr_err("CPU%u: unable to kill\n", cpu);
@@ -311,7 +309,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
* Note that we do not return from this function. If this cpu is
* brought online again it will need to run secondary_startup().
*/
-void __cpuexit cpu_die(void)
+void cpu_die(void)
{
local_irq_disable();
idle_task_exit();
@@ -326,7 +324,7 @@ void __cpuexit cpu_die(void)
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+void smp_store_cpu_info(unsigned int cpuid)
{
struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index c00ade0228ef..25f9d1c2ffec 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -812,7 +812,7 @@ static void set_trigger_mask(unsigned int mask)
}
#endif
-void __cpuinit per_cpu_trap_init(unsigned long cpu)
+void per_cpu_trap_init(unsigned long cpu)
{
TBIRES int_context;
unsigned int thread = cpu_2_hwthread_id[cpu];
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4758a8fd3e99..e12764c2a9d0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -114,6 +114,7 @@ config BCM47XX
select FW_CFE
select HW_HAS_PCI
select IRQ_CPU
+ select SYS_HAS_CPU_MIPS32_R1
select NO_EXCEPT_FILL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -1702,6 +1703,7 @@ endchoice
config KVM_GUEST
bool "KVM Guest Kernel"
+ depends on BROKEN_ON_SMP
help
Select this option if building a guest kernel for KVM (Trap & Emulate) mode
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 8be4e856b8b8..80f4ecd42b0d 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -182,7 +182,7 @@ const char *get_system_type(void)
return ath79_sys_type;
}
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index ba611927749b..2b8b118398c4 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -2,7 +2,6 @@ if BCM47XX
config BCM47XX_SSB
bool "SSB Support for Broadcom BCM47XX"
- select SYS_HAS_CPU_MIPS32_R1
select SSB
select SSB_DRIVER_MIPS
select SSB_DRIVER_EXTIF
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 7181def6037a..9d36774bded1 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1095,7 +1095,7 @@ static void octeon_irq_ip3_ciu(void)
static bool octeon_irq_use_ip4;
-static void __cpuinit octeon_irq_local_enable_ip4(void *arg)
+static void octeon_irq_local_enable_ip4(void *arg)
{
set_c0_status(STATUSF_IP4);
}
@@ -1110,21 +1110,21 @@ static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);
-void __cpuinitdata (*octeon_irq_setup_secondary)(void);
+void (*octeon_irq_setup_secondary)(void);
-void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
+void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
octeon_irq_ip4 = h;
octeon_irq_use_ip4 = true;
on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}
-static void __cpuinit octeon_irq_percpu_enable(void)
+static void octeon_irq_percpu_enable(void)
{
irq_cpu_online();
}
-static void __cpuinit octeon_irq_init_ciu_percpu(void)
+static void octeon_irq_init_ciu_percpu(void)
{
int coreid = cvmx_get_core_num();
@@ -1167,7 +1167,7 @@ static void octeon_irq_init_ciu2_percpu(void)
cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}
-static void __cpuinit octeon_irq_setup_secondary_ciu(void)
+static void octeon_irq_setup_secondary_ciu(void)
{
octeon_irq_init_ciu_percpu();
octeon_irq_percpu_enable();
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 7b746e7bf7a1..1830874ff1e2 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -334,9 +334,10 @@ static void __init octeon_fdt_pip_iface(int pip, int idx, u64 *pmac)
char name_buffer[20];
int iface;
int p;
- int count;
+ int count = 0;
- count = cvmx_helper_interface_enumerate(idx);
+ if (cvmx_helper_interface_enumerate(idx) == 0)
+ count = cvmx_helper_ports_on_interface(idx);
snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 295137dfdc37..138cc80c5928 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -173,7 +173,7 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle)
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
-static void __cpuinit octeon_init_secondary(void)
+static void octeon_init_secondary(void)
{
unsigned int sr;
@@ -375,7 +375,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
return 0;
}
-static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
+static int octeon_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -394,7 +394,7 @@ static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static int __cpuinit register_cavium_notifier(void)
+static int register_cavium_notifier(void)
{
hotcpu_notifier(octeon_cpu_callback, 0);
return 0;
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 5b2f2e68e57f..9488fa5f8866 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -25,8 +25,12 @@
#else
#define CAC_BASE _AC(0x80000000, UL)
#endif
+#ifndef IO_BASE
#define IO_BASE _AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
#define UNCAC_BASE _AC(0xa0000000, UL)
+#endif
#ifndef MAP_BASE
#ifdef CONFIG_KVM_GUEST
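
Guarding IO_BASE and UNCAC_BASE with #ifndef lets a platform's own spaces.h override the generic defaults, the same way MAP_BASE already is (see the #ifndef just below). A sketch of what a hypothetical platform header could then do; the mach-foo path and addresses are illustrative only:

/* arch/mips/include/asm/mach-foo/spaces.h -- hypothetical override */
#ifndef __ASM_MACH_FOO_SPACES_H
#define __ASM_MACH_FOO_SPACES_H

#define UNCAC_BASE	_AC(0xb4000000, UL)	/* platform's uncached window */
#define IO_BASE		UNCAC_BASE

/* Everything not overridden above falls through to the generic defaults. */
#include <asm/mach-generic/spaces.h>

#endif /* __ASM_MACH_FOO_SPACES_H */
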
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 370d967725c2..c33a9564fb41 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -13,12 +13,8 @@
#ifdef CONFIG_EXPORT_UASM
#include <linux/export.h>
-#define __uasminit
-#define __uasminitdata
#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
#else
-#define __uasminit __cpuinit
-#define __uasminitdata __cpuinitdata
#define UASM_EXPORT_SYMBOL(sym)
#endif
@@ -54,43 +50,36 @@
#endif
#define Ip_u1u2u3(op) \
-void __uasminit \
-ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \
-void __uasminit \
-ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \
-void __uasminit \
-ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \
-void __uasminit \
-ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \
-void __uasminit \
-ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
+void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_u2u1s3(op) \
-void __uasminit \
-ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2u1msbu3(op) \
-void __uasminit \
-ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
unsigned int d)
#define Ip_u1u2(op) \
-void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \
-void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
+void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
-#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
+#define Ip_u1(op) void ISAOPC(op)(u32 **buf, unsigned int a)
-#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
+#define Ip_0(op) void ISAOPC(op)(u32 **buf)
Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
@@ -163,7 +152,7 @@ struct uasm_label {
int lab;
};
-void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
int lid);
#ifdef CONFIG_64BIT
int ISAFUNC(uasm_in_compat_space_p)(long addr);
@@ -174,7 +163,7 @@ void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \
-static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
+static inline void ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
{ \
ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
}
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index b7a23064841f..88e292b7719e 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -25,11 +25,12 @@ struct siginfo;
/*
* Careful to keep union _sifields from shifting ...
*/
-#if __SIZEOF_LONG__ == 4
+#if _MIPS_SZLONG == 32
#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-#if __SIZEOF_LONG__ == 8
+#elif _MIPS_SZLONG == 64
#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#else
+#error _MIPS_SZLONG neither 32 nor 64
#endif
#include <asm-generic/siginfo.h>
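
Switching the preamble-size test from __SIZEOF_LONG__ to _MIPS_SZLONG keys the siginfo layout to the ABI macro that the MIPS target always provides, and the new #else branch turns any unexpected value into a hard build error instead of silently leaving __ARCH_SI_PREAMBLE_SIZE undefined. A hypothetical compile-time check (not part of this patch) of the invariant the macro encodes:

#include <stddef.h>
#include <asm/siginfo.h>

/* The preamble must end exactly where the _sifields union begins, or every
 * si_pid/si_addr/... accessor would read from the wrong offset.
 */
_Static_assert(offsetof(siginfo_t, _sifields) == __ARCH_SI_PREAMBLE_SIZE,
	       "__ARCH_SI_PREAMBLE_SIZE does not match the siginfo layout");
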
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 64c4fd62cf08..bd79c4f9bff4 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -28,8 +28,6 @@
.set mips0
.endm
- __CPUINIT
-
/***********************************************************************
* Alternate CPU1 startup vector for BMIPS4350
*
@@ -56,7 +54,11 @@ LEAF(bmips_smp_movevec)
/* set up CPU1 CBR; move BASE to 0xa000_0000 */
li k0, 0xff400000
mtc0 k0, $22, 6
- li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1
+ /* set up relocation vector address based on thread ID */
+ mfc0 k1, $22, 3
+ srl k1, 16
+ andi k1, 0x8000
+ or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
or k0, k1
li k1, 0xa0080000
sw k1, 0(k0)
@@ -216,8 +218,6 @@ END(bmips_smp_int_vec)
* Certain CPUs support extending kseg0 to 1024MB.
***********************************************************************/
- __CPUINIT
-
LEAF(bmips_enable_xks01)
#if defined(CONFIG_XKS01)
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 15f618b40cf6..7976457184b1 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -109,7 +109,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
-void __cpuinit sb1480_clockevent_init(void)
+void sb1480_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 730eaf92c018..594cbbf16d62 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -59,7 +59,7 @@ void gic_event_handler(struct clock_event_device *dev)
{
}
-int __cpuinit gic_clockevent_init(void)
+int gic_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 02033eaf8825..50d3f5a8d6bb 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -171,7 +171,7 @@ int c0_compare_int_usable(void)
}
#ifndef CONFIG_MIPS_MT_SMTC
-int __cpuinit r4k_clockevent_init(void)
+int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 200f2778bf36..5ea6d6b1de15 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -107,7 +107,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
-void __cpuinit sb1250_clockevent_init(void)
+void sb1250_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 9de5ed7ef1a3..b6cf0a60d896 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -248,7 +248,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
}
-int __cpuinit smtc_clockevent_init(void)
+int smtc_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 0c61df281ce6..2d80b5f1aeae 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -168,7 +168,7 @@ static inline void check_mult_sh(void)
panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}
-static volatile int daddi_ov __cpuinitdata;
+static volatile int daddi_ov;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c7b1b3c5a761..4c6167a17875 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -27,7 +27,7 @@
#include <asm/spram.h>
#include <asm/uaccess.h>
-static int __cpuinitdata mips_fpu_disabled;
+static int mips_fpu_disabled;
static int __init fpu_disable(char *s)
{
@@ -39,7 +39,7 @@ static int __init fpu_disable(char *s)
__setup("nofpu", fpu_disable);
-int __cpuinitdata mips_dsp_disabled;
+int mips_dsp_disabled;
static int __init dsp_disable(char *s)
{
@@ -134,7 +134,7 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
#endif
}
-static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
+static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
{
switch (isa) {
case MIPS_CPU_ISA_M64R2:
@@ -159,7 +159,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
}
}
-static char unknown_isa[] __cpuinitdata = KERN_ERR \
+static char unknown_isa[] = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -290,7 +290,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
return config4 & MIPS_CONF_M;
}
-static void __cpuinit decode_configs(struct cpuinfo_mips *c)
+static void decode_configs(struct cpuinfo_mips *c)
{
int ok;
@@ -962,7 +962,7 @@ EXPORT_SYMBOL(__ua_limit);
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
-__cpuinit void cpu_probe(void)
+void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
@@ -1047,7 +1047,7 @@ __cpuinit void cpu_probe(void)
#endif
}
-__cpuinit void cpu_report(void)
+void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 099912324423..7b6a5b3e3acf 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -158,8 +158,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
j start_kernel
END(kernel_entry)
- __CPUINIT
-
#ifdef CONFIG_SMP
/*
* SMP slave cpus entry point. Board specific code for bootstrap calls this
@@ -188,5 +186,3 @@ NESTED(smp_bootstrap, 16, sp)
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
-
- __FINIT
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index aea6c0885838..159abc8842d2 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -79,15 +79,9 @@ static void __init bmips_smp_setup(void)
* MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
- *
- * If booting from TP1, leave the existing CMT interrupt routing
- * such that TP0 responds to SW1 and TP1 responds to SW0.
*/
- if (boot_cpu == 0)
- change_c0_brcm_cmt_intr(0xf8018000,
+ change_c0_brcm_cmt_intr(0xf8018000,
(0x02 << 27) | (0x03 << 15));
- else
- change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
@@ -173,7 +167,7 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
else {
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
/* Reset slave TP1 if booting from TP0 */
- if (cpu_logical_map(cpu) == 0)
+ if (cpu_logical_map(cpu) == 1)
set_c0_brcm_cmt_ctrl(0x01);
#elif defined(CONFIG_CPU_BMIPS5000)
if (cpu & 0x01)
@@ -202,9 +196,15 @@ static void bmips_init_secondary(void)
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
void __iomem *cbr = BMIPS_GET_CBR();
unsigned long old_vec;
+ unsigned long relo_vector;
+ int boot_cpu;
+
+ boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
+ BMIPS_RELO_VECTOR_CONTROL_1;
- old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
- __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ old_vec = __raw_readl(cbr + relo_vector);
+ __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
#elif defined(CONFIG_CPU_BMIPS5000)
@@ -398,7 +398,7 @@ struct plat_smp_ops bmips_smp_ops = {
* UP BMIPS systems as well.
***********************************************************************/
-static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
+static void bmips_wr_vec(unsigned long dst, char *start, char *end)
{
memcpy((void *)dst, start, end - start);
dma_cache_wback((unsigned long)start, end - start);
@@ -406,7 +406,7 @@ static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
instruction_hazard();
}
-static inline void __cpuinit bmips_nmi_handler_setup(void)
+static inline void bmips_nmi_handler_setup(void)
{
bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
&bmips_reset_nmi_vec_end);
@@ -414,7 +414,7 @@ static inline void __cpuinit bmips_nmi_handler_setup(void)
&bmips_smp_int_vec_end);
}
-void __cpuinit bmips_ebase_setup(void)
+void bmips_ebase_setup(void)
{
unsigned long new_ebase = ebase;
void __iomem __maybe_unused *cbr;
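
These smp-bmips.c hunks pair with the bmips_vec.S change earlier in the patch: instead of assuming thread 0 booted the system, both sides derive which BMIPS_RELO_VECTOR_CONTROL_* register to program from bit 31 of the CMT local register. A condensed sketch of the C side, using only helpers that appear in the hunk above:

/* Sketch of the relocation-vector selection done in bmips_init_secondary(). */
static void bmips_pick_relo_vector(void)
{
	void __iomem *cbr = BMIPS_GET_CBR();
	unsigned long relo_vector, old_vec;
	int boot_cpu;

	/* Bit 31 of the CMT local register selects between the two
	 * relocation vector control words (see the hunk above). */
	boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
	relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
				 BMIPS_RELO_VECTOR_CONTROL_1;

	/* Clear the 0x20000000 bit in whichever control word was chosen,
	 * exactly as the old code did for the hard-wired CONTROL_1 case. */
	old_vec = __raw_readl(cbr + relo_vector);
	__raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
}
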
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3e5164c11cac..57a3f7a2b370 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -149,7 +149,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
vsmp_send_ipi_single(i, action);
}
-static void __cpuinit vsmp_init_secondary(void)
+static void vsmp_init_secondary(void)
{
#ifdef CONFIG_IRQ_GIC
/* This is Malta specific: IPI,performance and timer interrupts */
@@ -162,7 +162,7 @@ static void __cpuinit vsmp_init_secondary(void)
STATUSF_IP6 | STATUSF_IP7);
}
-static void __cpuinit vsmp_smp_finish(void)
+static void vsmp_smp_finish(void)
{
/* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
@@ -188,7 +188,7 @@ static void vsmp_cpus_done(void)
* (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE
*/
-static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
+static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
dvpe();
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 00500fea2750..7fde3e4d978f 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -28,11 +28,11 @@ static inline void up_send_ipi_mask(const struct cpumask *mask,
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
-static void __cpuinit up_init_secondary(void)
+static void up_init_secondary(void)
{
}
-static void __cpuinit up_smp_finish(void)
+static void up_smp_finish(void)
{
}
@@ -44,7 +44,7 @@ static void up_cpus_done(void)
/*
* Firmware CPU startup hook
*/
-static void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle)
+static void up_boot_secondary(int cpu, struct task_struct *idle)
{
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6e7862ab46cc..5c208ed8f856 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -86,7 +86,7 @@ static inline void set_cpu_sibling_map(int cpu)
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
-__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(struct plat_smp_ops *ops)
{
if (mp_ops)
printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -98,7 +98,7 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
* First C code run on the secondary CPUs after being started up by
* the master.
*/
-asmlinkage __cpuinit void start_secondary(void)
+asmlinkage void start_secondary(void)
{
unsigned int cpu;
@@ -197,7 +197,7 @@ void smp_prepare_boot_cpu(void)
cpu_set(0, cpu_callin_map);
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
mp_ops->boot_secondary(cpu, tidle);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 75a4fd709841..dfc1b911be04 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -645,7 +645,7 @@ void smtc_prepare_cpus(int cpus)
* (unsigned long)idle->thread_info the gp
*
*/
-void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
+void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
extern u32 kernelsp[NR_CPUS];
unsigned long flags;
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 6af08d896e20..93f86817f20a 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -37,7 +37,7 @@
/*
* Different semantics to the set_c0_* function built by __BUILD_SET_C0
*/
-static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
+static unsigned int bis_c0_errctl(unsigned int set)
{
unsigned int res;
res = read_c0_errctl();
@@ -45,7 +45,7 @@ static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
return res;
}
-static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
+static void ispram_store_tag(unsigned int offset, unsigned int data)
{
unsigned int errctl;
@@ -64,7 +64,7 @@ static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
}
-static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
+static unsigned int ispram_load_tag(unsigned int offset)
{
unsigned int data;
unsigned int errctl;
@@ -82,7 +82,7 @@ static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
return data;
}
-static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
+static void dspram_store_tag(unsigned int offset, unsigned int data)
{
unsigned int errctl;
@@ -98,7 +98,7 @@ static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
}
-static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
+static unsigned int dspram_load_tag(unsigned int offset)
{
unsigned int data;
unsigned int errctl;
@@ -115,7 +115,7 @@ static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
return data;
}
-static __cpuinit void probe_spram(char *type,
+static void probe_spram(char *type,
unsigned int base,
unsigned int (*read)(unsigned int),
void (*write)(unsigned int, unsigned int))
@@ -196,7 +196,7 @@ static __cpuinit void probe_spram(char *type,
offset += 2 * SPRAM_TAG_STRIDE;
}
}
-void __cpuinit spram_config(void)
+void spram_config(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config0;
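
probe_spram() takes its tag accessors as function pointers, so the same probe loop serves both the instruction and data scratchpad RAMs. A minimal sketch of that callback-driven shape (the typedef names, probe_one() and the base addresses are illustrative, not this file's identifiers):

typedef unsigned int (*tag_read_fn)(unsigned int offset);
typedef void (*tag_write_fn)(unsigned int offset, unsigned int data);

static void probe_one(const char *type, unsigned int base,
		      tag_read_fn read, tag_write_fn write)
{
	unsigned int tag = read(base);

	/* The real probe walks the tags at a fixed stride and decodes the
	 * enable/size bits; only the indirection itself is shown here. */
	write(base, tag);
}

/* Usage would mirror spram_config() above, e.g. (base values illustrative):
 *	probe_one("ISPRAM", 0x1c000000, ispram_load_tag, ispram_store_tag);
 *	probe_one("DSPRAM", 0x1c100000, dspram_load_tag, dspram_store_tag);
 */
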
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 1ff43d5ac2c4..84536bf4a154 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -20,15 +20,15 @@
#include <asm/barrier.h>
#include <asm/mipsregs.h>
-static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
-static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
-static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
-static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
+static atomic_t count_start_flag = ATOMIC_INIT(0);
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+static atomic_t count_reference = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 5
-void __cpuinit synchronise_count_master(int cpu)
+void synchronise_count_master(int cpu)
{
int i;
unsigned long flags;
@@ -106,7 +106,7 @@ void __cpuinit synchronise_count_master(int cpu)
printk("done.\n");
}
-void __cpuinit synchronise_count_slave(int cpu)
+void synchronise_count_slave(int cpu)
{
int i;
unsigned int initcount;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0903d70b2cfe..aec3408edd4b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -90,7 +90,7 @@ void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
-void __cpuinitdata(*board_cache_error_setup)(void);
+void (*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
{
@@ -1242,7 +1242,6 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
- exception_exit(prev_state);
}
asmlinkage void do_mt(struct pt_regs *regs)
@@ -1682,7 +1681,7 @@ int cp0_compare_irq_shift;
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
-static int __cpuinitdata noulri;
+static int noulri;
static int __init ulri_disable(char *s)
{
@@ -1693,7 +1692,7 @@ static int __init ulri_disable(char *s)
}
__setup("noulri", ulri_disable);
-void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
+void per_cpu_trap_init(bool is_boot_cpu)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
@@ -1810,7 +1809,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
}
/* Install CPU exception handler */
-void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
+void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
@@ -1820,7 +1819,7 @@ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
-static char panic_null_cerr[] __cpuinitdata =
+static char panic_null_cerr[] =
"Trying to set NULL cache error exception handler";
/*
@@ -1828,7 +1827,7 @@ static char panic_null_cerr[] __cpuinitdata =
* This is suitable only for the cache error exception which is the only
* exception handler that is being run uncached.
*/
-void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
+void set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
unsigned long uncached_ebase = CKSEG1ADDR(ebase);
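
set_handler() installs an exception handler by copying it over the vector at ebase + offset and flushing the icache for that range; the microMIPS variant copies from addr - 1 to keep the ISA bit, and set_uncached_handler() performs the same write through the CKSEG1 alias for handlers that must run uncached. A usage sketch, modelled on the cache-error setup that appears later in this patch (c-octeon.c installs except_vec2_octeon at offset 0x100):

/* Sketch: install a 0x80-byte cache-error vector at EBASE + 0x100.
 * 'except_vec2_foo' stands in for a board's actual handler symbol.
 */
extern char except_vec2_foo;

static void foo_cache_error_setup(void)
{
	set_handler(0x100, &except_vec2_foo, 0x80);
}
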
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index cbdc4de85bb4..2a03abb5bd2c 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -100,7 +100,7 @@ void mips_clear_watch_registers(void)
}
}
-__cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
+void mips_probe_watch_registers(struct cpuinfo_mips *c)
{
unsigned int t;
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 2c15590e55f7..30e334e823bd 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -5,7 +5,6 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
- depends on HAVE_KVM
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 51194875f158..eb3e18659630 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -461,7 +461,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
return 0;
}
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
return MIPS_CPU_TIMER_IRQ;
}
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 65e3dfc4e585..d8522f8e842a 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -36,7 +36,7 @@
* values, so we can avoid sharing the same stack area between a cached
* and the uncached mode.
*/
-unsigned long __cpuinit run_uncached(void *func)
+unsigned long run_uncached(void *func)
{
register long sp __asm__("$sp");
register long ret __asm__("$2");
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8557fb552863..a0bcdbb81d41 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -180,7 +180,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
* Probe Octeon's caches
*
*/
-static void __cpuinit probe_octeon(void)
+static void probe_octeon(void)
{
unsigned long icache_size;
unsigned long dcache_size;
@@ -251,7 +251,7 @@ static void __cpuinit probe_octeon(void)
}
}
-static void __cpuinit octeon_cache_error_setup(void)
+static void octeon_cache_error_setup(void)
{
extern char except_vec2_octeon;
set_handler(0x100, &except_vec2_octeon, 0x80);
@@ -261,7 +261,7 @@ static void __cpuinit octeon_cache_error_setup(void)
* Setup the Octeon cache flush routines
*
*/
-void __cpuinit octeon_cache_init(void)
+void octeon_cache_init(void)
{
probe_octeon();
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 704dc735a59d..2fcde0c8ea02 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
-unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
+unsigned long r3k_cache_size(unsigned long ca_flags)
{
unsigned long flags, status, dummy, size;
volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
return size * sizeof(*p);
}
-unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
+unsigned long r3k_cache_lsize(unsigned long ca_flags)
{
unsigned long flags, status, lsize, i;
volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
return lsize * sizeof(*p);
}
-static void __cpuinit r3k_probe_cache(void)
+static void r3k_probe_cache(void)
{
dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size)
@@ -312,7 +312,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
r3k_flush_dcache_range(start, start + size);
}
-void __cpuinit r3k_cache_init(void)
+void r3k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813beec7a5..f749f687ee87 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -107,7 +107,7 @@ static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
blast_dcache64_page(addr);
}
-static void __cpuinit r4k_blast_dcache_page_setup(void)
+static void r4k_blast_dcache_page_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -123,7 +123,7 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
-static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
+static void r4k_blast_dcache_page_indexed_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -140,7 +140,7 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);
-static void __cpuinit r4k_blast_dcache_setup(void)
+static void r4k_blast_dcache_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -227,7 +227,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
static void (* r4k_blast_icache_page)(unsigned long addr);
-static void __cpuinit r4k_blast_icache_page_setup(void)
+static void r4k_blast_icache_page_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -244,7 +244,7 @@ static void __cpuinit r4k_blast_icache_page_setup(void)
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
-static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
+static void r4k_blast_icache_page_indexed_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -269,7 +269,7 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);
-static void __cpuinit r4k_blast_icache_setup(void)
+static void r4k_blast_icache_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -290,7 +290,7 @@ static void __cpuinit r4k_blast_icache_setup(void)
static void (* r4k_blast_scache_page)(unsigned long addr);
-static void __cpuinit r4k_blast_scache_page_setup(void)
+static void r4k_blast_scache_page_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -308,7 +308,7 @@ static void __cpuinit r4k_blast_scache_page_setup(void)
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
-static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
+static void r4k_blast_scache_page_indexed_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -326,7 +326,7 @@ static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
static void (* r4k_blast_scache)(void);
-static void __cpuinit r4k_blast_scache_setup(void)
+static void r4k_blast_scache_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -797,11 +797,11 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
}
}
-static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
-static void __cpuinit probe_pcache(void)
+static void probe_pcache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -1119,7 +1119,7 @@ static void __cpuinit probe_pcache(void)
* executes in KSEG1 space or else you will crash and burn badly. You have
* been warned.
*/
-static int __cpuinit probe_scache(void)
+static int probe_scache(void)
{
unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config();
@@ -1196,7 +1196,7 @@ extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
-static void __cpuinit setup_scache(void)
+static void setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -1329,7 +1329,7 @@ static void nxp_pr4450_fixup_config(void)
NXP_BARRIER();
}
-static int __cpuinitdata cca = -1;
+static int cca = -1;
static int __init cca_setup(char *str)
{
@@ -1340,7 +1340,7 @@ static int __init cca_setup(char *str)
early_param("cca", cca_setup);
-static void __cpuinit coherency_setup(void)
+static void coherency_setup(void)
{
if (cca < 0 || cca > 7)
cca = read_c0_config() & CONF_CM_CMASK;
@@ -1380,7 +1380,7 @@ static void __cpuinit coherency_setup(void)
}
}
-static void __cpuinit r4k_cache_error_setup(void)
+static void r4k_cache_error_setup(void)
{
extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1;
@@ -1398,7 +1398,7 @@ static void __cpuinit r4k_cache_error_setup(void)
}
}
-void __cpuinit r4k_cache_init(void)
+void r4k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
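
The *_setup() helpers in c-r4k.c choose a blast routine once, at cache-init time, from the probed line size, and store it in a function pointer that the hot paths then call unconditionally. A condensed sketch of that dispatch (blast_dcache16/32/64 are the generated routines this file already uses; cache_noop is its empty stub for the no-dcache case):

/* Sketch of the line-size dispatch performed by r4k_blast_dcache_setup(). */
static void (*blast_dcache_sketch)(void);

static void blast_dcache_setup_sketch(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		blast_dcache_sketch = (void *)cache_noop;	/* no D-cache */
	else if (dc_lsize == 16)
		blast_dcache_sketch = blast_dcache16;
	else if (dc_lsize == 32)
		blast_dcache_sketch = blast_dcache32;
	else if (dc_lsize == 64)
		blast_dcache_sketch = blast_dcache64;
}
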
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ba9da270289f..8d909dbbf37f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -344,7 +344,7 @@ static __init void tx39_probe_cache(void)
}
}
-void __cpuinit tx39_cache_init(void)
+void tx39_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 5aeb3eb0b72f..15f813c303b4 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,7 +182,7 @@ static inline void setup_protection_map(void)
}
}
-void __cpuinit cpu_cache_init(void)
+void cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index fe1d887e8d70..191cf6e0c725 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -49,8 +49,6 @@
* (0x170-0x17f) are used to preserve k0, k1, and ra.
*/
- __CPUINIT
-
LEAF(except_vec2_sb1)
/*
* If this error is recoverable, we need to exit the handler
@@ -142,8 +140,6 @@ unrecoverable:
END(except_vec2_sb1)
- __FINIT
-
LEAF(handle_vec2_sb1)
mfc0 k0,CP0_CONFIG
li k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 2c0bd580b9da..218c2109a55d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)
/* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __cpuinitdata labels[5];
-static struct uasm_reloc __cpuinitdata relocs[5];
+static struct uasm_label labels[5];
+static struct uasm_reloc relocs[5];
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-static int pref_bias_clear_store __cpuinitdata;
-static int pref_bias_copy_load __cpuinitdata;
-static int pref_bias_copy_store __cpuinitdata;
+static int pref_bias_clear_store;
+static int pref_bias_copy_load;
+static int pref_bias_copy_store;
-static u32 pref_src_mode __cpuinitdata;
-static u32 pref_dst_mode __cpuinitdata;
+static u32 pref_src_mode;
+static u32 pref_dst_mode;
-static int clear_word_size __cpuinitdata;
-static int copy_word_size __cpuinitdata;
+static int clear_word_size;
+static int copy_word_size;
-static int half_clear_loop_size __cpuinitdata;
-static int half_copy_loop_size __cpuinitdata;
+static int half_clear_loop_size;
+static int half_copy_loop_size;
-static int cache_line_size __cpuinitdata;
+static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)
-static inline void __cpuinit
+static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
}
}
-static void __cpuinit set_prefetch_parameters(void)
+static void set_prefetch_parameters(void)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
clear_word_size = 8;
@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)
4 * copy_word_size));
}
-static void __cpuinit build_clear_store(u32 **buf, int off)
+static void build_clear_store(u32 **buf, int off)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
uasm_i_sd(buf, ZERO, off, A0);
@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)
}
}
-static inline void __cpuinit build_clear_pref(u32 **buf, int off)
+static inline void build_clear_pref(u32 **buf, int off)
{
if (off & cache_line_mask())
return;
@@ -240,7 +240,7 @@ extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;
-void __cpuinit build_clear_page(void)
+void build_clear_page(void)
{
int off;
u32 *buf = &__clear_page_start;
@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void)
pr_debug("\t.set pop\n");
}
-static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
+static void build_copy_load(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_ld(buf, reg, off, A1);
@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
}
}
-static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
+static void build_copy_store(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_sd(buf, reg, off, A0);
@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
}
}
-void __cpuinit build_copy_page(void)
+void build_copy_page(void)
{
int off;
u32 *buf = &__copy_page_start;
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index c6aaed934d53..dc7c5a5214a9 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -167,7 +167,7 @@ static struct bcache_ops indy_sc_ops = {
.bc_inv = indy_sc_wback_invalidate
};
-void __cpuinit indy_sc_init(void)
+void indy_sc_init(void)
{
if (indy_sc_probe()) {
indy_sc_enable();
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index df96da7e939b..5d01392e3518 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -132,7 +132,7 @@ static inline int __init mips_sc_probe(void)
return 1;
}
-int __cpuinit mips_sc_init(void)
+int mips_sc_init(void)
{
int found = mips_sc_probe();
if (found) {
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8bc67720e145..0216ed6eaa2a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -98,7 +98,7 @@ static struct bcache_ops r5k_sc_ops = {
.bc_inv = r5k_dma_cache_inv_sc
};
-void __cpuinit r5k_sc_init(void)
+void r5k_sc_init(void)
{
if (r5k_sc_probe()) {
r5k_sc_enable();
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 274af3be1442..aaffbba33706 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -104,7 +104,7 @@ static void blast_rm7k_tcache(void)
/*
* This function is executed in uncached address space.
*/
-static __cpuinit void __rm7k_tc_enable(void)
+static void __rm7k_tc_enable(void)
{
int i;
@@ -117,7 +117,7 @@ static __cpuinit void __rm7k_tc_enable(void)
cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
}
-static __cpuinit void rm7k_tc_enable(void)
+static void rm7k_tc_enable(void)
{
if (read_c0_config() & RM7K_CONF_TE)
return;
@@ -130,7 +130,7 @@ static __cpuinit void rm7k_tc_enable(void)
/*
* This function is executed in uncached address space.
*/
-static __cpuinit void __rm7k_sc_enable(void)
+static void __rm7k_sc_enable(void)
{
int i;
@@ -143,7 +143,7 @@ static __cpuinit void __rm7k_sc_enable(void)
cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}
-static __cpuinit void rm7k_sc_enable(void)
+static void rm7k_sc_enable(void)
{
if (read_c0_config() & RM7K_CONF_SE)
return;
@@ -184,7 +184,7 @@ static struct bcache_ops rm7k_sc_ops = {
* This is a probing function like the one found in c-r4k.c, we look for the
* wrap around point with different addresses.
*/
-static __cpuinit void __probe_tcache(void)
+static void __probe_tcache(void)
{
unsigned long flags, addr, begin, end, pow2;
@@ -226,7 +226,7 @@ static __cpuinit void __probe_tcache(void)
local_irq_restore(flags);
}
-void __cpuinit rm7k_sc_init(void)
+void rm7k_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..9aca10994cd2 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -276,7 +276,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
}
}
-void __cpuinit tlb_init(void)
+void tlb_init(void)
{
local_flush_tlb_all();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4c473a..00b26a67a06d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -389,7 +389,7 @@ int __init has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static int __cpuinitdata ntlb;
+static int ntlb;
static int __init set_ntlb(char *str)
{
get_option(&str, &ntlb);
@@ -398,7 +398,7 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb);
-void __cpuinit tlb_init(void)
+void tlb_init(void)
{
/*
* You should never change this register:
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..6a99733a4440 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -213,14 +213,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_restore(flags);
}
-static void __cpuinit probe_tlb(unsigned long config)
+static void probe_tlb(unsigned long config)
{
struct cpuinfo_mips *c = &current_cpu_data;
c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
}
-void __cpuinit tlb_init(void)
+void tlb_init(void)
{
unsigned int config = read_c0_config();
unsigned long status;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9ab0f907a52c..556cb4815770 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
* why; it's not an issue caused by the core RTL.
*
*/
-static int __cpuinit m4kc_tlbp_war(void)
+static int m4kc_tlbp_war(void)
{
return (current_cpu_data.processor_id & 0xffff00) ==
(PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
UASM_L_LA(_tlb_huge_update)
#endif
-static int __cpuinitdata hazard_instance;
+static int hazard_instance;
-static void __cpuinit uasm_bgezl_hazard(u32 **p,
- struct uasm_reloc **r,
- int instance)
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
{
switch (instance) {
case 0 ... 7:
@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
}
}
-static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
- u32 **p,
- int instance)
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
{
switch (instance) {
case 0 ... 7:
@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
* We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic.
*/
-static u32 tlb_handler[128] __cpuinitdata;
+static u32 tlb_handler[128];
/* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __cpuinitdata;
-static struct uasm_reloc relocs[128] __cpuinitdata;
+static struct uasm_label labels[128];
+static struct uasm_reloc relocs[128];
-static int check_for_high_segbits __cpuinitdata;
+static int check_for_high_segbits;
-static unsigned int kscratch_used_mask __cpuinitdata;
+static unsigned int kscratch_used_mask;
static inline int __maybe_unused c0_kscratch(void)
{
@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void)
}
}
-static int __cpuinit allocate_kscratch(void)
+static int allocate_kscratch(void)
{
int r;
unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
return r;
}
-static int scratch_reg __cpuinitdata;
-static int pgd_reg __cpuinitdata;
+static int scratch_reg;
+static int pgd_reg;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
-static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+static struct work_registers build_get_work_registers(u32 **p)
{
struct work_registers r;
@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
return r;
}
-static void __cpuinit build_restore_work_registers(u32 **p)
+static void build_restore_work_registers(u32 **p)
{
if (scratch_reg >= 0) {
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
@@ -418,7 +414,7 @@ extern unsigned long pgd_current[];
/*
* The R3000 TLB handler is simple.
*/
-static void __cpuinit build_r3000_tlb_refill_handler(void)
+static void build_r3000_tlb_refill_handler(void)
{
long pgdc = (long)pgd_current;
u32 *p;
@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
* other one.To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed.
*/
-static u32 final_handler[64] __cpuinitdata;
+static u32 final_handler[64];
/*
* Hazards
@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
-static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __maybe_unused build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
@@ -511,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
*/
enum tlb_write_entry { tlb_random, tlb_indexed };
-static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r,
- enum tlb_write_entry wmode)
+static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ enum tlb_write_entry wmode)
{
void(*tlbw)(u32 **) = NULL;
@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
}
}
-static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
- unsigned int reg)
+static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+ unsigned int reg)
{
if (cpu_has_rixi) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-static __cpuinit void build_restore_pagemask(u32 **p,
- struct uasm_reloc **r,
- unsigned int tmp,
- enum label_id lid,
- int restore_scratch)
+static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ unsigned int tmp, enum label_id lid,
+ int restore_scratch)
{
if (restore_scratch) {
/* Reset default page size */
@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
}
}
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
- struct uasm_label **l,
- struct uasm_reloc **r,
- unsigned int tmp,
- enum tlb_write_entry wmode,
- int restore_scratch)
+static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ unsigned int tmp,
+ enum tlb_write_entry wmode,
+ int restore_scratch)
{
/* Set huge page tlb entry size */
uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
/*
* Check if Huge PTE is present, if so then jump to LABEL.
*/
-static void __cpuinit
+static void
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
- unsigned int pmd, int lid)
+ unsigned int pmd, int lid)
{
UASM_i_LW(p, tmp, 0, pmd);
if (use_bbit_insns()) {
@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
}
}
-static __cpuinit void build_huge_update_entries(u32 **p,
- unsigned int pte,
- unsigned int tmp)
+static void build_huge_update_entries(u32 **p, unsigned int pte,
+ unsigned int tmp)
{
int small_sequence;
@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
-static __cpuinit void build_huge_handler_tail(u32 **p,
- struct uasm_reloc **r,
- struct uasm_label **l,
- unsigned int pte,
- unsigned int ptr)
+static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ struct uasm_label **l,
+ unsigned int pte,
+ unsigned int ptr)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
*/
-static void __cpuinit
+static void
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr)
{
@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc.
*/
-static void __cpuinit
+static void
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int bvaddr, unsigned int ptr,
enum vmalloc64_mode mode)
@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
-static void __cpuinit __maybe_unused
+static void __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
long pgdc = (long)pgd_current;
@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#endif /* !CONFIG_64BIT */
-static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
+static void build_adjust_context(u32 **p, unsigned int ctx)
{
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
uasm_i_andi(p, ctx, ctx, mask);
}
-static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
/*
* Bug workaround for the Nevada. It seems as if under certain
@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
-static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
- unsigned int ptep)
+static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
/*
* 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info {
int restore_scratch;
};
-static struct mips_huge_tlb_info __cpuinit
+static struct mips_huge_tlb_info
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr, int c0_scratch_reg)
@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
*/
#define MIPS64_REFILL_INSNS 32
-static void __cpuinit build_r4000_tlb_refill_handler(void)
+static void build_r4000_tlb_refill_handler(void)
{
u32 *p = tlb_handler;
struct uasm_label *l = labels;
@@ -1462,11 +1452,11 @@ extern u32 handle_tlbm[], handle_tlbm_end[];
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
-static void __cpuinit build_r4000_setup_pgd(void)
+static void build_r4000_setup_pgd(void)
{
const int a0 = 4;
const int a1 = 5;
- u32 *p = tlbmiss_handler_setup_pgd_array;
+ u32 *p = tlbmiss_handler_setup_pgd;
const int tlbmiss_handler_setup_pgd_size =
tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
struct uasm_label *l = labels;
@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
}
#endif
-static void __cpuinit
+static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
#endif
}
-static void __cpuinit
+static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode)
{
@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
* the page table where this PTE is located, PTE will be re-loaded
* with it's original value.
*/
-static void __cpuinit
+static void
build_pte_present(u32 **p, struct uasm_reloc **r,
int pte, int ptr, int scratch, enum label_id lid)
{
@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
}
/* Make PTE valid, store result in PTR. */
-static void __cpuinit
+static void
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be written to, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
-static void __cpuinit
+static void
build_pte_writable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
/* Make PTE writable, update software status bits as well, then store
* at PTR.
*/
-static void __cpuinit
+static void
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be modified, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
-static void __cpuinit
+static void
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid)
@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
* This places the pte into ENTRYLO0 and writes it with tlbwi.
* Then it returns.
*/
-static void __cpuinit
+static void
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
* may have the probe fail bit set as a result of a trap on a
* kseg2 access, i.e. without refill. Then it returns.
*/
-static void __cpuinit
+static void
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int tmp)
@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
uasm_i_rfe(p); /* branch delay */
}
-static void __cpuinit
+static void
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
unsigned int ptr)
{
@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
uasm_i_tlbp(p); /* load delay */
}
-static void __cpuinit build_r3000_tlb_load_handler(void)
+static void build_r3000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
}
-static void __cpuinit build_r3000_tlb_store_handler(void)
+static void build_r3000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -1803,7 +1793,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if (p >= handle_tlbs)
+ if (p >= handle_tlbs_end)
panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
}
-static void __cpuinit build_r3000_tlb_modify_handler(void)
+static void build_r3000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
/*
* R4000 style TLB load/store/modify handlers.
*/
-static struct work_registers __cpuinit
+static struct work_registers
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r)
{
@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
return wr;
}
-static void __cpuinit
+static void
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr)
@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
#endif
}
-static void __cpuinit build_r4000_tlb_load_handler(void)
+static void build_r4000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
}
-static void __cpuinit build_r4000_tlb_store_handler(void)
+static void build_r4000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
}
-static void __cpuinit build_r4000_tlb_modify_handler(void)
+static void build_r4000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
}
-static void __cpuinit flush_tlb_handlers(void)
+static void flush_tlb_handlers(void)
{
local_flush_icache_range((unsigned long)handle_tlbl,
(unsigned long)handle_tlbl_end);
@@ -2210,7 +2200,7 @@ static void __cpuinit flush_tlb_handlers(void)
#endif
}
-void __cpuinit build_tlb_refill_handler(void)
+void build_tlb_refill_handler(void)
{
/*
* The refill handler is generated per-CPU, multi-node systems
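Beyond the annotation removal, the tlbex.c hunks above fold in two functional fixes: build_r4000_setup_pgd() now emits into tlbmiss_handler_setup_pgd itself rather than the dropped *_array buffer, and the overflow check in build_r3000_tlb_store_handler() compares the write pointer against handle_tlbs_end instead of handle_tlbs (a pointer that starts at handle_tlbs always satisfies p >= handle_tlbs, so the old test could not distinguish overflow from normal operation). A standalone sketch of the emit-and-check pattern, with hypothetical names standing in for the linker-provided handler symbols:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical fixed-size handler slot; in the kernel these bounds are
 * the linker symbols handle_tlbs[] / handle_tlbs_end[]. */
static uint32_t handler[16];
static uint32_t *const handler_end = handler + 16;

static void emit(uint32_t **p, uint32_t insn)
{
	if (*p >= handler_end) {	/* compare against the END marker */
		fprintf(stderr, "handler space exceeded\n");
		exit(1);
	}
	*(*p)++ = insn;
}

int main(void)
{
	uint32_t *p = handler;
	int i;

	for (i = 0; i < 16; i++)	/* a 17th emit would trip the check */
		emit(&p, 0);
	printf("emitted %d words\n", (int)(p - handler));
	return 0;
}
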
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 162ee6d62788..060000fa653c 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -49,7 +49,7 @@
#include "uasm.c"
-static struct insn insn_table_MM[] __uasminitdata = {
+static struct insn insn_table_MM[] = {
{ insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
{ insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
@@ -118,7 +118,7 @@ static struct insn insn_table_MM[] __uasminitdata = {
#undef M
-static inline __uasminit u32 build_bimm(s32 arg)
+static inline u32 build_bimm(s32 arg)
{
WARN(arg > 0xffff || arg < -0x10000,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -128,7 +128,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}
-static inline __uasminit u32 build_jimm(u32 arg)
+static inline u32 build_jimm(u32 arg)
{
WARN(arg & ~((JIMM_MASK << 2) | 1),
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+static void build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
@@ -199,7 +199,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
(*buf)++;
}
-static inline void __uasminit
+static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 5fcdd8fe3e83..0c724589854e 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -49,7 +49,7 @@
#include "uasm.c"
-static struct insn insn_table[] __uasminitdata = {
+static struct insn insn_table[] = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
@@ -119,7 +119,7 @@ static struct insn insn_table[] __uasminitdata = {
#undef M
-static inline __uasminit u32 build_bimm(s32 arg)
+static inline u32 build_bimm(s32 arg)
{
WARN(arg > 0x1ffff || arg < -0x20000,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -129,7 +129,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
-static inline __uasminit u32 build_jimm(u32 arg)
+static inline u32 build_jimm(u32 arg)
{
WARN(arg & ~(JIMM_MASK << 2),
KERN_WARNING "Micro-assembler field overflow\n");
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+static void build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
@@ -187,7 +187,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
(*buf)++;
}
-static inline void __uasminit
+static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 7eb5e4355d25..b9d14b6c7f58 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,35 +63,35 @@ struct insn {
enum fields fields;
};
-static inline __uasminit u32 build_rs(u32 arg)
+static inline u32 build_rs(u32 arg)
{
WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RS_MASK) << RS_SH;
}
-static inline __uasminit u32 build_rt(u32 arg)
+static inline u32 build_rt(u32 arg)
{
WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RT_MASK) << RT_SH;
}
-static inline __uasminit u32 build_rd(u32 arg)
+static inline u32 build_rd(u32 arg)
{
WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RD_MASK) << RD_SH;
}
-static inline __uasminit u32 build_re(u32 arg)
+static inline u32 build_re(u32 arg)
{
WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RE_MASK) << RE_SH;
}
-static inline __uasminit u32 build_simm(s32 arg)
+static inline u32 build_simm(s32 arg)
{
WARN(arg > 0x7fff || arg < -0x8000,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -99,14 +99,14 @@ static inline __uasminit u32 build_simm(s32 arg)
return arg & 0xffff;
}
-static inline __uasminit u32 build_uimm(u32 arg)
+static inline u32 build_uimm(u32 arg)
{
WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & IMM_MASK;
}
-static inline __uasminit u32 build_scimm(u32 arg)
+static inline u32 build_scimm(u32 arg)
{
WARN(arg & ~SCIMM_MASK,
KERN_WARNING "Micro-assembler field overflow\n");
@@ -114,21 +114,21 @@ static inline __uasminit u32 build_scimm(u32 arg)
return (arg & SCIMM_MASK) << SCIMM_SH;
}
-static inline __uasminit u32 build_func(u32 arg)
+static inline u32 build_func(u32 arg)
{
WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & FUNC_MASK;
}
-static inline __uasminit u32 build_set(u32 arg)
+static inline u32 build_set(u32 arg)
{
WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & SET_MASK;
}
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
+static void build_insn(u32 **buf, enum opcode opc, ...);
#define I_u1u2u3(op) \
Ip_u1u2u3(op) \
@@ -286,7 +286,7 @@ I_u3u1u2(_ldx)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
-void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
+void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -304,7 +304,7 @@ I_u2s3u1(_pref)
#endif
/* Handle labels. */
-void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
+void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
@@ -312,7 +312,7 @@ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, in
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
-int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
+int ISAFUNC(uasm_in_compat_space_p)(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
@@ -323,7 +323,7 @@ int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
-static int __uasminit uasm_rel_highest(long val)
+static int uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -332,7 +332,7 @@ static int __uasminit uasm_rel_highest(long val)
#endif
}
-static int __uasminit uasm_rel_higher(long val)
+static int uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -341,19 +341,19 @@ static int __uasminit uasm_rel_higher(long val)
#endif
}
-int __uasminit ISAFUNC(uasm_rel_hi)(long val)
+int ISAFUNC(uasm_rel_hi)(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
-int __uasminit ISAFUNC(uasm_rel_lo)(long val)
+int ISAFUNC(uasm_rel_lo)(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
-void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
{
if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
@@ -371,7 +371,7 @@ void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
}
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
-void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
{
ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
if (ISAFUNC(uasm_rel_lo(addr))) {
@@ -386,8 +386,7 @@ void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
/* Handle relocations. */
-void __uasminit
-ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
+void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
@@ -396,11 +395,11 @@ ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
-static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+static inline void __resolve_relocs(struct uasm_reloc *rel,
+ struct uasm_label *lab);
-void __uasminit
-ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
+void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
+ struct uasm_label *lab)
{
struct uasm_label *l;
@@ -411,8 +410,8 @@ ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
-void __uasminit
-ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
+ long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
@@ -420,8 +419,8 @@ ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
-void __uasminit
-ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
+void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
+ long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
@@ -429,9 +428,8 @@ ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
-void __uasminit
-ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
- u32 *end, u32 *target)
+void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target)
{
long off = (long)(target - first);
@@ -442,7 +440,7 @@ ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
-int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
+int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -456,83 +454,79 @@ int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
/* Convenience functions for labeled branches. */
-void __uasminit
-ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bltz)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
-void __uasminit
-ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
+void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_b)(p, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
-void __uasminit
-ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqz)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
-void __uasminit
-ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqzl)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
-void __uasminit
-ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
- unsigned int reg2, int lid)
+void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
-void __uasminit
-ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bnez)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
-void __uasminit
-ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgezl)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
-void __uasminit
-ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgez)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
-void __uasminit
-ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid)
+void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
-void __uasminit
-ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid)
+void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index becbf47506a5..c4849904f013 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -32,7 +32,7 @@ static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
/*
* Post-config but pre-boot cleanup entry point
*/
-static void __cpuinit msmtc_init_secondary(void)
+static void msmtc_init_secondary(void)
{
int myvpe;
@@ -53,7 +53,7 @@ static void __cpuinit msmtc_init_secondary(void)
/*
* Platform "CPU" startup hook
*/
-static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
+static void msmtc_boot_secondary(int cpu, struct task_struct *idle)
{
smtc_boot_secondary(cpu, idle);
}
@@ -61,7 +61,7 @@ static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
/*
* SMP initialization finalization entry point
*/
-static void __cpuinit msmtc_smp_finish(void)
+static void msmtc_smp_finish(void)
{
smtc_smp_finish();
}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 0ad305f75802..53aad4a35375 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -150,7 +150,7 @@ static void __init plat_perf_setup(void)
}
}
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 96b42eb9b5e2..a43ea3cc0a3b 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -91,7 +91,7 @@ static void __init plat_perf_setup(void)
}
}
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
if (cpu_has_vint)
set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 73facb2b33bb..1c7e3a1b81ab 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -40,6 +40,10 @@
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/ptrace.h>
@@ -223,17 +227,6 @@ static void nlm_init_node_irqs(int node)
nodep->irqmask = irqmask;
}
-void __init arch_init_irq(void)
-{
- /* Initialize the irq descriptors */
- nlm_init_percpu_irqs();
- nlm_init_node_irqs(0);
- write_c0_eimr(nlm_current_node()->irqmask);
-#if defined(CONFIG_CPU_XLR)
- nlm_setup_fmn_irq();
-#endif
-}
-
void nlm_smp_irq_init(int hwcpuid)
{
int node, cpu;
@@ -266,3 +259,56 @@ asmlinkage void plat_irq_dispatch(void)
/* top level irq handling */
do_IRQ(nlm_irq_to_xirq(node, i));
}
+
+#ifdef CONFIG_OF
+static struct irq_domain *xlp_pic_domain;
+
+static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init xlp_of_pic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
+ struct resource res;
+ int socid, ret;
+
+ /* we need a hack to get the PIC's SoC chip id */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret < 0) {
+ pr_err("PIC %s: reg property not found!\n", node->name);
+ return -EINVAL;
+ }
+ socid = (res.start >> 18) & 0x3;
+ xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
+ nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
+ &xlp_pic_irq_domain_ops, NULL);
+ if (xlp_pic_domain == NULL) {
+ pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
+ return -EINVAL;
+ }
+ pr_info("Node %d: IRQ domain created for PIC@%pa\n", socid,
+ &res.start);
+ return 0;
+}
+
+static struct of_device_id __initdata xlp_pic_irq_ids[] = {
+ { .compatible = "netlogic,xlp-pic", .data = xlp_of_pic_init },
+ {},
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+ /* Initialize the irq descriptors */
+ nlm_init_percpu_irqs();
+ nlm_init_node_irqs(0);
+ write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+ nlm_setup_fmn_irq();
+#endif
+#if defined(CONFIG_OF)
+ of_irq_init(xlp_pic_irq_ids);
+#endif
+}
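The netlogic/common/irq.c change above moves arch_init_irq() below a new CONFIG_OF block and adds an of_irq_init() call to it. xlp_of_pic_init() derives the node id from bits 19:18 of the PIC's register base (the "hack" noted in its comment) and registers a legacy irq_domain, so device-tree hwirqs PIC_IRQ_BASE..PIC_IRT_LAST_IRQ resolve to the per-node virq range the platform already uses; the xlp_evp/xlp_svp dts hunks further down add the matching "netlogic,xlp-pic" compatible string so of_irq_init() actually finds the node. A hedged sketch of what the legacy mapping amounts to (hypothetical helper, kernel context assumed):

/* Illustrative only: with irq_domain_add_legacy() as above,
 * virq = first_irq + (hwirq - first_hwirq), i.e. the DT specifier
 * lands on the same number nlm_irq_to_xirq() would have produced. */
static unsigned int xlp_pic_hwirq_to_virq(int socid, unsigned int hwirq)
{
	return nlm_irq_to_xirq(socid, PIC_IRQ_BASE) + (hwirq - PIC_IRQ_BASE);
}
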
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 885d293b61da..4e35d9c453e2 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -116,7 +116,7 @@ void nlm_early_init_secondary(int cpu)
/*
* Code to run on secondary just after probing the CPU
*/
-static void __cpuinit nlm_init_secondary(void)
+static void nlm_init_secondary(void)
{
int hwtid;
@@ -252,7 +252,7 @@ unsupp:
return 0;
}
-int __cpuinit nlm_wakeup_secondary_cpus(void)
+int nlm_wakeup_secondary_cpus(void)
{
u32 *reset_data;
int threadmode;
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index 528c46c5a170..aa6cff0a229b 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -70,7 +70,6 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
nop
/* not reached */
- __CPUINIT
NESTED(nlm_boot_secondary_cpus, 16, sp)
/* Initialize CP0 Status */
move t1, zero
@@ -94,7 +93,6 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
jr t0
nop
END(nlm_boot_secondary_cpus)
- __FINIT
/*
* In case of RMIboot bootloader which is used on XLR boards, the CPUs
@@ -102,7 +100,6 @@ END(nlm_boot_secondary_cpus)
* This will get them out of the bootloader code and into linux. Needed
* because the bootloader area will be taken and initialized by linux.
*/
- __CPUINIT
NESTED(nlm_rmiboot_preboot, 16, sp)
mfc0 t0, $15, 1 /* read ebase */
andi t0, 0x1f /* t0 has the processor_id() */
@@ -140,4 +137,3 @@ NESTED(nlm_rmiboot_preboot, 16, sp)
b 1b
nop
END(nlm_rmiboot_preboot)
- __FINIT
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index 5c56555380bb..045a396c57ce 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -54,7 +54,7 @@
#error "Unknown CPU"
#endif
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
return IRQ_TIMER;
}
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index e14f42308064..06407033678e 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -76,10 +76,11 @@
};
};
pic: pic@4000 {
- interrupt-controller;
+ compatible = "netlogic,xlp-pic";
#address-cells = <0>;
#interrupt-cells = <1>;
reg = <0 0x4000 0x200>;
+ interrupt-controller;
};
nor_flash@1,0 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
index 8af4bdbe5d99..9c5db102df53 100644
--- a/arch/mips/netlogic/dts/xlp_svp.dts
+++ b/arch/mips/netlogic/dts/xlp_svp.dts
@@ -76,10 +76,11 @@
};
};
pic: pic@4000 {
- interrupt-controller;
+ compatible = "netlogic,xlp-pic";
#address-cells = <0>;
#interrupt-cells = <1>;
reg = <0 0x4000 0x200>;
+ interrupt-controller;
};
nor_flash@1,0 {
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index 9c401dd78337..ef3897ef0dc7 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -119,7 +119,7 @@ static u64 xlp_usb_dmamask = ~(u32)0;
static void nlm_usb_fixup_final(struct pci_dev *dev)
{
dev->dev.dma_mask = &xlp_usb_dmamask;
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
switch (dev->devfn) {
case 0x10:
dev->irq = PIC_EHCI_0_IRQ;
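The usb-init.c hunk above lowers the coherent DMA mask from 64 to 32 bits, bringing it in line with the streaming mask installed just before it (xlp_usb_dmamask = ~(u32)0 is a 32-bit mask even though it is stored in a u64). A minimal standalone check, with DMA_BIT_MASK() redefined locally to mirror the kernel macro:

#include <assert.h>
#include <stdint.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t streaming = (uint64_t)~(uint32_t)0;	/* xlp_usb_dmamask */
	uint64_t coherent = DMA_BIT_MASK(32);		/* new coherent mask */

	/* both masks now describe the same 32-bit addressable range */
	assert(streaming == coherent && coherent == 0xffffffffULL);
	return 0;
}
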
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index c06e4c9f0478..9fb81fa6272a 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -49,7 +49,7 @@
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
-int __cpuinit xlr_wakeup_secondary_cpus(void)
+int xlr_wakeup_secondary_cpus(void)
{
struct nlm_soc_info *nodep;
unsigned int i, j, boot_cpu;
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 7b2ac81e1f59..162b4cb29dba 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -42,7 +42,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
extern struct pci_ops bridge_pci_ops;
-int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
+int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
{
unsigned long offset = NODE_OFFSET(nasid);
struct bridge_controller *bc;
diff --git a/arch/mips/pmcs-msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c
index c8dcc1c01e18..6b5607fce279 100644
--- a/arch/mips/pmcs-msp71xx/msp_smtc.c
+++ b/arch/mips/pmcs-msp71xx/msp_smtc.c
@@ -33,7 +33,7 @@ static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
/*
* Post-config but pre-boot cleanup entry point
*/
-static void __cpuinit msp_smtc_init_secondary(void)
+static void msp_smtc_init_secondary(void)
{
int myvpe;
@@ -48,8 +48,7 @@ static void __cpuinit msp_smtc_init_secondary(void)
/*
* Platform "CPU" startup hook
*/
-static void __cpuinit msp_smtc_boot_secondary(int cpu,
- struct task_struct *idle)
+static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle)
{
smtc_boot_secondary(cpu, idle);
}
@@ -57,7 +56,7 @@ static void __cpuinit msp_smtc_boot_secondary(int cpu,
/*
* SMP initialization finalization entry point
*/
-static void __cpuinit msp_smtc_smp_finish(void)
+static void msp_smtc_smp_finish(void)
{
smtc_smp_finish();
}
diff --git a/arch/mips/pmcs-msp71xx/msp_time.c b/arch/mips/pmcs-msp71xx/msp_time.c
index 8f12ecc55ace..fea917be0ff1 100644
--- a/arch/mips/pmcs-msp71xx/msp_time.c
+++ b/arch/mips/pmcs-msp71xx/msp_time.c
@@ -88,7 +88,7 @@ void __init plat_time_init(void)
mips_hpt_frequency = cpu_rate/2;
}
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
/* MIPS_MT modes may want timer for second VPE */
if ((get_current_vpe()) && !tim_installed) {
diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c
index a4a90596c0ad..e460865873c1 100644
--- a/arch/mips/pnx833x/common/interrupts.c
+++ b/arch/mips/pnx833x/common/interrupts.c
@@ -281,7 +281,7 @@ void __init arch_init_irq(void)
write_c0_status(read_c0_status() | IE_IRQ2);
}
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
if (cpu_has_vint)
set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch);
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index 9f64c2387808..0238af1ba503 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -529,8 +529,7 @@ EXPORT_SYMBOL(asic_resource_get);
*/
void platform_release_memory(void *ptr, int size)
{
- free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size),
- -1, NULL);
+ free_reserved_area(ptr, ptr + size, -1, NULL);
}
EXPORT_SYMBOL(platform_release_memory);
diff --git a/arch/mips/powertv/time.c b/arch/mips/powertv/time.c
index 9fd7b67f2af7..f38b0d45eca9 100644
--- a/arch/mips/powertv/time.c
+++ b/arch/mips/powertv/time.c
@@ -25,7 +25,7 @@
#include "powertv-clock.h"
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
return irq_mips_timer;
}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 320b1f1043ff..781b3d14a489 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -73,7 +73,7 @@ static struct irq_chip ralink_intc_irq_chip = {
.irq_mask_ack = ralink_intc_irq_mask,
};
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index d41b1c6fb032..ee736bd103f8 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -54,7 +54,7 @@ extern void pcibr_setup(cnodeid_t);
extern void xtalk_probe_node(cnodeid_t nid);
-static void __cpuinit per_hub_init(cnodeid_t cnode)
+static void per_hub_init(cnodeid_t cnode)
{
struct hub_data *hub = hub_data(cnode);
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
@@ -110,7 +110,7 @@ static void __cpuinit per_hub_init(cnodeid_t cnode)
}
}
-void __cpuinit per_cpu_init(void)
+void per_cpu_init(void)
{
int cpu = smp_processor_id();
int slice = LOCAL_HUB_L(PI_CPU_NUM);
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f94638141b20..f4ea8aa79ba2 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -173,12 +173,12 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
ip27_send_ipi_single(i, action);
}
-static void __cpuinit ip27_init_secondary(void)
+static void ip27_init_secondary(void)
{
per_cpu_init();
}
-static void __cpuinit ip27_smp_finish(void)
+static void ip27_smp_finish(void)
{
extern void hub_rt_clock_event_init(void);
@@ -195,7 +195,7 @@ static void __init ip27_cpus_done(void)
* set sp to the kernel stack of the newly created idle process, gp to the proc
* struct so that current_thread_info() will work.
*/
-static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle)
+static void ip27_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long)task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 2e21b761cb9c..1d97eaba0c5f 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -106,7 +106,7 @@ struct irqaction hub_rt_irqaction = {
#define NSEC_PER_CYCLE 800
#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
-void __cpuinit hub_rt_clock_event_init(void)
+void hub_rt_clock_event_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
@@ -173,7 +173,7 @@ void __init plat_time_init(void)
hub_rt_clock_event_init();
}
-void __cpuinit cpu_time_init(void)
+void cpu_time_init(void)
{
lboard_t *board;
klcpu_t *cpu;
@@ -194,7 +194,7 @@ void __cpuinit cpu_time_init(void)
set_c0_status(SRB_TIMOCLK);
}
-void __cpuinit hub_rtc_init(cnodeid_t cnode)
+void hub_rtc_init(cnodeid_t cnode)
{
/*
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index a4df7d0f6f12..d59b820f528d 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -23,7 +23,7 @@
extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
-static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
+static int probe_one_port(nasid_t nasid, int widget, int masterwid)
{
widgetreg_t widget_id;
xwidget_part_num_t partnum;
@@ -47,7 +47,7 @@ static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
return 0;
}
-static int __cpuinit xbow_probe(nasid_t nasid)
+static int xbow_probe(nasid_t nasid)
{
lboard_t *brd;
klxbow_t *xbow_p;
@@ -100,7 +100,7 @@ static int __cpuinit xbow_probe(nasid_t nasid)
return 0;
}
-void __cpuinit xtalk_probe_node(cnodeid_t nid)
+void xtalk_probe_node(cnodeid_t nid)
{
volatile u64 hubreg;
nasid_t nasid;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index de88e22694a0..54e2c4de15c1 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -60,7 +60,7 @@ static void *mailbox_0_regs[] = {
/*
* SMP init and finish on secondary CPUs
*/
-void __cpuinit bcm1480_smp_init(void)
+void bcm1480_smp_init(void)
{
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0;
@@ -95,7 +95,7 @@ static void bcm1480_send_ipi_mask(const struct cpumask *mask,
/*
* Code to run on secondary just after probing the CPU
*/
-static void __cpuinit bcm1480_init_secondary(void)
+static void bcm1480_init_secondary(void)
{
extern void bcm1480_smp_init(void);
@@ -106,7 +106,7 @@ static void __cpuinit bcm1480_init_secondary(void)
* Do any tidying up before marking online and running the idle
* loop
*/
-static void __cpuinit bcm1480_smp_finish(void)
+static void bcm1480_smp_finish(void)
{
extern void sb1480_clockevent_init(void);
@@ -125,7 +125,7 @@ static void bcm1480_cpus_done(void)
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
*/
-static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
+static void bcm1480_boot_secondary(int cpu, struct task_struct *idle)
{
int retval;
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 285cfef4ebc0..d7b942db0ea5 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -48,7 +48,7 @@ static void *mailbox_regs[] = {
/*
* SMP init and finish on secondary CPUs
*/
-void __cpuinit sb1250_smp_init(void)
+void sb1250_smp_init(void)
{
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0;
@@ -83,7 +83,7 @@ static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
/*
* Code to run on secondary just after probing the CPU
*/
-static void __cpuinit sb1250_init_secondary(void)
+static void sb1250_init_secondary(void)
{
extern void sb1250_smp_init(void);
@@ -94,7 +94,7 @@ static void __cpuinit sb1250_init_secondary(void)
* Do any tidying up before marking online and running the idle
* loop
*/
-static void __cpuinit sb1250_smp_finish(void)
+static void sb1250_smp_finish(void)
{
extern void sb1250_clockevent_init(void);
@@ -113,7 +113,7 @@ static void sb1250_cpus_done(void)
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
*/
-static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
+static void sb1250_boot_secondary(int cpu, struct task_struct *idle)
{
int retval;
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index f4d5bedc3b4f..d7359ffbcbdd 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -267,7 +267,7 @@ void __init detect_unit_config(unsigned long upr, unsigned long mask,
*
*/
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
const int *val;
struct device_node *cpu = NULL;
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
new file mode 100644
index 000000000000..f11006361297
--- /dev/null
+++ b/arch/parisc/configs/c8000_defconfig
@@ -0,0 +1,279 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_IOMMU_CCIO=y
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+# CONFIG_SUPERIO is not set
+# CONFIG_CHASSIS_LCD_LED is not set
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_PDC_CHASSIS_WARN is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+# CONFIG_IPV6 is not set
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_TIPC=m
+CONFIG_LLC2=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_WCACHE=y
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_SIIMAGE=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
+CONFIG_TUN=y
+CONFIG_E1000=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+CONFIG_MOUSE_PS2=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_CM109=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_GSCPS2=m
+# CONFIG_HP_SDC is not set
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_MUX is not set
+CONFIG_SERIAL_JSM=m
+CONFIG_PRINTER=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_STI is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_STI_CONSOLE is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_AD1889=m
+# CONFIG_SND_USB is not set
+# CONFIG_SND_GSC is not set
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DEBUG_SLAB_LEAK=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_KEYS=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FONTS=y
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 9afdad6c2ffb..eaf4dc1c7294 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -23,6 +23,7 @@ struct parisc_device {
/* generic info returned from pdc_pat_cell_module() */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
+ unsigned long mod0;
#endif
u64 dma_mask; /* DMA mask for I/O */
struct device dev;
@@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
extern struct bus_type parisc_bus_type;
+int iosapic_serial_irq(struct parisc_device *dev);
+
#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 2e65aa54bd10..c035673209f7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
+/* Virtual address of pfn. */
+#define pfn_va(pfn) __va(PFN_PHYS(pfn))
+
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- struct page *page = pte_page(*ptep);
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
+ /* We don't have pte special. As a result, we can be called with
+ an invalid pfn and we don't need to flush the kernel dcache page.
+ This occurs with FireGL card in C8000. */
+ if (!pfn_valid(pfn))
+ return;
- flush_kernel_dcache_page(page);
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page(page);
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
void flush_cache_mm(struct mm_struct *mm)
{
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) < parisc_cache_flush_threshold) {
- struct vm_area_struct *vma;
-
- if (mm->context == mfsp(3)) {
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- flush_user_dcache_range_asm(vma->vm_start,
- vma->vm_end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(
- vma->vm_start, vma->vm_end);
- }
- } else {
- pgd_t *pgd = mm->pgd;
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- unsigned long addr;
-
- for (addr = vma->vm_start; addr < vma->vm_end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- __flush_cache_page(vma, addr,
- page_to_phys(pte_page(pte)));
- }
- }
- }
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ continue;
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
}
return;
}
-#ifdef CONFIG_SMP
- flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ pgd = mm->pgd;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
void
@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ unsigned long addr;
+ pgd_t *pgd;
+
BUG_ON(!vma->vm_mm->context);
- if ((end - start) < parisc_cache_flush_threshold) {
- if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
- } else {
- unsigned long addr;
- pgd_t *pgd = vma->vm_mm->pgd;
-
- for (addr = start & PAGE_MASK; addr < end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- flush_cache_page(vma,
- addr, pte_pfn(pte));
- }
- }
- }
- } else {
-#ifdef CONFIG_SMP
+ if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ return;
+ }
+
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- flush_tlb_page(vma, vmaddr);
- __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+ if (pfn_valid(pfn)) {
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}
#ifdef CONFIG_PARISC_TMPALIAS
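The cache.c hunks above all converge on one pattern: translate the PTE to a pfn, bail out when that pfn is not backed by real memory, and flush by physical address instead of going through struct page. A minimal sketch of that pattern outside of diff context (the helper name is illustrative; pte_pfn(), pfn_valid(), PFN_PHYS() and __flush_cache_page() are the interfaces the hunks themselves use):

	/* Flush one user page, but only when its pfn maps real memory. */
	static inline void flush_valid_user_page(struct vm_area_struct *vma,
						 unsigned long addr, pte_t pte)
	{
		unsigned long pfn = pte_pfn(pte);

		/* I/O-space mappings (e.g. a graphics card BAR) have no backing page. */
		if (!pfn_valid(pfn))
			return;

		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}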
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index f65fa480c905..22395901d47b 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -150,7 +150,7 @@ static void convert_to_wide(unsigned long *addr)
}
#ifdef CONFIG_64BIT
-void __cpuinit set_firmware_width_unlocked(void)
+void set_firmware_width_unlocked(void)
{
int ret;
@@ -167,7 +167,7 @@ void __cpuinit set_firmware_width_unlocked(void)
* This function must be called before any pdc_* function that uses the
* convert_to_wide function.
*/
-void __cpuinit set_firmware_width(void)
+void set_firmware_width(void)
{
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
@@ -175,11 +175,13 @@ void __cpuinit set_firmware_width(void)
spin_unlock_irqrestore(&pdc_lock, flags);
}
#else
-void __cpuinit set_firmware_width_unlocked(void) {
+void set_firmware_width_unlocked(void)
+{
return;
}
-void __cpuinit set_firmware_width(void) {
+void set_firmware_width(void)
+{
return;
}
#endif /*CONFIG_64BIT*/
@@ -301,7 +303,7 @@ int pdc_chassis_warn(unsigned long *warn)
return retval;
}
-int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
+int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
{
int ret;
@@ -322,7 +324,7 @@ int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
* This PDC call returns the presence and status of all the coprocessors
* attached to the processor.
*/
-int __cpuinit pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
+int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
{
int ret;
unsigned long flags;
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 872275659d98..06cb3992907e 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1367,7 +1367,7 @@ const char *parisc_hardware_description(struct parisc_device_id *id)
/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */
-enum cpu_type __cpuinit
+enum cpu_type
parisc_get_cpu_type(unsigned long hversion)
{
struct hp_cpu_type_mask *ptr;
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4a185d..f0b6722fc706 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell->mod_location;
+ dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 8a96c8ab9fe6..b68d977ce30f 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -73,7 +73,7 @@ extern int update_cr16_clocksource(void); /* from time.c */
*
* FIXME: doesn't do much yet...
*/
-static void __cpuinit
+static void
init_percpu_prof(unsigned long cpunum)
{
struct cpuinfo_parisc *p;
@@ -92,7 +92,7 @@ init_percpu_prof(unsigned long cpunum)
* (return 1). If so, initialize the chip and tell other partners in crime
* they have work to do.
*/
-static int __cpuinit processor_probe(struct parisc_device *dev)
+static int processor_probe(struct parisc_device *dev)
{
unsigned long txn_addr;
unsigned long cpuid;
@@ -299,7 +299,7 @@ void __init collect_boot_cpu_data(void)
*
* o Enable CPU profiling hooks.
*/
-int __cpuinit init_per_cpu(int cpunum)
+int init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 940188d1942c..07349b002687 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -56,13 +56,6 @@
#define A(__x) ((unsigned long)(__x))
/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-#ifdef CONFIG_64BIT
-#include "sys32.h"
-#endif
-
-/*
* Do a signal return - restore sigcontext.
*/
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 33eca1b04926..6c6a271a6140 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -34,7 +34,6 @@
#include <asm/uaccess.h>
#include "signal32.h"
-#include "sys32.h"
#define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index e3614fb343e5..8a252f2d6c08 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -62,9 +62,9 @@ static int smp_debug_lvl = 0;
volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
-static volatile int cpu_now_booting __cpuinitdata;
+static volatile int cpu_now_booting;
-static int parisc_max_cpus __cpuinitdata = 1;
+static int parisc_max_cpus = 1;
static DEFINE_PER_CPU(spinlock_t, ipi_lock);
@@ -328,7 +328,7 @@ void __init smp_callin(void)
/*
* Bring one cpu online.
*/
-int __cpuinit smp_boot_one_cpu(int cpuid, struct task_struct *idle)
+int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
@@ -424,7 +424,7 @@ void smp_cpus_done(unsigned int cpu_max)
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
if (cpu != 0 && cpu < parisc_max_cpus)
smp_boot_one_cpu(cpu, tidle);
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
deleted file mode 100644
index 60dd470f39f8..000000000000
--- a/arch/parisc/kernel/sys32.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
- * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
- * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _PARISC64_KERNEL_SYS32_H
-#define _PARISC64_KERNEL_SYS32_H
-
-#include <linux/compat.h>
-
-/* Call a kernel syscall which will use kernel space instead of user
- * space for its copy_to/from_user.
- */
-#define KERNEL_SYSCALL(ret, syscall, args...) \
-{ \
- mm_segment_t old_fs = get_fs(); \
- set_fs(KERNEL_DS); \
- ret = syscall(args); \
- set_fs (old_fs); \
-}
-
-#endif
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index a134ff4da12e..bb9f3b64de55 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -42,8 +42,6 @@
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-#include "sys32.h"
-
#undef DEBUG
#ifdef DEBUG
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index c86fcb92358e..0e8cfd09da2f 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -58,7 +58,7 @@ CONFIG_SCHED_SMT=y
CONFIG_PPC_DENORMALISATION=y
CONFIG_PCCARD=y
CONFIG_ELECTRA_CF=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 4b20f76172e2..0085dc4642c5 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -32,7 +32,7 @@ CONFIG_IRQ_ALL_CPUS=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_PCI_MSI=y
CONFIG_PCCARD=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index bea8587c3af5..1d4b9763895d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -53,7 +53,7 @@ CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_PPC_DENORMALISATION=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_PACKET=y
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 09a8743143f3..d3e5e9bc8f94 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -55,6 +55,8 @@ struct device_node;
#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
#define EEH_PE_PHB_DEAD (1 << 2) /* Dead PHB */
+#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
+
struct eeh_pe {
int type; /* PE type: PHB/Bus/Device */
int state; /* PE EEH dependent mode */
@@ -72,8 +74,8 @@ struct eeh_pe {
struct list_head child; /* Child PEs */
};
-#define eeh_pe_for_each_dev(pe, edev) \
- list_for_each_entry(edev, &pe->edevs, list)
+#define eeh_pe_for_each_dev(pe, edev, tmp) \
+ list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
/*
* The struct is used to trace EEH state for the associated
@@ -82,7 +84,13 @@ struct eeh_pe {
* another tree except the currently existing tree of PCI
* buses and PCI devices
*/
-#define EEH_DEV_IRQ_DISABLED (1<<0) /* Interrupt disabled */
+#define EEH_DEV_BRIDGE (1 << 0) /* PCI bridge */
+#define EEH_DEV_ROOT_PORT (1 << 1) /* PCIe root port */
+#define EEH_DEV_DS_PORT (1 << 2) /* Downstream port */
+#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
+#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
+
+#define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */
struct eeh_dev {
int mode; /* EEH mode */
@@ -90,11 +98,13 @@ struct eeh_dev {
int config_addr; /* Config address */
int pe_config_addr; /* PE config address */
u32 config_space[16]; /* Saved PCI config space */
+ u8 pcie_cap; /* Saved PCIe capability */
struct eeh_pe *pe; /* Associated PE */
struct list_head list; /* Form link list in the PE */
struct pci_controller *phb; /* Associated PHB */
struct device_node *dn; /* Associated device node */
struct pci_dev *pdev; /* Associated PCI device */
+ struct pci_bus *bus; /* PCI bus for partial hotplug */
};
static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
@@ -193,8 +203,10 @@ int eeh_phb_pe_create(struct pci_controller *phb);
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
int eeh_add_to_parent_pe(struct eeh_dev *edev);
-int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe);
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
void eeh_pe_update_time_stamp(struct eeh_pe *pe);
+void *eeh_pe_traverse(struct eeh_pe *root,
+ eeh_traverse_func fn, void *flag);
void *eeh_pe_dev_traverse(struct eeh_pe *root,
eeh_traverse_func fn, void *flag);
void eeh_pe_restore_bars(struct eeh_pe *pe);
@@ -209,10 +221,12 @@ unsigned long eeh_check_failure(const volatile void __iomem *token,
unsigned long val);
int eeh_dev_check_failure(struct eeh_dev *edev);
void eeh_addr_cache_build(void);
+void eeh_add_device_early(struct device_node *);
void eeh_add_device_tree_early(struct device_node *);
+void eeh_add_device_late(struct pci_dev *);
void eeh_add_device_tree_late(struct pci_bus *);
void eeh_add_sysfs_files(struct pci_bus *);
-void eeh_remove_bus_device(struct pci_dev *, int);
+void eeh_remove_device(struct pci_dev *);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -252,13 +266,17 @@ static inline unsigned long eeh_check_failure(const volatile void __iomem *token
static inline void eeh_addr_cache_build(void) { }
+static inline void eeh_add_device_early(struct device_node *dn) { }
+
static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+static inline void eeh_add_device_late(struct pci_dev *dev) { }
+
static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
-static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
+static inline void eeh_remove_device(struct pci_dev *dev) { }
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index ba713f166fa5..10be1dd01c6b 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -96,10 +96,11 @@ static inline bool arch_irqs_disabled(void)
#endif
#define hard_irq_disable() do { \
- u8 _was_enabled = get_paca()->soft_enabled; \
+ u8 _was_enabled; \
__hard_irq_disable(); \
- get_paca()->soft_enabled = 0; \
- get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \
+ _was_enabled = local_paca->soft_enabled; \
+ local_paca->soft_enabled = 0; \
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (_was_enabled) \
trace_hardirqs_off(); \
} while(0)
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590ec444..49fa55bfbac4 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@ struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
-#ifdef CONFIG_MODVERSIONS
+#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
+#define reloc_start PHYSICAL_START
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 2c1d8cb9b265..32d0d2018faf 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -209,7 +209,6 @@ static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
/** Remove all of the PCI devices under this bus */
-extern void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe);
extern void pcibios_remove_pci_devices(struct pci_bus *bus);
/** Discover new pci devices under this bus, and add them */
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 2dd7bfc459be..8b2492644754 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
+#include <uapi/asm/perf_event.h>
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
@@ -69,11 +70,6 @@ struct power_pmu {
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
-/*
- * We use the event config bit 63 as a flag to request EBB.
- */
-#define EVENT_CONFIG_EBB_SHIFT 63
-
extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5d7d9c2a5473..a6840e4e24f7 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1088,7 +1088,8 @@
#define PVR_970MP 0x0044
#define PVR_970GX 0x0045
#define PVR_POWER7p 0x004A
-#define PVR_POWER8 0x004B
+#define PVR_POWER8E 0x004B
+#define PVR_POWER8 0x004D
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdca..48cfc858abd6 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
#define smp_setup_cpu_maps()
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 5182c8622b54..48be855ef37b 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -20,6 +20,7 @@ header-y += mman.h
header-y += msgbuf.h
header-y += nvram.h
header-y += param.h
+header-y += perf_event.h
header-y += poll.h
header-y += posix_types.h
header-y += ps3fb.h
diff --git a/arch/powerpc/include/uapi/asm/perf_event.h b/arch/powerpc/include/uapi/asm/perf_event.h
new file mode 100644
index 000000000000..80a4d40cf5bc
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_event.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2013 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
+#define _UAPI_ASM_POWERPC_PERF_EVENT_H
+
+/*
+ * We use bit 63 of perf_event_attr.config as a flag to request EBB.
+ */
+#define PERF_EVENT_CONFIG_EBB_SHIFT 63
+
+#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */
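The comment in this new header is the whole contract: userspace asks for an EBB event by setting bit 63 of perf_event_attr.config. A minimal sketch of the consumer side (the raw event code is a hypothetical placeholder, and the include paths assume the installed powerpc uapi headers):

	#include <string.h>
	#include <linux/perf_event.h>
	#include <asm/perf_event.h>	/* PERF_EVENT_CONFIG_EBB_SHIFT */

	/* Build an attr that requests EBB delivery for a raw PMU event. */
	static struct perf_event_attr make_ebb_attr(unsigned long long raw_event)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type   = PERF_TYPE_RAW;
		attr.size   = sizeof(attr);
		/* Bit 63 of config flags the event as an EBB request. */
		attr.config = raw_event | (1ULL << PERF_EVENT_CONFIG_EBB_SHIFT);

		return attr;
	}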
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2a45d0f04385..22973a74df73 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -494,9 +494,27 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_power7,
.platform = "power7+",
},
- { /* Power8 */
+ { /* Power8E */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004b0000,
+ .cpu_name = "POWER8E (raw)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .platform = "power8",
+ },
+ { /* Power8 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004d0000,
.cpu_name = "POWER8 (raw)",
.cpu_features = CPU_FTRS_POWER8,
.cpu_user_features = COMMON_USER_POWER8,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 39954fe941b8..ea9414c8088d 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -231,7 +231,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
size_t loglen = 0;
- struct eeh_dev *edev;
+ struct eeh_dev *edev, *tmp;
bool valid_cfg_log = true;
/*
@@ -251,7 +251,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
eeh_pe_restore_bars(pe);
pci_regs_buf[0] = 0;
- eeh_pe_for_each_dev(pe, edev) {
+ eeh_pe_for_each_dev(pe, edev, tmp) {
loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
EEH_PCI_REGS_LOG_LEN - loglen);
}
@@ -499,8 +499,6 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
}
eeh_dev_check_failure(edev);
-
- pci_dev_put(eeh_dev_to_pci_dev(edev));
return val;
}
@@ -838,7 +836,7 @@ core_initcall_sync(eeh_init);
* on the CEC architecture, type of the device, on earlier boot
* command-line arguments & etc.
*/
-static void eeh_add_device_early(struct device_node *dn)
+void eeh_add_device_early(struct device_node *dn)
{
struct pci_controller *phb;
@@ -886,7 +884,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
* This routine must be used to complete EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
*/
-static void eeh_add_device_late(struct pci_dev *dev)
+void eeh_add_device_late(struct pci_dev *dev)
{
struct device_node *dn;
struct eeh_dev *edev;
@@ -902,9 +900,23 @@ static void eeh_add_device_late(struct pci_dev *dev)
pr_debug("EEH: Already referenced !\n");
return;
}
- WARN_ON(edev->pdev);
- pci_dev_get(dev);
+ /*
+ * The EEH cache might not have been removed correctly at unplug
+ * time because of an unbalanced kref on the device; that removal
+ * relies on pcibios_release_device(). So we have to remove it
+ * here explicitly.
+ */
+ if (edev->pdev) {
+ eeh_rmv_from_parent_pe(edev);
+ eeh_addr_cache_rmv_dev(edev->pdev);
+ eeh_sysfs_remove_device(edev->pdev);
+ edev->mode &= ~EEH_DEV_SYSFS;
+
+ edev->pdev = NULL;
+ dev->dev.archdata.edev = NULL;
+ }
+
edev->pdev = dev;
dev->dev.archdata.edev = edev;
@@ -967,7 +979,6 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
/**
* eeh_remove_device - Undo EEH setup for the indicated pci device
* @dev: pci device to be removed
- * @purge_pe: remove the PE or not
*
* This routine should be called when a device is removed from
* a running system (e.g. by hotplug or dlpar). It unregisters
@@ -975,7 +986,7 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
* this device will no longer be detected after this call; thus,
* i/o errors affecting this slot may leave this device unusable.
*/
-static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
+void eeh_remove_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
@@ -986,42 +997,29 @@ static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
/* Unregister the device with the EEH/PCI address search system */
pr_debug("EEH: Removing device %s\n", pci_name(dev));
- if (!edev || !edev->pdev) {
+ if (!edev || !edev->pdev || !edev->pe) {
pr_debug("EEH: Not referenced !\n");
return;
}
+
+ /*
+ * During the hotplug for EEH error recovery, we need the EEH
+ * device to stay attached to its parent PE so that its BARs can
+ * be restored a bit later. So we keep it for the BAR restore and
+ * remove it from the parent PE during that restore.
+ */
edev->pdev = NULL;
dev->dev.archdata.edev = NULL;
- pci_dev_put(dev);
+ if (!(edev->pe->state & EEH_PE_KEEP))
+ eeh_rmv_from_parent_pe(edev);
+ else
+ edev->mode |= EEH_DEV_DISCONNECTED;
- eeh_rmv_from_parent_pe(edev, purge_pe);
eeh_addr_cache_rmv_dev(dev);
eeh_sysfs_remove_device(dev);
+ edev->mode &= ~EEH_DEV_SYSFS;
}
-/**
- * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
- * @dev: PCI device
- * @purge_pe: remove the corresponding PE or not
- *
- * This routine must be called when a device is removed from the
- * running system through hotplug or dlpar. The corresponding
- * PCI address cache will be removed.
- */
-void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
-{
- struct pci_bus *bus = dev->subordinate;
- struct pci_dev *child, *tmp;
-
- eeh_remove_device(dev, purge_pe);
-
- if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
- eeh_remove_bus_device(child, purge_pe);
- }
-}
-EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
-
static int proc_eeh_show(struct seq_file *m, void *v)
{
if (0 == eeh_subsystem_enabled) {
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index f9ac1232a746..e8c9fd546a5c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -68,16 +68,12 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
struct pci_io_addr_range *piar;
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
- if (addr < piar->addr_lo) {
+ if (addr < piar->addr_lo)
n = n->rb_left;
- } else {
- if (addr > piar->addr_hi) {
- n = n->rb_right;
- } else {
- pci_dev_get(piar->pcidev);
- return piar->edev;
- }
- }
+ else if (addr > piar->addr_hi)
+ n = n->rb_right;
+ else
+ return piar->edev;
}
return NULL;
@@ -156,7 +152,6 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
if (!piar)
return NULL;
- pci_dev_get(dev);
piar->addr_lo = alo;
piar->addr_hi = ahi;
piar->edev = pci_dev_to_eeh_dev(dev);
@@ -250,7 +245,6 @@ restart:
if (piar->pcidev == dev) {
rb_erase(n, &pci_io_addr_cache_root.rb_root);
- pci_dev_put(piar->pcidev);
kfree(piar);
goto restart;
}
@@ -302,12 +296,10 @@ void eeh_addr_cache_build(void)
if (!edev)
continue;
- pci_dev_get(dev); /* matching put is in eeh_remove_device() */
dev->dev.archdata.edev = edev;
edev->pdev = dev;
eeh_addr_cache_insert_dev(dev);
-
eeh_sysfs_add_device(dev);
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 2b1ce17cae50..36bed5a12750 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -143,10 +143,14 @@ static void eeh_disable_irq(struct pci_dev *dev)
static void eeh_enable_irq(struct pci_dev *dev)
{
struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
+ struct irq_desc *desc;
if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
edev->mode &= ~EEH_DEV_IRQ_DISABLED;
- enable_irq(dev->irq);
+
+ desc = irq_to_desc(dev->irq);
+ if (desc && desc->depth > 0)
+ enable_irq(dev->irq);
}
}
@@ -338,6 +342,54 @@ static void *eeh_report_failure(void *data, void *userdata)
return NULL;
}
+static void *eeh_rmv_device(void *data, void *userdata)
+{
+ struct pci_driver *driver;
+ struct eeh_dev *edev = (struct eeh_dev *)data;
+ struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
+ int *removed = (int *)userdata;
+
+ /*
+ * Actually, we should remove the PCI bridges as well.
+ * However, doing that adds a lot of complexity, particularly
+ * because some of the devices under the bridge might support
+ * EEH themselves. So for simplicity we only care about PCI
+ * devices here.
+ */
+ if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
+ return NULL;
+ driver = eeh_pcid_get(dev);
+ if (driver && driver->err_handler)
+ return NULL;
+
+ /* Remove it from PCI subsystem */
+ pr_debug("EEH: Removing %s without EEH sensitive driver\n",
+ pci_name(dev));
+ edev->bus = dev->bus;
+ edev->mode |= EEH_DEV_DISCONNECTED;
+ (*removed)++;
+
+ pci_stop_and_remove_bus_device(dev);
+
+ return NULL;
+}
+
+static void *eeh_pe_detach_dev(void *data, void *userdata)
+{
+ struct eeh_pe *pe = (struct eeh_pe *)data;
+ struct eeh_dev *edev, *tmp;
+
+ eeh_pe_for_each_dev(pe, edev, tmp) {
+ if (!(edev->mode & EEH_DEV_DISCONNECTED))
+ continue;
+
+ edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
+ eeh_rmv_from_parent_pe(edev);
+ }
+
+ return NULL;
+}
+
/**
* eeh_reset_device - Perform actual reset of a pci slot
* @pe: EEH PE
@@ -349,8 +401,9 @@ static void *eeh_report_failure(void *data, void *userdata)
*/
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
+ struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
struct timeval tstamp;
- int cnt, rc;
+ int cnt, rc, removed = 0;
/* pcibios will clear the counter; save the value */
cnt = pe->freeze_count;
@@ -362,8 +415,11 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
* devices are expected to be attached soon when calling
* into pcibios_add_pci_devices().
*/
+ eeh_pe_state_mark(pe, EEH_PE_KEEP);
if (bus)
- __pcibios_remove_pci_devices(bus, 0);
+ pcibios_remove_pci_devices(bus);
+ else if (frozen_bus)
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
/* Reset the pci controller. (Asserts RST#; resets config space).
* Reconfigure bridges and devices. Don't try to bring the system
@@ -384,9 +440,24 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
* potentially weird things happen.
*/
if (bus) {
+ pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
ssleep(5);
+
+ /*
+ * The EEH device is still connected to its parent
+ * PE. We should disconnect it so that the binding can
+ * be rebuilt when adding PCI devices.
+ */
+ eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
pcibios_add_pci_devices(bus);
+ } else if (frozen_bus && removed) {
+ pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
+ ssleep(5);
+
+ eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
+ pcibios_add_pci_devices(frozen_bus);
}
+ eeh_pe_state_clear(pe, EEH_PE_KEEP);
pe->tstamp = tstamp;
pe->freeze_count = cnt;
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 016588a6f5ed..f9450537e335 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -149,8 +149,8 @@ static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
* callback returns something other than NULL, or no more PEs
* to be traversed.
*/
-static void *eeh_pe_traverse(struct eeh_pe *root,
- eeh_traverse_func fn, void *flag)
+void *eeh_pe_traverse(struct eeh_pe *root,
+ eeh_traverse_func fn, void *flag)
{
struct eeh_pe *pe;
void *ret;
@@ -176,7 +176,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
eeh_traverse_func fn, void *flag)
{
struct eeh_pe *pe;
- struct eeh_dev *edev;
+ struct eeh_dev *edev, *tmp;
void *ret;
if (!root) {
@@ -186,7 +186,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
/* Traverse root PE */
for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
- eeh_pe_for_each_dev(pe, edev) {
+ eeh_pe_for_each_dev(pe, edev, tmp) {
ret = fn(edev, flag);
if (ret)
return ret;
@@ -333,7 +333,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
while (parent) {
if (!(parent->type & EEH_PE_INVALID))
break;
- parent->type &= ~EEH_PE_INVALID;
+ parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
parent = parent->parent;
}
pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
@@ -397,21 +397,20 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
/**
* eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
* @edev: EEH device
- * @purge_pe: remove PE or not
*
* The PE hierarchy tree might be changed when doing PCI hotplug.
* Also, the PCI devices or buses could be removed from the system
* during EEH recovery. So we have to call this function to remove
* the corresponding PE accordingly if necessary.
*/
-int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent, *child;
int cnt;
if (!edev->pe) {
- pr_warning("%s: No PE found for EEH device %s\n",
- __func__, edev->dn->full_name);
+ pr_debug("%s: No PE found for EEH device %s\n",
+ __func__, edev->dn->full_name);
return -EEXIST;
}
@@ -431,7 +430,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
if (pe->type & EEH_PE_PHB)
break;
- if (purge_pe) {
+ if (!(pe->state & EEH_PE_KEEP)) {
if (list_empty(&pe->edevs) &&
list_empty(&pe->child_list)) {
list_del(&pe->child);
@@ -502,7 +501,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
int state = *((int *)flag);
- struct eeh_dev *tmp;
+ struct eeh_dev *edev, *tmp;
struct pci_dev *pdev;
/*
@@ -512,8 +511,8 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
* the PCI device driver.
*/
pe->state |= state;
- eeh_pe_for_each_dev(pe, tmp) {
- pdev = eeh_dev_to_pci_dev(tmp);
+ eeh_pe_for_each_dev(pe, edev, tmp) {
+ pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
pdev->error_state = pci_channel_io_frozen;
}
@@ -579,7 +578,7 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
* blocked on normal path during the stage. So we need utilize
* eeh operations, which is always permitted.
*/
-static void eeh_bridge_check_link(struct pci_dev *pdev,
+static void eeh_bridge_check_link(struct eeh_dev *edev,
struct device_node *dn)
{
int cap;
@@ -590,16 +589,17 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
* We only check root port and downstream ports of
* PCIe switches
*/
- if (!pci_is_pcie(pdev) ||
- (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
+ if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
return;
- pr_debug("%s: Check PCIe link for %s ...\n",
- __func__, pci_name(pdev));
+ pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
+ __func__, edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF));
/* Check slot status */
- cap = pdev->pcie_cap;
+ cap = edev->pcie_cap;
eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
if (!(val & PCI_EXP_SLTSTA_PDS)) {
pr_debug(" No card in the slot (0x%04x) !\n", val);
@@ -653,8 +653,7 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
-static void eeh_restore_bridge_bars(struct pci_dev *pdev,
- struct eeh_dev *edev,
+static void eeh_restore_bridge_bars(struct eeh_dev *edev,
struct device_node *dn)
{
int i;
@@ -680,7 +679,7 @@ static void eeh_restore_bridge_bars(struct pci_dev *pdev,
eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
/* Check the PCIe link is ready */
- eeh_bridge_check_link(pdev, dn);
+ eeh_bridge_check_link(edev, dn);
}
static void eeh_restore_device_bars(struct eeh_dev *edev,
@@ -729,19 +728,12 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
*/
static void *eeh_restore_one_device_bars(void *data, void *flag)
{
- struct pci_dev *pdev = NULL;
struct eeh_dev *edev = (struct eeh_dev *)data;
struct device_node *dn = eeh_dev_to_of_node(edev);
- /* Trace the PCI bridge */
- if (eeh_probe_mode_dev()) {
- pdev = eeh_dev_to_pci_dev(edev);
- if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
- pdev = NULL;
- }
-
- if (pdev)
- eeh_restore_bridge_bars(pdev, edev, dn);
+ /* Do special restore for bridges */
+ if (edev->mode & EEH_DEV_BRIDGE)
+ eeh_restore_bridge_bars(edev, dn);
else
eeh_restore_device_bars(edev, dn);
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index e7ae3484918c..5d753d4f2c75 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -56,19 +56,40 @@ EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
void eeh_sysfs_add_device(struct pci_dev *pdev)
{
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
int rc=0;
+ if (edev && (edev->mode & EEH_DEV_SYSFS))
+ return;
+
rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
if (rc)
printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
+ else if (edev)
+ edev->mode |= EEH_DEV_SYSFS;
}
void eeh_sysfs_remove_device(struct pci_dev *pdev)
{
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
+
+ /*
+ * The parent directory might have been removed. We needn't
+ * continue in that case.
+ */
+ if (!pdev->dev.kobj.sd) {
+ if (edev)
+ edev->mode &= ~EEH_DEV_SYSFS;
+ return;
+ }
+
device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
+
+ if (edev)
+ edev->mode &= ~EEH_DEV_SYSFS;
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e51cde616d2..c69440cef7af 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -362,7 +362,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
seq_printf(p, " Spurious interrupts\n");
- seq_printf(p, "%*s: ", prec, "CNT");
+ seq_printf(p, "%*s: ", prec, "PMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
seq_printf(p, " Performance monitoring interrupts\n");
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f46914a0f33e..7d22a675fe1a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1462,6 +1462,8 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
/* Allocate bus and devices resources */
pcibios_allocate_bus_resources(bus);
pcibios_claim_one_bus(bus);
+ if (!pci_has_flag(PCI_PROBE_ONLY))
+ pci_assign_unassigned_bus_resources(bus);
/* Fixup EEH */
eeh_add_device_tree_late(bus);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 3f608800c06b..c1e17ae68a08 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -22,45 +22,40 @@
#include <asm/eeh.h>
/**
- * __pcibios_remove_pci_devices - remove all devices under this bus
+ * pcibios_release_device - release PCI device
+ * @dev: PCI device
+ *
+ * The function is called before releasing the indicated PCI device.
+ */
+void pcibios_release_device(struct pci_dev *dev)
+{
+ eeh_remove_device(dev);
+}
+
+/**
+ * pcibios_remove_pci_devices - remove all devices under this bus
* @bus: the indicated PCI bus
- * @purge_pe: destroy the PE on removal of PCI devices
*
* Remove all of the PCI devices under this bus both from the
* linux pci device tree, and from the powerpc EEH address cache.
- * By default, the corresponding PE will be destroied during the
- * normal PCI hotplug path. For PCI hotplug during EEH recovery,
- * the corresponding PE won't be destroied and deallocated.
*/
-void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
+void pcibios_remove_pci_devices(struct pci_bus *bus)
{
struct pci_dev *dev, *tmp;
struct pci_bus *child_bus;
/* First go down child busses */
list_for_each_entry(child_bus, &bus->children, node)
- __pcibios_remove_pci_devices(child_bus, purge_pe);
+ pcibios_remove_pci_devices(child_bus);
pr_debug("PCI: Removing devices on bus %04x:%02x\n",
pci_domain_nr(bus), bus->number);
list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
- pr_debug(" * Removing %s...\n", pci_name(dev));
- eeh_remove_bus_device(dev, purge_pe);
+ pr_debug(" Removing %s...\n", pci_name(dev));
pci_stop_and_remove_bus_device(dev);
}
}
-/**
- * pcibios_remove_pci_devices - remove all devices under this bus
- * @bus: the indicated PCI bus
- *
- * Remove all of the PCI devices under this bus both from the
- * linux pci device tree, and from the powerpc EEH address cache.
- */
-void pcibios_remove_pci_devices(struct pci_bus *bus)
-{
- __pcibios_remove_pci_devices(bus, 1);
-}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
/**
@@ -76,7 +71,7 @@ EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
*/
void pcibios_add_pci_devices(struct pci_bus * bus)
{
- int slotno, num, mode, pass, max;
+ int slotno, mode, pass, max;
struct pci_dev *dev;
struct device_node *dn = pci_bus_to_OF_node(bus);
@@ -90,11 +85,15 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
/* use ofdt-based probe */
of_rescan_bus(dn, bus);
} else if (mode == PCI_PROBE_NORMAL) {
- /* use legacy probe */
+ /*
+ * Use legacy probe. In the partial hotplug case, we
+ * probably have grandchildren devices unplugged. So
+ * we don't check the return value from pci_scan_slot()
+ * and instead fully rescan all the way down to pick them
+ * up; they may have been removed during the partial
+ * hotplug.
+ */
slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
- num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
- if (!num)
- return;
+ pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
pcibios_setup_bus_devices(bus);
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++) {
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 6b0ba5854d99..15d9105323bf 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -230,11 +230,14 @@ void of_scan_pci_bridge(struct pci_dev *dev)
return;
}
- bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
+ bus = pci_find_bus(pci_domain_nr(dev->bus), busrange[0]);
if (!bus) {
- printk(KERN_ERR "Failed to create pci bus for %s\n",
- node->full_name);
- return;
+ bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
+ if (!bus) {
+ printk(KERN_ERR "Failed to create pci bus for %s\n",
+ node->full_name);
+ return;
+ }
}
bus->primary = dev->bus->number;
@@ -292,6 +295,38 @@ void of_scan_pci_bridge(struct pci_dev *dev)
}
EXPORT_SYMBOL(of_scan_pci_bridge);
+static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
+ struct device_node *dn)
+{
+ struct pci_dev *dev = NULL;
+ const u32 *reg;
+ int reglen, devfn;
+
+ pr_debug(" * %s\n", dn->full_name);
+ if (!of_device_is_available(dn))
+ return NULL;
+
+ reg = of_get_property(dn, "reg", &reglen);
+ if (reg == NULL || reglen < 20)
+ return NULL;
+ devfn = (reg[0] >> 8) & 0xff;
+
+ /* Check if the PCI device is already there */
+ dev = pci_get_slot(bus, devfn);
+ if (dev) {
+ pci_dev_put(dev);
+ return dev;
+ }
+
+ /* create a new pci_dev for this device */
+ dev = of_create_pci_dev(dn, bus, devfn);
+ if (!dev)
+ return NULL;
+
+ pr_debug(" dev header type: %x\n", dev->hdr_type);
+ return dev;
+}
+
/**
* __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
* @node: device tree node for the PCI bus
@@ -302,8 +337,6 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
int rescan_existing)
{
struct device_node *child;
- const u32 *reg;
- int reglen, devfn;
struct pci_dev *dev;
pr_debug("of_scan_bus(%s) bus no %d...\n",
@@ -311,16 +344,7 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
/* Scan direct children */
for_each_child_of_node(node, child) {
- pr_debug(" * %s\n", child->full_name);
- if (!of_device_is_available(child))
- continue;
- reg = of_get_property(child, "reg", &reglen);
- if (reg == NULL || reglen < 20)
- continue;
- devfn = (reg[0] >> 8) & 0xff;
-
- /* create a new pci_dev for this device */
- dev = of_create_pci_dev(child, bus, devfn);
+ dev = of_scan_pci_dev(bus, child);
if (!dev)
continue;
pr_debug(" dev header type: %x\n", dev->hdr_type);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5eccda9fd33f..607902424e73 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -644,7 +644,8 @@ unsigned char ibm_architecture_vec[] = {
W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
- W(0xffff0000), W(0x004b0000), /* POWER8 */
+ W(0xffff0000), W(0x004b0000), /* POWER8E */
+ W(0xffff0000), W(0x004d0000), /* POWER8 */
W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
@@ -706,7 +707,7 @@ unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 117
+#define IBM_ARCH_VEC_NRCORES_OFFSET 125
W(NR_CPUS), /* number of cores supported */
0,
0,
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479802f2..f096e72262f4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
- . = 0;
- reloc_start = .;
-
. = KERNELBASE;
/*
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3f0c30ae4791..c33d939120c9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -43,6 +43,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long va;
unsigned int penc;
+ unsigned long sllp;
/*
* We need 14 to 65 bits of va for a tlibe of 4K page
@@ -64,7 +65,9 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
/* clear out bits after (52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
- va |= mmu_psize_defs[apsize].sllp << 6;
+ sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
+ ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+ va |= sllp << 5;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
@@ -98,6 +101,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long va;
unsigned int penc;
+ unsigned long sllp;
/* VPN_SHIFT can be at most 12 */
va = vpn << VPN_SHIFT;
@@ -113,7 +117,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
/* clear out bits after(52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
- va |= mmu_psize_defs[apsize].sllp << 6;
+ sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
+ ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+ va |= sllp << 5;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
break;
@@ -554,6 +560,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
seg_off |= vpi << shift;
}
*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+ break;
case MMU_SEGSIZE_1T:
/* We only have 40 - 23 bits of seg_off in avpn */
seg_off = (avpn & 0x1ffff) << 23;
@@ -563,6 +570,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
seg_off |= vpi << shift;
}
*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+ break;
default:
*vpn = size = 0;
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 08397217e8ac..5850798826cd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
@@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
}
}
if (changed) {
- cpumask_set_cpu(cpu, changes);
+ cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
}
}
@@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
if (!data)
return -EINVAL;
- cpu = get_cpu();
+ cpu = smp_processor_id();
for (update = data; update; update = update->next) {
if (cpu != update->cpu)
@@ -1446,12 +1448,12 @@ static int update_cpu_topology(void *data)
*/
int arch_update_cpu_topology(void)
{
- unsigned int cpu, changed = 0;
+ unsigned int cpu, sibling, changed = 0;
struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
cpumask_t updated_cpus;
struct device *dev;
- int weight, i = 0;
+ int weight, new_nid, i = 0;
weight = cpumask_weight(&cpu_associativity_changes_mask);
if (!weight)
@@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
cpumask_clear(&updated_cpus);
for_each_cpu(cpu, &cpu_associativity_changes_mask) {
- ud = &updates[i++];
- ud->cpu = cpu;
- vphn_get_associativity(cpu, associativity);
- ud->new_nid = associativity_to_nid(associativity);
-
- if (ud->new_nid < 0 || !node_online(ud->new_nid))
- ud->new_nid = first_online_node;
+ /*
+ * If the siblings aren't flagged for changes, the updates
+ * list will be too short. Skip this update and flag the
+ * siblings so they are handled on the next update.
+ */
+ if (!cpumask_subset(cpu_sibling_mask(cpu),
+ &cpu_associativity_changes_mask)) {
+ pr_info("Sibling bits not set for associativity "
+ "change, cpu%d\n", cpu);
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- ud->old_nid = numa_cpu_lookup_table[cpu];
- cpumask_set_cpu(cpu, &updated_cpus);
+ /* Use associativity from first thread for all siblings */
+ vphn_get_associativity(cpu, associativity);
+ new_nid = associativity_to_nid(associativity);
+ if (new_nid < 0 || !node_online(new_nid))
+ new_nid = first_online_node;
+
+ if (new_nid == numa_cpu_lookup_table[cpu]) {
+ cpumask_andnot(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- if (i < weight)
- ud->next = &updates[i];
+ for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+ ud = &updates[i++];
+ ud->cpu = sibling;
+ ud->new_nid = new_nid;
+ ud->old_nid = numa_cpu_lookup_table[sibling];
+ cpumask_set_cpu(sibling, &updated_cpus);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+ cpu = cpu_last_thread_sibling(cpu);
}
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a3985aee77fe..eeae308cf982 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -484,7 +484,7 @@ static bool is_ebb_event(struct perf_event *event)
* use bit 63 of the event code for something else if they wish.
*/
return (ppmu->flags & PPMU_EBB) &&
- ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+ ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}
static int ebb_event_check(struct perf_event *event)
@@ -1252,8 +1252,11 @@ nocheck:
ret = 0;
out:
- if (has_branch_stack(event))
+ if (has_branch_stack(event)) {
power_pmu_bhrb_enable(event);
+ cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+ }
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 96a64d6a8bdf..2ee4a707f0df 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -118,7 +118,7 @@
(EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
(EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
- (EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT) | \
+ (EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT) | \
EVENT_PSEL_MASK)
/* MMCRA IFM bits - POWER8 */
@@ -233,10 +233,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
- ebb = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
+ ebb = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
/* Clear the EBB bit in the event, so event checks work below */
- event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT);
+ event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
if (pmc) {
if (pmc > 6)
@@ -561,18 +561,13 @@ static int power8_generic_events[] = {
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
u64 pmu_bhrb_filter = 0;
- u64 br_privilege = branch_sample_type & ONLY_PLM;
- /* BHRB and regular PMU events share the same prvillege state
+ /* BHRB and regular PMU events share the same privilege state
* filter configuration. BHRB is always recorded along with a
- * regular PMU event. So privilege state filter criteria for BHRB
- * and the companion PMU events has to be the same. As a default
- * "perf record" tool sets all privillege bits ON when no filter
- * criteria is provided in the command line. So as along as all
- * privillege bits are ON or they are OFF, we are good to go.
+ * regular PMU event. As the privilege state filter is handled
+ * in the basic PMC configuration of the accompanying regular
+ * PMU event, we ignore any separate BHRB specific request.
*/
- if ((br_privilege != 7) && (br_privilege != 0))
- return -1;
/* No branch filter requested */
if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
@@ -621,10 +616,19 @@ static struct power_pmu power8_pmu = {
static int __init init_power8_pmu(void)
{
+ int rc;
+
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
return -ENODEV;
- return register_power_pmu(&power8_pmu);
+ rc = register_power_pmu(&power8_pmu);
+ if (rc)
+ return rc;
+
+ /* Tell userspace that EBB is supported */
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
+
+ return 0;
}
early_initcall(init_power8_pmu);
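Advertising PPC_FEATURE2_EBB through cpu_user_features2 is how userspace learns that the EBB machinery is usable; on powerpc that feature word is exported via the AT_HWCAP2 auxiliary vector entry. A hedged sketch of the matching userspace check (assuming the uapi <asm/cputable.h> definitions and a libc that provides getauxval()):

	#include <stdbool.h>
	#include <sys/auxv.h>		/* getauxval(), AT_HWCAP2 */
	#include <asm/cputable.h>	/* PPC_FEATURE2_EBB */

	/* Return true when the running kernel/CPU advertise EBB support. */
	static bool cpu_supports_ebb(void)
	{
		return (getauxval(AT_HWCAP2) & PPC_FEATURE2_EBB) != 0;
	}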
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 969cce73055a..79663d26e6ea 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -114,7 +114,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* the root bridge. So it's not reasonable to continue
* the probing.
*/
- if (!dn || !edev)
+ if (!dn || !edev || edev->pe)
return 0;
/* Skip for PCI-ISA bridge */
@@ -122,8 +122,19 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
return 0;
/* Initialize eeh device */
- edev->class_code = dev->class;
- edev->mode = 0;
+ edev->class_code = dev->class;
+ edev->mode &= 0xFFFFFF00;
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ edev->mode |= EEH_DEV_BRIDGE;
+ if (pci_is_pcie(dev)) {
+ edev->pcie_cap = pci_pcie_cap(dev);
+
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ edev->mode |= EEH_DEV_ROOT_PORT;
+ else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
+ edev->mode |= EEH_DEV_DS_PORT;
+ }
+
edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 49b57b9f835d..d8140b125e62 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1266,7 +1266,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
}
-void pnv_pci_init_ioda2_phb(struct device_node *np)
+void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 1bd3399146ed..62b4f8025de0 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,7 +19,6 @@ config PPC_PSERIES
select ZLIB_DEFLATE
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
- select HOTPLUG if SMP
select HOTPLUG_CPU if SMP
default y
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index b456b157d33d..7fbc25b1813f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -133,6 +133,48 @@ static int pseries_eeh_init(void)
return 0;
}
+static int pseries_eeh_cap_start(struct device_node *dn)
+{
+ struct pci_dn *pdn = PCI_DN(dn);
+ u32 status;
+
+ if (!pdn)
+ return 0;
+
+ rtas_read_config(pdn, PCI_STATUS, 2, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ return PCI_CAPABILITY_LIST;
+}
+
+
+static int pseries_eeh_find_cap(struct device_node *dn, int cap)
+{
+ struct pci_dn *pdn = PCI_DN(dn);
+ int pos = pseries_eeh_cap_start(dn);
+ int cnt = 48; /* Maximal number of capabilities */
+ u32 id;
+
+ if (!pos)
+ return 0;
+
+ while (cnt--) {
+ rtas_read_config(pdn, pos, 1, &pos);
+ if (pos < 0x40)
+ break;
+ pos &= ~3;
+ rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos += PCI_CAP_LIST_NEXT;
+ }
+
+ return 0;
+}
+
/**
* pseries_eeh_of_probe - EEH probe on the given device
* @dn: OF node
@@ -146,14 +188,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
{
struct eeh_dev *edev;
struct eeh_pe pe;
+ struct pci_dn *pdn = PCI_DN(dn);
const u32 *class_code, *vendor_id, *device_id;
const u32 *regs;
+ u32 pcie_flags;
int enable = 0;
int ret;
/* Retrieve OF node and eeh device */
edev = of_node_to_eeh_dev(dn);
- if (!of_device_is_available(dn))
+ if (edev->pe || !of_device_is_available(dn))
return NULL;
/* Retrieve class/vendor/device IDs */
@@ -167,9 +211,26 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
if (dn->type && !strcmp(dn->type, "isa"))
return NULL;
- /* Update class code and mode of eeh device */
+ /*
+ * Update the class code and mode of the eeh device. The mode
+ * needs to correctly reflect whether the current device is a
+ * root port or a PCIe switch downstream port.
+ */
edev->class_code = *class_code;
- edev->mode = 0;
+ edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
+ edev->mode &= 0xFFFFFF00;
+ if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ edev->mode |= EEH_DEV_BRIDGE;
+ if (edev->pcie_cap) {
+ rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
+ 2, &pcie_flags);
+ pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
+ edev->mode |= EEH_DEV_ROOT_PORT;
+ else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
+ edev->mode |= EEH_DEV_DS_PORT;
+ }
+ }
/* Retrieve the device address */
regs = of_get_property(dn, "reg", NULL);
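
The PCI_EXP_FLAGS_TYPE extraction above is what decides between EEH_DEV_ROOT_PORT and EEH_DEV_DS_PORT. Below is a small sketch of the same classification applied to a raw PCI Express Capabilities register value, assuming the standard pci_regs.h encodings (PCI_EXP_FLAGS_TYPE = 0x00f0, root port = 0x4, downstream port = 0x6); the helper name pcie_port_kind and the sample value are illustrative only:

#include <stdio.h>

#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type field */
#define PCI_EXP_TYPE_ROOT_PORT	0x4
#define PCI_EXP_TYPE_DOWNSTREAM	0x6

/* Classify a bridge from its PCIe capability flags word, mirroring the
 * pseries_eeh_of_probe() logic above. */
static const char *pcie_port_kind(unsigned int pcie_flags)
{
	unsigned int type = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;

	if (type == PCI_EXP_TYPE_ROOT_PORT)
		return "root port";
	if (type == PCI_EXP_TYPE_DOWNSTREAM)
		return "switch downstream port";
	return "other";
}

int main(void)
{
	/* 0x0042: capability version 2, device/port type 0x4 (root port) */
	printf("%s\n", pcie_port_kind(0x0042));
	return 0;
}
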
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 02d6e21619bb..8bad880bd177 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -146,7 +146,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
flags = 0;
/* Make pHyp happy */
- if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
+ if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
hpte_r &= ~_PAGE_COHERENT;
if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
flags |= H_COALESCE_CAND;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 7b3cbde8c783..721c0586b284 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -287,6 +287,9 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
unsigned long *savep;
struct rtas_error_log *h, *errhdr = NULL;
+ /* Mask top two bits */
+ regs->gpr[3] &= ~(0x3UL << 62);
+
if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
return NULL;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6b499870662f..b0e6435b2f02 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -91,7 +91,15 @@ struct thread_struct {
#endif
};
-#define PER_FLAG_NO_TE 1UL /* Flag to disable transactions. */
+/* Flag to disable transactions. */
+#define PER_FLAG_NO_TE 1UL
+/* Flag to enable random transaction aborts. */
+#define PER_FLAG_TE_ABORT_RAND 2UL
+/* Flag to specify random transaction abort mode:
+ * - abort each transaction at a random instruction before TEND if set.
+ * - abort random transactions at a random instruction if cleared.
+ */
+#define PER_FLAG_TE_ABORT_RAND_TEND 4UL
typedef struct thread_struct thread_struct;
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index f3a9e0f92704..80b6f11263c4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -10,7 +10,7 @@
#include <linux/thread_info.h>
extern struct task_struct *__switch_to(void *, void *);
-extern void update_per_regs(struct task_struct *task);
+extern void update_cr_regs(struct task_struct *task);
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
@@ -86,7 +86,7 @@ static inline void restore_access_regs(unsigned int *acrs)
restore_fp_regs(&next->thread.fp_regs); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
- update_per_regs(next); \
+ update_cr_regs(next); \
} \
prev = __switch_to(prev,next); \
} while (0)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 3aa9f1ec5b29..7a84619e315e 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -400,6 +400,7 @@ typedef struct
#define PTRACE_POKE_SYSTEM_CALL 0x5008
#define PTRACE_ENABLE_TE 0x5009
#define PTRACE_DISABLE_TE 0x5010
+#define PTRACE_TE_ABORT_RAND 0x5011
/*
* PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 64b24650e4f8..dd62071624be 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -173,7 +173,7 @@ error:
}
}
-static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
+static struct cache_dir *cache_create_cache_dir(int cpu)
{
struct cache_dir *cache_dir;
struct kobject *kobj = NULL;
@@ -289,9 +289,8 @@ static struct kobj_type cache_index_type = {
.default_attrs = cache_index_default_attrs,
};
-static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
- struct cache *cache, int index,
- int cpu)
+static int cache_create_index_dir(struct cache_dir *cache_dir,
+ struct cache *cache, int index, int cpu)
{
struct cache_index_dir *index_dir;
int rc;
@@ -313,7 +312,7 @@ out:
return rc;
}
-static int __cpuinit cache_add_cpu(int cpu)
+static int cache_add_cpu(int cpu)
{
struct cache_dir *cache_dir;
struct cache *cache;
@@ -335,7 +334,7 @@ static int __cpuinit cache_add_cpu(int cpu)
return 0;
}
-static void __cpuinit cache_remove_cpu(int cpu)
+static void cache_remove_cpu(int cpu)
{
struct cache_index_dir *index, *next;
struct cache_dir *cache_dir;
@@ -354,8 +353,8 @@ static void __cpuinit cache_remove_cpu(int cpu)
cache_dir_cpu[cpu] = NULL;
}
-static int __cpuinit cache_hotplug(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
{
int cpu = (long)hcpu;
int rc = 0;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f703d91bf720..d8f355657171 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -21,6 +21,48 @@
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
+
+/*
+ * Return physical address for virtual address
+ */
+static inline void *load_real_addr(void *addr)
+{
+ unsigned long real_addr;
+
+ asm volatile(
+ " lra %0,0(%1)\n"
+ " jz 0f\n"
+ " la %0,0\n"
+ "0:"
+ : "=a" (real_addr) : "a" (addr) : "cc");
+ return (void *)real_addr;
+}
+
+/*
+ * Copy up to one page to vmalloc or real memory
+ */
+static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+{
+ size_t size;
+
+ if (is_vmalloc_addr(buf)) {
+ BUG_ON(csize >= PAGE_SIZE);
+ /* If buf is not page aligned, copy first part */
+ size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
+ if (size) {
+ if (memcpy_real(load_real_addr(buf), src, size))
+ return -EFAULT;
+ buf += size;
+ src += size;
+ }
+ /* Copy second part */
+ size = csize - size;
+ return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
+ } else {
+ return memcpy_real(buf, src, csize);
+ }
+}
+
/*
* Copy one page from "oldmem"
*
@@ -32,6 +74,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
unsigned long src;
+ int rc;
if (!csize)
return 0;
@@ -43,11 +86,11 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
src < OLDMEM_BASE + OLDMEM_SIZE)
src -= OLDMEM_BASE;
if (userbuf)
- copy_to_user_real((void __force __user *) buf, (void *) src,
- csize);
+ rc = copy_to_user_real((void __force __user *) buf,
+ (void *) src, csize);
else
- memcpy_real(buf, (void *) src, csize);
- return csize;
+ rc = copy_page_real(buf, (void *) src, csize);
+ return (rc == 0) ? csize : rc;
}
/*
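
The vmalloc branch of copy_page_real() above splits a copy that may straddle a page boundary of the destination: it first copies up to the next boundary, then the remainder. A standalone sketch of just that split arithmetic; PAGE_SIZE, the function name copy_split, and the example addresses are illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round addr up to the next PAGE_SIZE boundary (like the kernel's roundup). */
static unsigned long roundup_page(unsigned long addr)
{
	return (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/* Show how a csize-byte copy to dst is split at a page boundary of dst,
 * the way copy_page_real() does for vmalloc buffers. */
static void copy_split(unsigned long dst, size_t csize)
{
	size_t first = roundup_page(dst) - dst;

	if (first > csize)
		first = csize;
	printf("first chunk: %zu bytes, second chunk: %zu bytes\n",
	       first, csize - first);
}

int main(void)
{
	/* Destination 100 bytes before a page boundary, 300-byte copy:
	 * prints 100 and 200. */
	copy_split(8 * PAGE_SIZE - 100, 300);
	return 0;
}
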
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 390d9ae57bb2..fb99c2057b85 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -639,8 +639,8 @@ static struct pmu cpumf_pmu = {
.cancel_txn = cpumf_pmu_cancel_txn,
};
-static int __cpuinit cpumf_pmu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (long) hcpu;
int flags;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 753c41d0ffd3..24612029f450 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
/*
* cpu_init - initializes state that is per-CPU.
*/
-void __cpuinit cpu_init(void)
+void cpu_init(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
struct cpuid *id = &__get_cpu_var(cpu_id);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e94..e9fadb04e3c6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -47,7 +47,7 @@ enum s390_regset {
REGSET_GENERAL_EXTENDED,
};
-void update_per_regs(struct task_struct *task)
+void update_cr_regs(struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
@@ -56,17 +56,25 @@ void update_per_regs(struct task_struct *task)
#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
- unsigned long cr0, cr0_new;
+ unsigned long cr[3], cr_new[3];
- __ctl_store(cr0, 0, 0);
- /* set or clear transaction execution bits 8 and 9. */
+ __ctl_store(cr, 0, 2);
+ cr_new[1] = cr[1];
+ /* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
if (task->thread.per_flags & PER_FLAG_NO_TE)
- cr0_new = cr0 & ~(3UL << 54);
+ cr_new[0] = cr[0] & ~(3UL << 54);
else
- cr0_new = cr0 | (3UL << 54);
- /* Only load control register 0 if necessary. */
- if (cr0 != cr0_new)
- __ctl_load(cr0_new, 0, 0);
+ cr_new[0] = cr[0] | (3UL << 54);
+ /* Set or clear transaction execution TDC bits 62 and 63. */
+ cr_new[2] = cr[2] & ~3UL;
+ if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
+ if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
+ cr_new[2] |= 1UL;
+ else
+ cr_new[2] |= 2UL;
+ }
+ if (memcmp(&cr_new, &cr, sizeof(cr)))
+ __ctl_load(cr_new, 0, 2);
}
#endif
/* Copy user specified PER registers */
@@ -100,14 +108,14 @@ void user_enable_single_step(struct task_struct *task)
{
set_tsk_thread_flag(task, TIF_SINGLE_STEP);
if (task == current)
- update_per_regs(task);
+ update_cr_regs(task);
}
void user_disable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
if (task == current)
- update_per_regs(task);
+ update_cr_regs(task);
}
/*
@@ -447,6 +455,26 @@ long arch_ptrace(struct task_struct *child, long request,
if (!MACHINE_HAS_TE)
return -EIO;
child->thread.per_flags |= PER_FLAG_NO_TE;
+ child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
+ return 0;
+ case PTRACE_TE_ABORT_RAND:
+ if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
+ return -EIO;
+ switch (data) {
+ case 0UL:
+ child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
+ break;
+ case 1UL:
+ child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
+ child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
+ break;
+ case 2UL:
+ child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
+ child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
default:
/* Removing high order bit from addr (only for 31 bit). */
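
The new PTRACE_TE_ABORT_RAND request above takes its mode in data: 0 disables random aborts, 1 aborts each transaction at a random instruction before TEND, and 2 aborts randomly chosen transactions at a random instruction. A hedged sketch of how a tracer might issue it; the request number comes from the uapi header change above, the helper name set_te_abort_rand is made up, and error handling is minimal:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_TE_ABORT_RAND
#define PTRACE_TE_ABORT_RAND	0x5011	/* from asm/ptrace.h above */
#endif

/* Ask the kernel to randomly abort the tracee's transactions.
 * mode: 0 = off, 1 = abort every transaction before TEND,
 *       2 = abort randomly chosen transactions. */
static int set_te_abort_rand(pid_t pid, unsigned long mode)
{
	if (ptrace(PTRACE_TE_ABORT_RAND, pid, 0, mode) != 0) {
		perror("PTRACE_TE_ABORT_RAND");
		return -1;
	}
	return 0;
}

The kernel rejects the request with -EIO when the machine has no transactional execution support or when PER_FLAG_NO_TE is already set for the child, and with -EINVAL for any mode other than 0, 1, or 2.
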
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15a016c10563..d386c4e9d2e5 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -165,7 +165,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
pcpu_sigp_retry(pcpu, order, 0);
}
-static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
+static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
struct _lowcore *lc;
@@ -616,10 +616,9 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
return info;
}
-static int __cpuinit smp_add_present_cpu(int cpu);
+static int smp_add_present_cpu(int cpu);
-static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
- int sysfs_add)
+static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
struct pcpu *pcpu;
cpumask_t avail;
@@ -685,7 +684,7 @@ static void __init smp_detect_cpus(void)
/*
* Activate a secondary processor.
*/
-static void __cpuinit smp_start_secondary(void *cpuvoid)
+static void smp_start_secondary(void *cpuvoid)
{
S390_lowcore.last_update_clock = get_tod_clock();
S390_lowcore.restart_stack = (unsigned long) restart_stack;
@@ -708,7 +707,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
}
/* Upping and downing of CPUs */
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct pcpu *pcpu;
int rc;
@@ -964,8 +963,8 @@ static struct attribute_group cpu_online_attr_group = {
.attrs = cpu_online_attrs,
};
-static int __cpuinit smp_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
struct cpu *c = &pcpu_devices[cpu].cpu;
@@ -983,7 +982,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
return notifier_from_errno(err);
}
-static int __cpuinit smp_add_present_cpu(int cpu)
+static int smp_add_present_cpu(int cpu)
{
struct cpu *c = &pcpu_devices[cpu].cpu;
struct device *s = &c->dev;
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 62f89d98e880..811f542b8ed4 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -418,7 +418,7 @@ void s390_adjust_jiffies(void)
/*
* calibrate the delay loop
*/
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
s390_adjust_jiffies();
/* Print the good old Bogomips line .. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3fb09359eda6..9b9c1b78ec67 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -371,14 +371,14 @@ EXPORT_SYMBOL(del_virt_timer);
/*
* Start the virtual CPU timer on the current CPU.
*/
-void __cpuinit init_cpu_vtimer(void)
+void init_cpu_vtimer(void)
{
/* set initial cpu timer */
set_vtimer(VTIMER_MAX_SLICE);
}
-static int __cpuinit s390_nohz_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
struct s390_idle_data *idle;
long cpu = (long) hcpu;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 047c3e4c59a2..f00aefb66a4e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -639,8 +639,8 @@ out:
put_task_struct(tsk);
}
-static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..d5f10a43a58f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -9,6 +9,8 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
+#include <linux/random.h>
+#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/facility.h>
@@ -221,6 +223,37 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
EMIT2(0x07fe);
}
+/* Helper to find the offset of pkt_type in sk_buff
+ * Make sure it's still a 3-bit field starting at the MSBs within a byte.
+ */
+#define PKT_TYPE_MAX 0xe0
+static int pkt_type_offset;
+
+static int __init bpf_pkt_type_offset_init(void)
+{
+ struct sk_buff skb_probe = {
+ .pkt_type = ~0,
+ };
+ char *ct = (char *)&skb_probe;
+ int off;
+
+ pkt_type_offset = -1;
+ for (off = 0; off < sizeof(struct sk_buff); off++) {
+ if (!ct[off])
+ continue;
+ if (ct[off] == PKT_TYPE_MAX)
+ pkt_type_offset = off;
+ else {
+ /* Found non matching bit pattern, fix needed. */
+ WARN_ON_ONCE(1);
+ pkt_type_offset = -1;
+ return -1;
+ }
+ }
+ return 0;
+}
+device_initcall(bpf_pkt_type_offset_init);
+
/*
* make sure we don't leak kernel information to user
*/
@@ -720,6 +753,16 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
EMIT4_DISP(0x88500000, 12);
}
break;
+ case BPF_S_ANC_PKTTYPE:
+ if (pkt_type_offset < 0)
+ goto out;
+ /* lhi %r5,0 */
+ EMIT4(0xa7580000);
+ /* ic %r5,<d(pkt_type_offset)>(%r2) */
+ EMIT4_DISP(0x43502000, pkt_type_offset);
+ /* srl %r5,5 */
+ EMIT4_DISP(0x88500000, 5);
+ break;
case BPF_S_ANC_CPU: /* A = smp_processor_id() */
#ifdef CONFIG_SMP
/* l %r5,<d(cpu_nr)> */
@@ -738,8 +781,41 @@ out:
return -1;
}
+/*
+ * Note: for security reasons, the BPF code is preceded by a randomly
+ * sized block of illegal instructions.
+ */
+struct bpf_binary_header {
+ unsigned int pages;
+ u8 image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+ u8 **image_ptr)
+{
+ struct bpf_binary_header *header;
+ unsigned int sz, hole;
+
+ /* Most BPF filters are really small, but if some of them fill a page,
+ * allow at least 128 extra bytes for illegal instructions.
+ */
+ sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
+ header = module_alloc(sz);
+ if (!header)
+ return NULL;
+ memset(header, 0, sz);
+ header->pages = sz / PAGE_SIZE;
+ hole = sz - bpfsize + sizeof(*header);
+ /* Insert random number of illegal instructions before BPF code
+ * and make sure the first instruction starts at an even address.
+ */
+ *image_ptr = &header->image[(prandom_u32() % hole) & -2];
+ return header;
+}
+
void bpf_jit_compile(struct sk_filter *fp)
{
+ struct bpf_binary_header *header = NULL;
unsigned long size, prg_len, lit_len;
struct bpf_jit jit, cjit;
unsigned int *addrs;
@@ -772,12 +848,11 @@ void bpf_jit_compile(struct sk_filter *fp)
} else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
prg_len = jit.prg - jit.start;
lit_len = jit.lit - jit.mid;
- size = max_t(unsigned long, prg_len + lit_len,
- sizeof(struct work_struct));
+ size = prg_len + lit_len;
if (size >= BPF_SIZE_MAX)
goto out;
- jit.start = module_alloc(size);
- if (!jit.start)
+ header = bpf_alloc_binary(size, &jit.start);
+ if (!header)
goto out;
jit.prg = jit.mid = jit.start + prg_len;
jit.lit = jit.end = jit.start + prg_len + lit_len;
@@ -788,37 +863,25 @@ void bpf_jit_compile(struct sk_filter *fp)
cjit = jit;
}
if (bpf_jit_enable > 1) {
- pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
- fp->len, jit.end - jit.start, pass, jit.start);
- if (jit.start) {
- printk(KERN_ERR "JIT code:\n");
+ bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
+ if (jit.start)
print_fn_code(jit.start, jit.mid - jit.start);
- print_hex_dump(KERN_ERR, "JIT literals:\n",
- DUMP_PREFIX_ADDRESS, 16, 1,
- jit.mid, jit.end - jit.mid, false);
- }
}
- if (jit.start)
+ if (jit.start) {
+ set_memory_ro((unsigned long)header, header->pages);
fp->bpf_func = (void *) jit.start;
+ }
out:
kfree(addrs);
}
-static void jit_free_defer(struct work_struct *arg)
-{
- module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
void bpf_jit_free(struct sk_filter *fp)
{
- struct work_struct *work;
+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+ struct bpf_binary_header *header = (void *)addr;
if (fp->bpf_func == sk_run_filter)
return;
- work = (struct work_struct *)fp->bpf_func;
- INIT_WORK(work, jit_free_defer);
- schedule_work(work);
+ set_memory_rw(addr, header->pages);
+ module_free(NULL, header);
}
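
bpf_pkt_type_offset_init() above locates the byte holding sk_buff's pkt_type bitfield by setting the field to all ones and scanning the structure for the expected pattern (0xe0: a 3-bit field at the most significant bits of its byte, which is what s390's big-endian bitfield layout produces). A standalone sketch of the same probing trick on a made-up struct; the type name toy and field names are purely illustrative:

#include <stdio.h>
#include <string.h>

/* Toy stand-in for sk_buff: the probe only needs to know what bit
 * pattern the field produces when set to all ones. */
struct toy {
	long a;
	unsigned char before;
	unsigned char kind:3;	/* stand-in for skb->pkt_type */
	unsigned char after;
};

int main(void)
{
	struct toy probe;
	const unsigned char *ct = (const unsigned char *)&probe;
	size_t off;

	memset(&probe, 0, sizeof(probe));
	probe.kind = ~0;

	/* Scan for the only nonzero byte; that is where the bitfield lives.
	 * The JIT additionally insists the pattern is 0xe0 (3 bits at the
	 * MSBs of the byte), which holds for s390's bitfield layout; on a
	 * little-endian ABI this prints 0x07 instead, which is exactly the
	 * mismatch the kernel code guards against with WARN_ON_ONCE(). */
	for (off = 0; off < sizeof(probe); off++) {
		if (ct[off]) {
			printf("bitfield byte offset %zu, pattern 0x%02x\n",
			       off, ct[off]);
			return 0;
		}
	}
	printf("bitfield not found\n");
	return 0;
}
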
diff --git a/arch/score/mm/tlb-score.c b/arch/score/mm/tlb-score.c
index 6fdb100244c8..004073717de0 100644
--- a/arch/score/mm/tlb-score.c
+++ b/arch/score/mm/tlb-score.c
@@ -240,7 +240,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
local_irq_restore(flags);
}
-void __cpuinit tlb_init(void)
+void tlb_init(void)
{
tlblock_set(0);
local_flush_tlb_all();
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 2051821724c6..0cf4097b71e8 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -22,7 +22,7 @@ CONFIG_PREEMPT=y
CONFIG_CMDLINE_OVERWRITE=y
CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 61a07dafcd46..ecf83cd158dc 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -43,9 +43,9 @@
* peripherals (nofpu, nodsp, and so forth).
*/
#define onchip_setup(x) \
-static int x##_disabled __cpuinitdata = !cpu_has_##x; \
+static int x##_disabled = !cpu_has_##x; \
\
-static int __cpuinit x##_setup(char *opts) \
+static int x##_setup(char *opts) \
{ \
x##_disabled = 1; \
return 1; \
@@ -59,7 +59,7 @@ onchip_setup(dsp);
#define CPUOPM 0xff2f0000
#define CPUOPM_RABD (1 << 5)
-static void __cpuinit speculative_execution_init(void)
+static void speculative_execution_init(void)
{
/* Clear RABD */
__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
@@ -78,7 +78,7 @@ static void __cpuinit speculative_execution_init(void)
#define EXPMASK_BRDSSLP (1 << 1)
#define EXPMASK_MMCAW (1 << 4)
-static void __cpuinit expmask_init(void)
+static void expmask_init(void)
{
unsigned long expmask = __raw_readl(EXPMASK);
@@ -217,7 +217,7 @@ static void detect_cache_shape(void)
l2_cache_shape = -1; /* No S-cache */
}
-static void __cpuinit fpu_init(void)
+static void fpu_init(void)
{
/* Disable the FPU */
if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
@@ -230,7 +230,7 @@ static void __cpuinit fpu_init(void)
}
#ifdef CONFIG_SH_DSP
-static void __cpuinit release_dsp(void)
+static void release_dsp(void)
{
unsigned long sr;
@@ -244,7 +244,7 @@ static void __cpuinit release_dsp(void)
);
}
-static void __cpuinit dsp_init(void)
+static void dsp_init(void)
{
unsigned long sr;
@@ -276,7 +276,7 @@ static void __cpuinit dsp_init(void)
release_dsp();
}
#else
-static inline void __cpuinit dsp_init(void) { }
+static inline void dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
@@ -295,7 +295,7 @@ static inline void __cpuinit dsp_init(void) { }
* Each processor family is still responsible for doing its own probing
* and cache configuration in cpu_probe().
*/
-asmlinkage void __cpuinit cpu_init(void)
+asmlinkage void cpu_init(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index bab8e75958ae..6c687ae812ef 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-void __cpuinit cpu_probe(void)
+void cpu_probe(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
boot_cpu_data.type = CPU_SH7619;
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 5170b6aa4129..3f87971082f1 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-void __cpuinit cpu_probe(void)
+void cpu_probe(void)
{
boot_cpu_data.family = CPU_FAMILY_SH2A;
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf23c322e164..426e1e1dcedc 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
#include <asm/cache.h>
#include <asm/io.h>
-void __cpuinit cpu_probe(void)
+void cpu_probe(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 0fbbd50bc8ad..a521bcf50695 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -15,7 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-void __cpuinit cpu_probe(void)
+void cpu_probe(void)
{
unsigned long pvr, prr, cvr;
unsigned long size;
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 03f2b55757cf..4a298808789c 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -124,7 +124,7 @@ static void shx3_update_boot_vector(unsigned int cpu)
__raw_writel(STBCR_RESET, STBCR_REG(cpu));
}
-static int __cpuinit
+static int
shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)hcpu;
@@ -143,11 +143,11 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
+static struct notifier_block shx3_cpu_notifier = {
.notifier_call = shx3_cpu_callback,
};
-static int __cpuinit register_shx3_cpu_notifier(void)
+static int register_shx3_cpu_notifier(void)
{
register_hotcpu_notifier(&shx3_cpu_notifier);
return 0;
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 9e882409e4e9..eca427c2f2f3 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -17,7 +17,7 @@
#include <asm/cache.h>
#include <asm/tlb.h>
-void __cpuinit cpu_probe(void)
+void cpu_probe(void)
{
unsigned long long cir;
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 068b8a2759b5..b9cefebda55c 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -367,7 +367,7 @@ static void sh_pmu_setup(int cpu)
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}
-static int __cpuinit
+static int
sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
@@ -384,7 +384,7 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
+int register_sh_pmu(struct sh_pmu *_pmu)
{
if (sh_pmu)
return -EBUSY;
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 055d91b70305..53bc6c4c84ec 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -65,7 +65,7 @@ void arch_task_cache_init(void)
# define HAVE_SOFTFP 0
#endif
-void __cpuinit init_thread_xstate(void)
+void init_thread_xstate(void)
{
if (boot_cpu_data.flags & CPU_HAS_FPU)
xstate_size = sizeof(struct sh_fpu_hard_struct);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index ebe7a7d97215..1cf90e947dbf 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -172,7 +172,7 @@ disable:
#endif
}
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
struct clk *clk = clk_get(NULL, "cpu_clk");
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 45696451f0ea..86a7936a980b 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -37,7 +37,7 @@ struct plat_smp_ops *mp_ops = NULL;
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
-void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(struct plat_smp_ops *ops)
{
if (mp_ops)
printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -45,7 +45,7 @@ void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
mp_ops = ops;
}
-static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
+static inline void smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
@@ -174,7 +174,7 @@ void native_play_dead(void)
}
#endif
-asmlinkage void __cpuinit start_secondary(void)
+asmlinkage void start_secondary(void)
{
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
@@ -215,7 +215,7 @@ extern struct {
void *thread_info;
} stack_start;
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk)
+int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
unsigned long timeout;
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 5f513a64dedf..68e99f09171d 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -741,7 +741,7 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
die_if_kernel("exception", regs, ex);
}
-void __cpuinit per_cpu_trap_init(void)
+void per_cpu_trap_init(void)
{
extern void *vbr_base;
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index f87d20da1791..112ea11c030d 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -810,7 +810,7 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
-void __cpuinit per_cpu_trap_init(void)
+void per_cpu_trap_init(void)
{
/* Nothing to do for now, VBR initialization later. */
}
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index ff1c40a31cbc..e4bb2a8e0a69 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -17,7 +17,7 @@
/**
* sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
*/
-int __cpuinit sh64_tlb_init(void)
+int sh64_tlb_init(void)
{
/* Assign some sane DTLB defaults */
cpu_data->dtlb.entries = 64;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 11d460f6f9cc..62d6b153ffa2 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -528,10 +528,8 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
}
}
-static int __cpuinit dr_cpu_configure(struct ds_info *dp,
- struct ds_cap_state *cp,
- u64 req_num,
- cpumask_t *mask)
+static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp,
+ u64 req_num, cpumask_t *mask)
{
struct ds_data *resp;
int resp_len, ncpus, cpu;
@@ -627,9 +625,8 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
return 0;
}
-static void __cpuinit dr_cpu_data(struct ds_info *dp,
- struct ds_cap_state *cp,
- void *buf, int len)
+static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf,
+ int len)
{
struct ds_data *data = buf;
struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index cc3c5cb47cda..9c179fbfb219 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -250,7 +250,7 @@ extern struct ino_bucket *ivector_table;
extern unsigned long ivector_table_pa;
extern void init_irqwork_curcpu(void);
-extern void __cpuinit sun4v_register_mondo_queues(int this_cpu);
+extern void sun4v_register_mondo_queues(int this_cpu);
#endif /* CONFIG_SPARC32 */
#endif /* _ENTRY_H */
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 605c960b2fa6..4eb1a5a1d544 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -16,7 +16,6 @@
#include <asm/asi.h>
#include <asm/pil.h>
- __CPUINIT
.align 8
.globl hv_cpu_startup, hv_cpu_startup_end
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 9bcbbe2c4e7e..d4840cec2c55 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -835,7 +835,8 @@ void notrace init_irqwork_curcpu(void)
* Therefore you cannot make any OBP calls, not even prom_printf,
* from these two routines.
*/
-static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
+ unsigned long qmask)
{
unsigned long num_entries = (qmask + 1) / 64;
unsigned long status;
@@ -848,7 +849,7 @@ static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned l
}
}
-void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
+void notrace sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index d7aa524b7283..6edf955f987c 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -54,7 +54,7 @@ extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
-void __cpuinit leon_configure_cache_smp(void);
+void leon_configure_cache_smp(void);
static void leon_ipi_init(void);
/* IRQ number of LEON IPIs */
@@ -69,12 +69,12 @@ static inline unsigned long do_swap(volatile unsigned long *ptr,
return val;
}
-void __cpuinit leon_cpu_pre_starting(void *arg)
+void leon_cpu_pre_starting(void *arg)
{
leon_configure_cache_smp();
}
-void __cpuinit leon_cpu_pre_online(void *arg)
+void leon_cpu_pre_online(void *arg)
{
int cpuid = hard_smp_processor_id();
@@ -106,7 +106,7 @@ void __cpuinit leon_cpu_pre_online(void *arg)
extern struct linux_prom_registers smp_penguin_ctable;
-void __cpuinit leon_configure_cache_smp(void)
+void leon_configure_cache_smp(void)
{
unsigned long cfg = sparc_leon3_get_dcachecfg();
int me = smp_processor_id();
@@ -186,7 +186,7 @@ void __init leon_boot_cpus(void)
}
-int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle)
+int leon_boot_one_cpu(int i, struct task_struct *idle)
{
int timeout;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 831c001604e8..b90bf23e3aab 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -571,9 +571,7 @@ static void __init report_platform_properties(void)
mdesc_release(hp);
}
-static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
- struct mdesc_handle *hp,
- u64 mp)
+static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
{
const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
@@ -616,7 +614,7 @@ static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
}
}
-static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
{
u64 a;
@@ -649,7 +647,7 @@ static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id
}
}
-static void __cpuinit set_core_ids(struct mdesc_handle *hp)
+static void set_core_ids(struct mdesc_handle *hp)
{
int idx;
u64 mp;
@@ -674,7 +672,7 @@ static void __cpuinit set_core_ids(struct mdesc_handle *hp)
}
}
-static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
+static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
u64 a;
@@ -693,7 +691,7 @@ static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id
}
}
-static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
+static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
int idx;
u64 mp;
@@ -714,14 +712,14 @@ static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_u
}
}
-static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
+static void set_proc_ids(struct mdesc_handle *hp)
{
__set_proc_ids(hp, "exec_unit");
__set_proc_ids(hp, "exec-unit");
}
-static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
- unsigned long def, unsigned long max)
+static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
+ unsigned long def, unsigned long max)
{
u64 val;
@@ -742,8 +740,8 @@ use_default:
*mask = ((1U << def) * 64U) - 1U;
}
-static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
- struct trap_per_cpu *tb)
+static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
+ struct trap_per_cpu *tb)
{
static int printed;
const u64 *val;
@@ -769,7 +767,7 @@ static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
}
}
-static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
+static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
struct mdesc_handle *hp = mdesc_grab();
void *ret = NULL;
@@ -799,7 +797,8 @@ out:
return ret;
}
-static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
+ void *arg)
{
ncpus_probed++;
#ifdef CONFIG_SMP
@@ -808,7 +807,7 @@ static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpui
return NULL;
}
-void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
+void mdesc_populate_present_mask(cpumask_t *mask)
{
if (tlb_type != hypervisor)
return;
@@ -841,7 +840,8 @@ void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
}
-static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
+ void *arg)
{
const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
struct trap_per_cpu *tb;
@@ -890,7 +890,7 @@ static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpu
return NULL;
}
-void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
+void mdesc_fill_in_cpu_data(cpumask_t *mask)
{
struct mdesc_handle *hp;
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e3f2b81c23f1..a102bfba6ea8 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -39,7 +39,7 @@
#include "kernel.h"
#include "irq.h"
-volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
+volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
cpumask_t smp_commenced_mask = CPU_MASK_NONE;
@@ -53,7 +53,7 @@ const struct sparc32_ipi_ops *sparc32_ipi_ops;
* instruction which is much better...
*/
-void __cpuinit smp_store_cpu_info(int id)
+void smp_store_cpu_info(int id)
{
int cpu_node;
int mid;
@@ -120,7 +120,7 @@ void cpu_panic(void)
panic("SMP bolixed\n");
}
-struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
+struct linux_prom_registers smp_penguin_ctable = { 0 };
void smp_send_reschedule(int cpu)
{
@@ -259,10 +259,10 @@ void __init smp_prepare_boot_cpu(void)
set_cpu_possible(cpuid, true);
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *);
- extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *);
+ extern int smp4m_boot_one_cpu(int, struct task_struct *);
+ extern int smp4d_boot_one_cpu(int, struct task_struct *);
int ret=0;
switch(sparc_cpu_model) {
@@ -297,7 +297,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
return ret;
}
-void __cpuinit arch_cpu_pre_starting(void *arg)
+void arch_cpu_pre_starting(void *arg)
{
local_ops->cache_all();
local_ops->tlb_all();
@@ -317,7 +317,7 @@ void __cpuinit arch_cpu_pre_starting(void *arg)
}
}
-void __cpuinit arch_cpu_pre_online(void *arg)
+void arch_cpu_pre_online(void *arg)
{
unsigned int cpuid = hard_smp_processor_id();
@@ -344,7 +344,7 @@ void __cpuinit arch_cpu_pre_online(void *arg)
}
}
-void __cpuinit sparc_start_secondary(void *arg)
+void sparc_start_secondary(void *arg)
{
unsigned int cpu;
@@ -375,7 +375,7 @@ void __cpuinit sparc_start_secondary(void *arg)
BUG();
}
-void __cpuinit smp_callin(void)
+void smp_callin(void)
{
sparc_start_secondary(NULL);
}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 77539eda928c..e142545244f2 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -87,7 +87,7 @@ extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
-void __cpuinit smp_callin(void)
+void smp_callin(void)
{
int cpuid = hard_smp_processor_id();
@@ -281,7 +281,8 @@ static unsigned long kimage_addr_to_ra(void *p)
return kern_base + (val - KERNBASE);
}
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
+static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
+ void **descrp)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
@@ -342,7 +343,7 @@ extern unsigned long sparc64_cpu_startup;
*/
static struct thread_info *cpu_new_thread = NULL;
-static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
+static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
@@ -1266,7 +1267,7 @@ void smp_fill_in_sib_core_maps(void)
}
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = smp_boot_one_cpu(cpu, tidle);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index c9eb82f23d92..d5c319553fd0 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -50,7 +50,7 @@ static inline void show_leds(int cpuid)
"i" (ASI_M_CTL));
}
-void __cpuinit sun4d_cpu_pre_starting(void *arg)
+void sun4d_cpu_pre_starting(void *arg)
{
int cpuid = hard_smp_processor_id();
@@ -62,7 +62,7 @@ void __cpuinit sun4d_cpu_pre_starting(void *arg)
cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
}
-void __cpuinit sun4d_cpu_pre_online(void *arg)
+void sun4d_cpu_pre_online(void *arg)
{
unsigned long flags;
int cpuid;
@@ -118,7 +118,7 @@ void __init smp4d_boot_cpus(void)
local_ops->cache_all();
}
-int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle)
+int smp4d_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4d_cpu_startup;
int timeout;
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 8a65f158153d..d3408e72d20c 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -34,11 +34,11 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
return val;
}
-void __cpuinit sun4m_cpu_pre_starting(void *arg)
+void sun4m_cpu_pre_starting(void *arg)
{
}
-void __cpuinit sun4m_cpu_pre_online(void *arg)
+void sun4m_cpu_pre_online(void *arg)
{
int cpuid = hard_smp_processor_id();
@@ -75,7 +75,7 @@ void __init smp4m_boot_cpus(void)
local_ops->cache_all();
}
-int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle)
+int smp4m_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4m_cpu_startup;
int timeout;
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 654e8aad3bbe..c21c673e5f7c 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -246,7 +246,7 @@ static void unregister_cpu_online(unsigned int cpu)
}
#endif
-static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+static int sysfs_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
+static struct notifier_block sysfs_cpu_nb = {
.notifier_call = sysfs_cpu_notify,
};
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 6cdb08cdabf0..76dcbd3c988a 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -18,7 +18,6 @@
.globl sun4m_cpu_startup
.globl sun4d_cpu_startup
- __CPUINIT
.align 4
/* When we start up a cpu for the first time it enters this routine.
@@ -94,7 +93,6 @@ smp_panic:
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
- __CPUINIT
.align 4
sun4d_cpu_startup:
@@ -146,7 +144,6 @@ sun4d_cpu_startup:
b,a smp_panic
- __CPUINIT
.align 4
.global leon_smp_cpu_startup, smp_penguin_ctable
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index 2e973a26fbda..e0b1e13a0736 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -32,13 +32,11 @@ itlb_load:
dtlb_load:
.asciz "SUNW,dtlb-load"
- /* XXX __cpuinit this thing XXX */
#define TRAMP_STACK_SIZE 1024
.align 16
tramp_stack:
.skip TRAMP_STACK_SIZE
- __CPUINIT
.align 8
.globl sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index a9c42a7ffb6a..ed82edad1a39 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1694,7 +1694,7 @@ static void __init sun4v_ktsb_init(void)
#endif
}
-void __cpuinit sun4v_ktsb_register(void)
+void sun4v_ktsb_register(void)
{
unsigned long pa, ret;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 036c2797dece..5d721df48a72 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -858,7 +858,7 @@ static void __init map_kernel(void)
}
}
-void (*poke_srmmu)(void) __cpuinitdata = NULL;
+void (*poke_srmmu)(void) = NULL;
extern unsigned long bootmem_init(unsigned long *pages_avail);
@@ -1055,7 +1055,7 @@ static void __init init_vac_layout(void)
(int)vac_cache_size, (int)vac_line_size);
}
-static void __cpuinit poke_hypersparc(void)
+static void poke_hypersparc(void)
{
volatile unsigned long clear;
unsigned long mreg = srmmu_get_mmureg();
@@ -1107,7 +1107,7 @@ static void __init init_hypersparc(void)
hypersparc_setup_blockops();
}
-static void __cpuinit poke_swift(void)
+static void poke_swift(void)
{
unsigned long mreg;
@@ -1287,7 +1287,7 @@ static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long
}
-static void __cpuinit poke_turbosparc(void)
+static void poke_turbosparc(void)
{
unsigned long mreg = srmmu_get_mmureg();
unsigned long ccreg;
@@ -1350,7 +1350,7 @@ static void __init init_turbosparc(void)
poke_srmmu = poke_turbosparc;
}
-static void __cpuinit poke_tsunami(void)
+static void poke_tsunami(void)
{
unsigned long mreg = srmmu_get_mmureg();
@@ -1391,7 +1391,7 @@ static void __init init_tsunami(void)
tsunami_setup_blockops();
}
-static void __cpuinit poke_viking(void)
+static void poke_viking(void)
{
unsigned long mreg = srmmu_get_mmureg();
static int smp_catch;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 02e628065012..3ccf2cd7182e 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -220,7 +220,7 @@ void __init init_IRQ(void)
ipi_init();
}
-void __cpuinit setup_irq_regs(void)
+void setup_irq_regs(void)
{
/* Enable interrupt delivery. */
unmask_irqs(~0UL);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 0858ee6b520f..00331af9525d 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -25,7 +25,7 @@
/* All messages are stored here */
static DEFINE_PER_CPU(HV_MsgState, msg_state);
-void __cpuinit init_messaging(void)
+void init_messaging(void)
{
/* Allocate storage for messages in kernel space */
HV_MsgState *state = &__get_cpu_var(msg_state);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 68b542677f6a..eceb8344280f 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -58,8 +58,8 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
/* Information on the NUMA nodes that we compute early */
-unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
-unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
+unsigned long node_start_pfn[MAX_NUMNODES];
+unsigned long node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];
@@ -84,7 +84,7 @@ unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
-unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
+unsigned long node_lowmem_end_pfn[MAX_NUMNODES];
/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
@@ -290,7 +290,7 @@ static void *__init setup_pa_va_mapping(void)
* This is up to 4 mappings for lowmem, one mapping per memory
* controller, plus one for our text segment.
*/
-static void __cpuinit store_permanent_mappings(void)
+static void store_permanent_mappings(void)
{
int i;
@@ -935,7 +935,7 @@ subsys_initcall(topology_init);
* So the values we set up here in the hypervisor may be overridden on
* the boot cpu as arguments are parsed.
*/
-static __cpuinit void init_super_pages(void)
+static void init_super_pages(void)
{
#ifdef CONFIG_HUGETLB_SUPER_PAGES
int i;
@@ -950,7 +950,7 @@ static __cpuinit void init_super_pages(void)
*
* Called from setup_arch() on the boot cpu, or online_secondary().
*/
-void __cpuinit setup_cpu(int boot)
+void setup_cpu(int boot)
{
/* The boot cpu sets up its permanent mappings much earlier. */
if (!boot)
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 44bab29bf2f3..a535655b7089 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -133,14 +133,14 @@ static __init int reset_init_affinity(void)
}
late_initcall(reset_init_affinity);
-static struct cpumask cpu_started __cpuinitdata;
+static struct cpumask cpu_started;
/*
* Activate a secondary processor. Very minimal; don't add anything
* to this path without knowing what you're doing, since SMP booting
* is pretty fragile.
*/
-static void __cpuinit start_secondary(void)
+static void start_secondary(void)
{
int cpuid = smp_processor_id();
@@ -183,7 +183,7 @@ static void __cpuinit start_secondary(void)
/*
* Bring a secondary processor online.
*/
-void __cpuinit online_secondary(void)
+void online_secondary(void)
{
/*
* low-memory mappings have been cleared, flush them from
@@ -210,7 +210,7 @@ void __cpuinit online_secondary(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
/* Wait 5s total for all CPUs for them to come online */
static int timeout;
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 5ac397ec6986..7c353d8c2da9 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -159,7 +159,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
.set_mode = tile_timer_set_mode,
};
-void __cpuinit setup_tile_timer(void)
+void setup_tile_timer(void)
{
struct clock_event_device *evt = &__get_cpu_var(tile_timer);
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h
index e584e40ee832..f2ca5702a4e2 100644
--- a/arch/um/include/shared/frame_kern.h
+++ b/arch/um/include/shared/frame_kern.h
@@ -6,13 +6,13 @@
#ifndef __FRAME_KERN_H_
#define __FRAME_KERN_H_
-extern int setup_signal_stack_sc(unsigned long stack_top, int sig,
+extern int setup_signal_stack_sc(unsigned long stack_top, int sig,
struct k_sigaction *ka,
- struct pt_regs *regs,
+ struct pt_regs *regs,
sigset_t *mask);
-extern int setup_signal_stack_si(unsigned long stack_top, int sig,
+extern int setup_signal_stack_si(unsigned long stack_top, int sig,
struct k_sigaction *ka,
- struct pt_regs *regs, siginfo_t *info,
+ struct pt_regs *regs, struct siginfo *info,
sigset_t *mask);
#endif
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 3e831b3fd07b..f57e02e7910f 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -19,7 +19,7 @@ EXPORT_SYMBOL(unblock_signals);
* OK, we're invoking a handler
*/
static void handle_signal(struct pt_regs *regs, unsigned long signr,
- struct k_sigaction *ka, siginfo_t *info)
+ struct k_sigaction *ka, struct siginfo *info)
{
sigset_t *oldset = sigmask_to_save();
int singlestep = 0;
@@ -71,7 +71,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
static int kern_do_signal(struct pt_regs *regs)
{
struct k_sigaction ka_copy;
- siginfo_t info;
+ struct siginfo info;
int sig, handled_sig = 0;
while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index ff03067a3b14..007d5503f49b 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -123,7 +123,7 @@ void uml_setup_stubs(struct mm_struct *mm)
/* dup_mmap already holds mmap_sem */
err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
VM_READ | VM_MAYREAD | VM_EXEC |
- VM_MAYEXEC | VM_DONTCOPY,
+ VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
mm->context.stub_pages);
if (err) {
printk(KERN_ERR "install_special_mapping returned %d\n", err);
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 1d3e0c17340b..4ffb644d6c07 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -254,6 +254,6 @@ int strnlen_user(const void __user *str, int len)
n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
if (n == 0)
return count + 1;
- return -EFAULT;
+ return 0;
}
EXPORT_SYMBOL(strnlen_user);
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index ba4398056fe9..3c4af77e51a2 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -53,6 +53,25 @@ static void __init find_tempdir(void)
}
/*
+ * Remove bytes from the front of the buffer and refill it so that if there's a
+ * partial string that we care about, it will be completed, and we can recognize
+ * it.
+ */
+static int pop(int fd, char *buf, size_t size, size_t npop)
+{
+ ssize_t n;
+ size_t len = strlen(&buf[npop]);
+
+ memmove(buf, &buf[npop], len + 1);
+ n = read(fd, &buf[len], size - len - 1);
+ if (n < 0)
+ return -errno;
+
+ buf[len + n] = '\0';
+ return 1;
+}
+
+/*
* This will return 1, with the first character in buf being the
* character following the next instance of c in the file. This will
* read the file as needed. If there's an error, -errno is returned;
@@ -61,7 +80,6 @@ static void __init find_tempdir(void)
static int next(int fd, char *buf, size_t size, char c)
{
ssize_t n;
- size_t len;
char *ptr;
while ((ptr = strchr(buf, c)) == NULL) {
@@ -74,20 +92,129 @@ static int next(int fd, char *buf, size_t size, char c)
buf[n] = '\0';
}
- ptr++;
- len = strlen(ptr);
- memmove(buf, ptr, len + 1);
+ return pop(fd, buf, size, ptr - buf + 1);
+}
+
+/*
+ * Decode an octal-escaped and space-terminated path of the form used by
+ * /proc/mounts. May be used to decode a path in-place. "out" must be at least
+ * as large as the input. The output is always null-terminated. "len" gets the
+ * length of the output, excluding the trailing null. Returns 0 if a full path
+ * was successfully decoded, otherwise an error.
+ */
+static int decode_path(const char *in, char *out, size_t *len)
+{
+ char *first = out;
+ int c;
+ int i;
+ int ret = -EINVAL;
+ while (1) {
+ switch (*in) {
+ case '\0':
+ goto out;
+
+ case ' ':
+ ret = 0;
+ goto out;
+
+ case '\\':
+ in++;
+ c = 0;
+ for (i = 0; i < 3; i++) {
+ if (*in < '0' || *in > '7')
+ goto out;
+ c = (c << 3) | (*in++ - '0');
+ }
+ *(unsigned char *)out++ = (unsigned char) c;
+ break;
+
+ default:
+ *out++ = *in++;
+ break;
+ }
+ }
+
+out:
+ *out = '\0';
+ *len = out - first;
+ return ret;
+}
+
+/*
+ * Computes the length of s when encoded with three-digit octal escape sequences
+ * for the characters in chars.
+ */
+static size_t octal_encoded_length(const char *s, const char *chars)
+{
+ size_t len = strlen(s);
+ while ((s = strpbrk(s, chars)) != NULL) {
+ len += 3;
+ s++;
+ }
+
+ return len;
+}
+
+enum {
+ OUTCOME_NOTHING_MOUNTED,
+ OUTCOME_TMPFS_MOUNT,
+ OUTCOME_NON_TMPFS_MOUNT,
+};
+
+/* Read a line of /proc/mounts data looking for a tmpfs mount at "path". */
+static int read_mount(int fd, char *buf, size_t bufsize, const char *path,
+ int *outcome)
+{
+ int found;
+ int match;
+ char *space;
+ size_t len;
+
+ enum {
+ MATCH_NONE,
+ MATCH_EXACT,
+ MATCH_PARENT,
+ };
+
+ found = next(fd, buf, bufsize, ' ');
+ if (found != 1)
+ return found;
/*
- * Refill the buffer so that if there's a partial string that we care
- * about, it will be completed, and we can recognize it.
+ * If there's no following space in the buffer, then this path is
+ * truncated, so it can't be the one we're looking for.
*/
- n = read(fd, &buf[len], size - len - 1);
- if (n < 0)
- return -errno;
+ space = strchr(buf, ' ');
+ if (space) {
+ match = MATCH_NONE;
+ if (!decode_path(buf, buf, &len)) {
+ if (!strcmp(buf, path))
+ match = MATCH_EXACT;
+ else if (!strncmp(buf, path, len)
+ && (path[len] == '/' || !strcmp(buf, "/")))
+ match = MATCH_PARENT;
+ }
+
+ found = pop(fd, buf, bufsize, space - buf + 1);
+ if (found != 1)
+ return found;
+
+ switch (match) {
+ case MATCH_EXACT:
+ if (!strncmp(buf, "tmpfs", strlen("tmpfs")))
+ *outcome = OUTCOME_TMPFS_MOUNT;
+ else
+ *outcome = OUTCOME_NON_TMPFS_MOUNT;
+ break;
- buf[len + n] = '\0';
- return 1;
+ case MATCH_PARENT:
+ /* This mount obscures any previous ones. */
+ *outcome = OUTCOME_NOTHING_MOUNTED;
+ break;
+ }
+ }
+
+ return next(fd, buf, bufsize, '\n');
}
/* which_tmpdir is called only during early boot */
@@ -106,8 +233,12 @@ static int checked_tmpdir = 0;
*/
static void which_tmpdir(void)
{
- int fd, found;
- char buf[128] = { '\0' };
+ int fd;
+ int found;
+ int outcome;
+ char *path;
+ char *buf;
+ size_t bufsize;
if (checked_tmpdir)
return;
@@ -116,49 +247,66 @@ static void which_tmpdir(void)
printf("Checking for tmpfs mount on /dev/shm...");
+ path = realpath("/dev/shm", NULL);
+ if (!path) {
+ printf("failed to check real path, errno = %d\n", errno);
+ return;
+ }
+ printf("%s...", path);
+
+ /*
+ * The buffer needs to be able to fit the full octal-escaped path, a
+ * space, and a trailing null in order to successfully decode it.
+ */
+ bufsize = octal_encoded_length(path, " \t\n\\") + 2;
+
+ if (bufsize < 128)
+ bufsize = 128;
+
+ buf = malloc(bufsize);
+ if (!buf) {
+ printf("malloc failed, errno = %d\n", errno);
+ goto out;
+ }
+ buf[0] = '\0';
+
fd = open("/proc/mounts", O_RDONLY);
if (fd < 0) {
printf("failed to open /proc/mounts, errno = %d\n", errno);
- return;
+ goto out1;
}
+ outcome = OUTCOME_NOTHING_MOUNTED;
while (1) {
- found = next(fd, buf, ARRAY_SIZE(buf), ' ');
- if (found != 1)
- break;
-
- if (!strncmp(buf, "/dev/shm", strlen("/dev/shm")))
- goto found;
-
- found = next(fd, buf, ARRAY_SIZE(buf), '\n');
+ found = read_mount(fd, buf, bufsize, path, &outcome);
if (found != 1)
break;
}
-err:
- if (found == 0)
- printf("nothing mounted on /dev/shm\n");
- else if (found < 0)
+ if (found < 0) {
printf("read returned errno %d\n", -found);
+ } else {
+ switch (outcome) {
+ case OUTCOME_TMPFS_MOUNT:
+ printf("OK\n");
+ default_tmpdir = "/dev/shm";
+ break;
-out:
- close(fd);
-
- return;
-
-found:
- found = next(fd, buf, ARRAY_SIZE(buf), ' ');
- if (found != 1)
- goto err;
+ case OUTCOME_NON_TMPFS_MOUNT:
+ printf("not tmpfs\n");
+ break;
- if (strncmp(buf, "tmpfs", strlen("tmpfs"))) {
- printf("not tmpfs\n");
- goto out;
+ default:
+ printf("nothing mounted on /dev/shm\n");
+ break;
+ }
}
- printf("OK\n");
- default_tmpdir = "/dev/shm";
- goto out;
+ close(fd);
+out1:
+ free(buf);
+out:
+ free(path);
}
static int __init make_tempfile(const char *template, char **out_tempname,
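The path handling added above relies on how the kernel renders mount points in /proc/mounts: space, tab, newline and backslash are emitted as three-digit octal escapes (a space becomes "\040"), which is exactly the character set passed to octal_encoded_length(). A minimal standalone sketch of the decoding step, mirroring the loop in decode_path(); the helper name decode_octal_escapes and the sample path are illustrative only:

#include <stdio.h>

/* Decode "\ooo" octal escapes in place; returns the decoded length. */
static size_t decode_octal_escapes(char *s)
{
	char *in = s, *out = s;

	while (*in) {
		if (in[0] == '\\' &&
		    in[1] >= '0' && in[1] <= '7' &&
		    in[2] >= '0' && in[2] <= '7' &&
		    in[3] >= '0' && in[3] <= '7') {
			*out++ = (char)(((in[1] - '0') << 6) |
					((in[2] - '0') << 3) |
					 (in[3] - '0'));
			in += 4;
		} else {
			*out++ = *in++;
		}
	}
	*out = '\0';
	return (size_t)(out - s);
}

int main(void)
{
	char mnt[] = "/mnt/my\\040dir";	/* as printed by /proc/mounts */

	decode_octal_escapes(mnt);
	printf("%s\n", mnt);		/* "/mnt/my dir" */
	return 0;
}

This is also why octal_encoded_length() adds three bytes per escaped character: each such character expands from one byte to a four-byte "\ooo" sequence in the encoded form.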
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 9d9f1b4bf826..905924b773d3 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -25,7 +25,7 @@ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGIO] = sigio_handler,
[SIGVTALRM] = timer_handler };
-static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc)
+static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
struct uml_pt_regs r;
int save_errno = errno;
@@ -61,7 +61,7 @@ static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc)
static int signals_enabled;
static unsigned int signals_pending;
-void sig_handler(int sig, siginfo_t *si, mcontext_t *mc)
+void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
int enabled;
@@ -120,7 +120,7 @@ void set_sigstack(void *sig_stack, int size)
panic("enabling signal stack failed, errno = %d\n", errno);
}
-static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = {
+static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
[SIGSEGV] = sig_handler,
[SIGBUS] = sig_handler,
[SIGILL] = sig_handler,
@@ -162,7 +162,7 @@ static void hard_handler(int sig, siginfo_t *si, void *p)
while ((sig = ffs(pending)) != 0){
sig--;
pending &= ~(1 << sig);
- (*handlers[sig])(sig, si, mc);
+ (*handlers[sig])(sig, (struct siginfo *)si, mc);
}
/*
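These hunks switch the UML signal plumbing from libc's siginfo_t to the kernel's struct siginfo, with an explicit cast in hard_handler() at the point where libc still hands over a siginfo_t. For reference, a generic userspace sketch of installing such a three-argument handler with SA_SIGINFO (plain POSIX, independent of the UML helpers; the signal choice and function names are arbitrary):

#include <signal.h>
#include <string.h>

static void hard_handler(int sig, siginfo_t *si, void *ctx)
{
	/* libc delivers a siginfo_t here; UML immediately casts it to its
	 * own struct siginfo before dispatching through its handler table. */
	(void)sig; (void)si; (void)ctx;
}

static void install_handler(int sig)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = hard_handler;		/* three-argument form */
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(sig, &sa, NULL);
}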
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 4625949bf1e4..d531879a4617 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
void wait_stub_done(int pid)
{
- int n, status, err;
+ int n, status, err, bad_stop = 0;
while (1) {
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,6 +74,8 @@ void wait_stub_done(int pid)
if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
return;
+ else
+ bad_stop = 1;
bad_wait:
err = ptrace_dump_regs(pid);
@@ -83,7 +85,10 @@ bad_wait:
printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
"pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
status);
- fatal_sigsegv();
+ if (bad_stop)
+ kill(pid, SIGKILL);
+ else
+ fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);
@@ -409,7 +414,7 @@ void userspace(struct uml_pt_regs *regs)
if (WIFSTOPPED(status)) {
int sig = WSTOPSIG(status);
- ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
+ ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);
switch (sig) {
case SIGSEGV:
@@ -417,7 +422,7 @@ void userspace(struct uml_pt_regs *regs)
!ptrace_faultinfo) {
get_skas_faultinfo(pid,
&regs->faultinfo);
- (*sig_info[SIGSEGV])(SIGSEGV, &si,
+ (*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
regs);
}
else handle_segv(pid, regs);
@@ -426,14 +431,14 @@ void userspace(struct uml_pt_regs *regs)
handle_trap(pid, regs, local_using_sysemu);
break;
case SIGTRAP:
- relay_signal(SIGTRAP, &si, regs);
+ relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
break;
case SIGVTALRM:
now = os_nsecs();
if (now < nsecs)
break;
block_signals();
- (*sig_info[sig])(sig, &si, regs);
+ (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
unblock_signals();
nsecs = timer.it_value.tv_sec *
UM_NSEC_PER_SEC +
@@ -447,7 +452,7 @@ void userspace(struct uml_pt_regs *regs)
case SIGFPE:
case SIGWINCH:
block_signals();
- (*sig_info[sig])(sig, &si, regs);
+ (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
unblock_signals();
break;
default:
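The wait_stub_done() change above distinguishes a child that stopped on an unexpected signal (bad_stop) from a failed wait, and reacts by killing that child rather than taking down the host with fatal_sigsegv(). The decision rests on standard waitpid() status decoding; a compressed sketch, where the STUB_DONE_MASK value is only an assumption standing in for "the stop signals the stub is expected to end on":

#include <sys/wait.h>
#include <signal.h>

/* Illustrative only -- the real mask lives in the UML headers. */
#define STUB_DONE_MASK	((1 << SIGTRAP) | (1 << SIGVTALRM))

static int stub_finished_cleanly(int status)
{
	if (!WIFSTOPPED(status))
		return 0;			/* exited or was terminated */
	return ((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0;
}

A zero return while WIFSTOPPED(status) is true corresponds to the new bad_stop case: the child is alive but stopped on the wrong signal, so SIGKILL is the safe way out.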
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..6c63c358a7e6 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
# These modules require assembler to support AVX.
ifeq ($(avx_supported),yes)
@@ -82,4 +81,3 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
-crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
deleted file mode 100644
index 35e97569d05f..000000000000
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ /dev/null
@@ -1,643 +0,0 @@
-########################################################################
-# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
-#
-# Copyright (c) 2013, Intel Corporation
-#
-# Authors:
-# Erdinc Ozturk <erdinc.ozturk@intel.com>
-# Vinodh Gopal <vinodh.gopal@intel.com>
-# James Guilford <james.guilford@intel.com>
-# Tim Chen <tim.c.chen@linux.intel.com>
-#
-# This software is available to you under a choice of one of two
-# licenses. You may choose to be licensed under the terms of the GNU
-# General Public License (GPL) Version 2, available from the file
-# COPYING in the main directory of this source tree, or the
-# OpenIB.org BSD license below:
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the
-# distribution.
-#
-# * Neither the name of the Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-#
-# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-########################################################################
-# Function API:
-# UINT16 crc_t10dif_pcl(
-# UINT16 init_crc, //initial CRC value, 16 bits
-# const unsigned char *buf, //buffer pointer to calculate CRC on
-# UINT64 len //buffer length in bytes (64-bit data)
-# );
-#
-# Reference paper titled "Fast CRC Computation for Generic
-# Polynomials Using PCLMULQDQ Instruction"
-# URL: http://www.intel.com/content/dam/www/public/us/en/documents
-# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
-#
-#
-
-#include <linux/linkage.h>
-
-.text
-
-#define arg1 %rdi
-#define arg2 %rsi
-#define arg3 %rdx
-
-#define arg1_low32 %edi
-
-ENTRY(crc_t10dif_pcl)
-.align 16
-
- # adjust the 16-bit initial_crc value, scale it to 32 bits
- shl $16, arg1_low32
-
- # Allocate Stack Space
- mov %rsp, %rcx
- sub $16*2, %rsp
- # align stack to 16 byte boundary
- and $~(0x10 - 1), %rsp
-
- # check if smaller than 256
- cmp $256, arg3
-
- # for sizes less than 128, we can't fold 64B at a time...
- jl _less_than_128
-
-
- # load the initial crc value
- movd arg1_low32, %xmm10 # initial crc
-
- # crc value does not need to be byte-reflected, but it needs
- # to be moved to the high part of the register.
- # because data will be byte-reflected and will align with
- # initial crc at correct place.
- pslldq $12, %xmm10
-
- movdqa SHUF_MASK(%rip), %xmm11
- # receive the initial 64B data, xor the initial crc value
- movdqu 16*0(arg2), %xmm0
- movdqu 16*1(arg2), %xmm1
- movdqu 16*2(arg2), %xmm2
- movdqu 16*3(arg2), %xmm3
- movdqu 16*4(arg2), %xmm4
- movdqu 16*5(arg2), %xmm5
- movdqu 16*6(arg2), %xmm6
- movdqu 16*7(arg2), %xmm7
-
- pshufb %xmm11, %xmm0
- # XOR the initial_crc value
- pxor %xmm10, %xmm0
- pshufb %xmm11, %xmm1
- pshufb %xmm11, %xmm2
- pshufb %xmm11, %xmm3
- pshufb %xmm11, %xmm4
- pshufb %xmm11, %xmm5
- pshufb %xmm11, %xmm6
- pshufb %xmm11, %xmm7
-
- movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4
- #imm value of pclmulqdq instruction
- #will determine which constant to use
-
- #################################################################
- # we subtract 256 instead of 128 to save one instruction from the loop
- sub $256, arg3
-
- # at this section of the code, there is 64*x+y (0<=y<64) bytes of
- # buffer. The _fold_64_B_loop will fold 64B at a time
- # until we have 64+y Bytes of buffer
-
-
- # fold 64B at a time. This section of the code folds 4 xmm
- # registers in parallel
-_fold_64_B_loop:
-
- # update the buffer pointer
- add $128, arg2 # buf += 64#
-
- movdqu 16*0(arg2), %xmm9
- movdqu 16*1(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm0, %xmm8
- movdqa %xmm1, %xmm13
- pclmulqdq $0x0 , %xmm10, %xmm0
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0 , %xmm10, %xmm1
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm0
- xorps %xmm8 , %xmm0
- pxor %xmm12, %xmm1
- xorps %xmm13, %xmm1
-
- movdqu 16*2(arg2), %xmm9
- movdqu 16*3(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm2, %xmm8
- movdqa %xmm3, %xmm13
- pclmulqdq $0x0, %xmm10, %xmm2
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0, %xmm10, %xmm3
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm2
- xorps %xmm8 , %xmm2
- pxor %xmm12, %xmm3
- xorps %xmm13, %xmm3
-
- movdqu 16*4(arg2), %xmm9
- movdqu 16*5(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm4, %xmm8
- movdqa %xmm5, %xmm13
- pclmulqdq $0x0, %xmm10, %xmm4
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0, %xmm10, %xmm5
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm4
- xorps %xmm8 , %xmm4
- pxor %xmm12, %xmm5
- xorps %xmm13, %xmm5
-
- movdqu 16*6(arg2), %xmm9
- movdqu 16*7(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm6 , %xmm8
- movdqa %xmm7 , %xmm13
- pclmulqdq $0x0 , %xmm10, %xmm6
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0 , %xmm10, %xmm7
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm6
- xorps %xmm8 , %xmm6
- pxor %xmm12, %xmm7
- xorps %xmm13, %xmm7
-
- sub $128, arg3
-
- # check if there is another 64B in the buffer to be able to fold
- jge _fold_64_B_loop
- ##################################################################
-
-
- add $128, arg2
- # at this point, the buffer pointer is pointing at the last y Bytes
- # of the buffer the 64B of folded data is in 4 of the xmm
- # registers: xmm0, xmm1, xmm2, xmm3
-
-
- # fold the 8 xmm registers to 1 xmm register with different constants
-
- movdqa rk9(%rip), %xmm10
- movdqa %xmm0, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm0
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm0, %xmm7
-
- movdqa rk11(%rip), %xmm10
- movdqa %xmm1, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm1
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm1, %xmm7
-
- movdqa rk13(%rip), %xmm10
- movdqa %xmm2, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm2
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm2, %xmm7
-
- movdqa rk15(%rip), %xmm10
- movdqa %xmm3, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm3
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm3, %xmm7
-
- movdqa rk17(%rip), %xmm10
- movdqa %xmm4, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm4
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm4, %xmm7
-
- movdqa rk19(%rip), %xmm10
- movdqa %xmm5, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm5
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm5, %xmm7
-
- movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2
- #imm value of pclmulqdq instruction
- #will determine which constant to use
- movdqa %xmm6, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm6
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm6, %xmm7
-
-
- # instead of 64, we add 48 to the loop counter to save 1 instruction
- # from the loop instead of a cmp instruction, we use the negative
- # flag with the jl instruction
- add $128-16, arg3
- jl _final_reduction_for_128
-
- # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
- # and the rest is in memory. We can fold 16 bytes at a time if y>=16
- # continue folding 16B at a time
-
-_16B_reduction_loop:
- movdqa %xmm7, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm7
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- movdqu (arg2), %xmm0
- pshufb %xmm11, %xmm0
- pxor %xmm0 , %xmm7
- add $16, arg2
- sub $16, arg3
- # instead of a cmp instruction, we utilize the flags with the
- # jge instruction equivalent of: cmp arg3, 16-16
- # check if there is any more 16B in the buffer to be able to fold
- jge _16B_reduction_loop
-
- #now we have 16+z bytes left to reduce, where 0<= z < 16.
- #first, we reduce the data in the xmm7 register
-
-
-_final_reduction_for_128:
- # check if any more data to fold. If not, compute the CRC of
- # the final 128 bits
- add $16, arg3
- je _128_done
-
- # here we are getting data that is less than 16 bytes.
- # since we know that there was data before the pointer, we can
- # offset the input pointer before the actual point, to receive
- # exactly 16 bytes. after that the registers need to be adjusted.
-_get_last_two_xmms:
- movdqa %xmm7, %xmm2
-
- movdqu -16(arg2, arg3), %xmm1
- pshufb %xmm11, %xmm1
-
- # get rid of the extra data that was loaded before
- # load the shift constant
- lea pshufb_shf_table+16(%rip), %rax
- sub arg3, %rax
- movdqu (%rax), %xmm0
-
- # shift xmm2 to the left by arg3 bytes
- pshufb %xmm0, %xmm2
-
- # shift xmm7 to the right by 16-arg3 bytes
- pxor mask1(%rip), %xmm0
- pshufb %xmm0, %xmm7
- pblendvb %xmm2, %xmm1 #xmm0 is implicit
-
- # fold 16 Bytes
- movdqa %xmm1, %xmm2
- movdqa %xmm7, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm7
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm2, %xmm7
-
-_128_done:
- # compute crc of a 128-bit value
- movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10
- movdqa %xmm7, %xmm0
-
- #64b fold
- pclmulqdq $0x1, %xmm10, %xmm7
- pslldq $8 , %xmm0
- pxor %xmm0, %xmm7
-
- #32b fold
- movdqa %xmm7, %xmm0
-
- pand mask2(%rip), %xmm0
-
- psrldq $12, %xmm7
- pclmulqdq $0x10, %xmm10, %xmm7
- pxor %xmm0, %xmm7
-
- #barrett reduction
-_barrett:
- movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10
- movdqa %xmm7, %xmm0
- pclmulqdq $0x01, %xmm10, %xmm7
- pslldq $4, %xmm7
- pclmulqdq $0x11, %xmm10, %xmm7
-
- pslldq $4, %xmm7
- pxor %xmm0, %xmm7
- pextrd $1, %xmm7, %eax
-
-_cleanup:
- # scale the result back to 16 bits
- shr $16, %eax
- mov %rcx, %rsp
- ret
-
-########################################################################
-
-.align 16
-_less_than_128:
-
- # check if there is enough buffer to be able to fold 16B at a time
- cmp $32, arg3
- jl _less_than_32
- movdqa SHUF_MASK(%rip), %xmm11
-
- # now if there is, load the constants
- movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
-
- movd arg1_low32, %xmm0 # get the initial crc value
- pslldq $12, %xmm0 # align it to its correct place
- movdqu (arg2), %xmm7 # load the plaintext
- pshufb %xmm11, %xmm7 # byte-reflect the plaintext
- pxor %xmm0, %xmm7
-
-
- # update the buffer pointer
- add $16, arg2
-
- # update the counter. subtract 32 instead of 16 to save one
- # instruction from the loop
- sub $32, arg3
-
- jmp _16B_reduction_loop
-
-
-.align 16
-_less_than_32:
- # mov initial crc to the return value. this is necessary for
- # zero-length buffers.
- mov arg1_low32, %eax
- test arg3, arg3
- je _cleanup
-
- movdqa SHUF_MASK(%rip), %xmm11
-
- movd arg1_low32, %xmm0 # get the initial crc value
- pslldq $12, %xmm0 # align it to its correct place
-
- cmp $16, arg3
- je _exact_16_left
- jl _less_than_16_left
-
- movdqu (arg2), %xmm7 # load the plaintext
- pshufb %xmm11, %xmm7 # byte-reflect the plaintext
- pxor %xmm0 , %xmm7 # xor the initial crc value
- add $16, arg2
- sub $16, arg3
- movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
- jmp _get_last_two_xmms
-
-
-.align 16
-_less_than_16_left:
- # use stack space to load data less than 16 bytes, zero-out
- # the 16B in memory first.
-
- pxor %xmm1, %xmm1
- mov %rsp, %r11
- movdqa %xmm1, (%r11)
-
- cmp $4, arg3
- jl _only_less_than_4
-
- # backup the counter value
- mov arg3, %r9
- cmp $8, arg3
- jl _less_than_8_left
-
- # load 8 Bytes
- mov (arg2), %rax
- mov %rax, (%r11)
- add $8, %r11
- sub $8, arg3
- add $8, arg2
-_less_than_8_left:
-
- cmp $4, arg3
- jl _less_than_4_left
-
- # load 4 Bytes
- mov (arg2), %eax
- mov %eax, (%r11)
- add $4, %r11
- sub $4, arg3
- add $4, arg2
-_less_than_4_left:
-
- cmp $2, arg3
- jl _less_than_2_left
-
- # load 2 Bytes
- mov (arg2), %ax
- mov %ax, (%r11)
- add $2, %r11
- sub $2, arg3
- add $2, arg2
-_less_than_2_left:
- cmp $1, arg3
- jl _zero_left
-
- # load 1 Byte
- mov (arg2), %al
- mov %al, (%r11)
-_zero_left:
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- # shl r9, 4
- lea pshufb_shf_table+16(%rip), %rax
- sub %r9, %rax
- movdqu (%rax), %xmm0
- pxor mask1(%rip), %xmm0
-
- pshufb %xmm0, %xmm7
- jmp _128_done
-
-.align 16
-_exact_16_left:
- movdqu (arg2), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- jmp _128_done
-
-_only_less_than_4:
- cmp $3, arg3
- jl _only_less_than_3
-
- # load 3 Bytes
- mov (arg2), %al
- mov %al, (%r11)
-
- mov 1(arg2), %al
- mov %al, 1(%r11)
-
- mov 2(arg2), %al
- mov %al, 2(%r11)
-
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- psrldq $5, %xmm7
-
- jmp _barrett
-_only_less_than_3:
- cmp $2, arg3
- jl _only_less_than_2
-
- # load 2 Bytes
- mov (arg2), %al
- mov %al, (%r11)
-
- mov 1(arg2), %al
- mov %al, 1(%r11)
-
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- psrldq $6, %xmm7
-
- jmp _barrett
-_only_less_than_2:
-
- # load 1 Byte
- mov (arg2), %al
- mov %al, (%r11)
-
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- psrldq $7, %xmm7
-
- jmp _barrett
-
-ENDPROC(crc_t10dif_pcl)
-
-.data
-
-# precomputed constants
-# these constants are precomputed from the poly:
-# 0x8bb70000 (0x8bb7 scaled to 32 bits)
-.align 16
-# Q = 0x18BB70000
-# rk1 = 2^(32*3) mod Q << 32
-# rk2 = 2^(32*5) mod Q << 32
-# rk3 = 2^(32*15) mod Q << 32
-# rk4 = 2^(32*17) mod Q << 32
-# rk5 = 2^(32*3) mod Q << 32
-# rk6 = 2^(32*2) mod Q << 32
-# rk7 = floor(2^64/Q)
-# rk8 = Q
-rk1:
-.quad 0x2d56000000000000
-rk2:
-.quad 0x06df000000000000
-rk3:
-.quad 0x9d9d000000000000
-rk4:
-.quad 0x7cf5000000000000
-rk5:
-.quad 0x2d56000000000000
-rk6:
-.quad 0x1368000000000000
-rk7:
-.quad 0x00000001f65a57f8
-rk8:
-.quad 0x000000018bb70000
-
-rk9:
-.quad 0xceae000000000000
-rk10:
-.quad 0xbfd6000000000000
-rk11:
-.quad 0x1e16000000000000
-rk12:
-.quad 0x713c000000000000
-rk13:
-.quad 0xf7f9000000000000
-rk14:
-.quad 0x80a6000000000000
-rk15:
-.quad 0x044c000000000000
-rk16:
-.quad 0xe658000000000000
-rk17:
-.quad 0xad18000000000000
-rk18:
-.quad 0xa497000000000000
-rk19:
-.quad 0x6ee3000000000000
-rk20:
-.quad 0xe7b5000000000000
-
-
-
-mask1:
-.octa 0x80808080808080808080808080808080
-mask2:
-.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
-
-SHUF_MASK:
-.octa 0x000102030405060708090A0B0C0D0E0F
-
-pshufb_shf_table:
-# use these values for shift constants for the pshufb instruction
-# different alignments result in values as shown:
-# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
-# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
-# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
-# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
-# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
-# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
-# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
-# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
-# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
-# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
-# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
-# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
-# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
-# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
-# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
-.octa 0x8f8e8d8c8b8a89888786858483828100
-.octa 0x000e0d0c0b0a09080706050403020100
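The rk1..rk20 constants in the deleted table are all of the form x^(32*k) mod Q over GF(2), with Q = 0x18BB70000 (the 0x8BB7 polynomial scaled to 32 bits plus the implicit top bit), as the comments above spell out. A hedged sketch of how such a constant can be recomputed with plain shift-and-xor arithmetic; the helper is illustrative and any result should be checked against the table above rather than trusted blindly:

#include <stdint.h>

/* x^n mod Q over GF(2); Q is given with its implicit top bit (bit "deg"). */
static uint64_t gf2_xpow_mod(unsigned int n, uint64_t q, unsigned int deg)
{
	uint64_t r = 1;				/* the polynomial "1" */
	unsigned int i;

	for (i = 0; i < n; i++) {
		r <<= 1;			/* multiply by x */
		if (r & (1ULL << deg))
			r ^= q;			/* reduce once degree hits deg(Q) */
	}
	return r;
}

/* For example, rk6 is documented above as 2^(32*2) mod Q << 32, i.e.:
 *	uint64_t rk6 = gf2_xpow_mod(32 * 2, 0x18BB70000ULL, 32) << 32;
 */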
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
deleted file mode 100644
index 7845d7fd54c0..000000000000
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
- *
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <asm/i387.h>
-#include <asm/cpufeature.h>
-#include <asm/cpu_device_id.h>
-
-asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
- size_t len);
-
-struct chksum_desc_ctx {
- __u16 crc;
-};
-
-/*
- * Steps through buffer one byte at at time, calculates reflected
- * crc using table.
- */
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- if (irq_fpu_usable()) {
- kernel_fpu_begin();
- ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
- kernel_fpu_end();
- } else
- ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__u16 *)out = ctx->crc;
- return 0;
-}
-
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
- u8 *out)
-{
- if (irq_fpu_usable()) {
- kernel_fpu_begin();
- *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
- kernel_fpu_end();
- } else
- *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, length, out);
-}
-
-static struct shash_alg alg = {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crct10dif",
- .cra_driver_name = "crct10dif-pclmul",
- .cra_priority = 200,
- .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static const struct x86_cpu_id crct10dif_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
-
-static int __init crct10dif_intel_mod_init(void)
-{
- if (!x86_match_cpu(crct10dif_cpu_id))
- return -ENODEV;
-
- return crypto_register_shash(&alg);
-}
-
-static void __exit crct10dif_intel_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crct10dif_intel_mod_init);
-module_exit(crct10dif_intel_mod_fini);
-
-MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
-MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("crct10dif");
-MODULE_ALIAS("crct10dif-pclmul");
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 5f9a1243190e..d2b12988d2ed 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -28,7 +28,7 @@ struct x86_cpu {
#ifdef CONFIG_HOTPLUG_CPU
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
-extern void __cpuinit start_cpu0(void);
+extern void start_cpu0(void);
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
extern int _debug_hotplug_cpu(int cpu, int action);
#endif
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6bc3985ee473..f98bd6625318 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128
extern void __init load_ucode_bsp(void);
-extern void __cpuinit load_ucode_ap(void);
+extern void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
#else
static inline void __init load_ucode_bsp(void) {}
-static inline void __cpuinit load_ucode_ap(void) {}
+static inline void load_ucode_ap(void) {}
static inline int __init save_microcode_in_initrd(void)
{
return 0;
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index c6b043f40271..50e5c58ced23 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -67,11 +67,11 @@ extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
#endif
extern void __init load_ucode_amd_bsp(void);
-extern void __cpuinit load_ucode_amd_ap(void);
+extern void load_ucode_amd_ap(void);
extern int __init save_microcode_in_initrd_amd(void);
#else
static inline void __init load_ucode_amd_bsp(void) {}
-static inline void __cpuinit load_ucode_amd_ap(void) {}
+static inline void load_ucode_amd_ap(void) {}
static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
#endif
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 87a085333cbf..9067166409bf 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -65,12 +65,12 @@ update_match_revision(struct microcode_header_intel *mc_header, int rev);
#ifdef CONFIG_MICROCODE_INTEL_EARLY
extern void __init load_ucode_intel_bsp(void);
-extern void __cpuinit load_ucode_intel_ap(void);
+extern void load_ucode_intel_ap(void);
extern void show_ucode_info_early(void);
extern int __init save_microcode_in_initrd_intel(void);
#else
static inline __init void load_ucode_intel_bsp(void) {}
-static inline __cpuinit void load_ucode_intel_ap(void) {}
+static inline void load_ucode_intel_ap(void) {}
static inline void show_ucode_info_early(void) {}
static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
#endif
diff --git a/arch/x86/include/asm/mmconfig.h b/arch/x86/include/asm/mmconfig.h
index 9b119da1d105..04a3fed22cfe 100644
--- a/arch/x86/include/asm/mmconfig.h
+++ b/arch/x86/include/asm/mmconfig.h
@@ -2,8 +2,8 @@
#define _ASM_X86_MMCONFIG_H
#ifdef CONFIG_PCI_MMCONFIG
-extern void __cpuinit fam10h_check_enable_mmcfg(void);
-extern void __cpuinit check_enable_amd_mmconf_dmi(void);
+extern void fam10h_check_enable_mmcfg(void);
+extern void check_enable_amd_mmconf_dmi(void);
#else
static inline void fam10h_check_enable_mmcfg(void) { }
static inline void check_enable_amd_mmconf_dmi(void) { }
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 3e2f42a4b872..626cf70082d7 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
#define default_get_smp_config x86_init_uint_noop
#endif
-void __cpuinit generic_processor_info(int apicid, int version);
+void generic_processor_info(int apicid, int version);
#ifdef CONFIG_ACPI
extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 1b99ee5c9f00..4064acae625d 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -39,7 +39,7 @@ static inline void set_apicid_to_node(int apicid, s16 node)
__apicid_to_node[apicid] = node;
}
-extern int __cpuinit numa_cpu_node(int cpu);
+extern int numa_cpu_node(int cpu);
#else /* CONFIG_NUMA */
static inline void set_apicid_to_node(int apicid, s16 node)
@@ -60,8 +60,8 @@ static inline int numa_cpu_node(int cpu)
extern void numa_set_node(int cpu, int node);
extern void numa_clear_node(int cpu);
extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern void numa_add_cpu(int cpu);
+extern void numa_remove_cpu(int cpu);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 29937c4f6ff8..24cf5aefb704 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -164,7 +164,7 @@ extern const struct seq_operations cpuinfo_op;
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);
-extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c);
+extern void fpu_detect(struct cpuinfo_x86 *c);
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 60bef663609a..bade6ac3b14f 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -27,7 +27,7 @@ extern int of_ioapic;
extern u64 initial_dtb;
extern void add_dtb(u64 data);
extern void x86_add_irq_domains(void);
-void __cpuinit x86_of_pci_init(void);
+void x86_of_pci_init(void);
void x86_dtb_init(void);
#else
static inline void add_dtb(u64 data) { }
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index b073aaea747c..4137890e88e3 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -179,7 +179,7 @@ static inline int wbinvd_on_all_cpus(void)
}
#endif /* CONFIG_SMP */
-extern unsigned disabled_cpus __cpuinitdata;
+extern unsigned disabled_cpus;
#ifdef CONFIG_X86_32_SMP
/*
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d81a972dd506..2627a81253ee 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -195,7 +195,7 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
return 0;
}
-static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+static void acpi_register_lapic(int id, u8 enabled)
{
unsigned int ver = 0;
@@ -607,7 +607,7 @@ void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
-static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
@@ -620,7 +620,7 @@ static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
#endif
}
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 2a34aaf3c8f1..33120100ff5e 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,9 +48,20 @@ int x86_acpi_suspend_lowlevel(void)
#ifndef CONFIG_64BIT
native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
+ /*
+ * We have to check that we can write back the value, and not
+ * just read it. At least on 90 nm Pentium M (Family 6, Model
+ * 13), reading an invalid MSR is not guaranteed to trap, see
+ * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
+ * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
+ * nm process with 512-KB L2 Cache Specification Update".
+ */
if (!rdmsr_safe(MSR_EFER,
&header->pmode_efer_low,
- &header->pmode_efer_high))
+ &header->pmode_efer_high) &&
+ !wrmsr_safe(MSR_EFER,
+ header->pmode_efer_low,
+ header->pmode_efer_high))
header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */
@@ -61,7 +72,10 @@ int x86_acpi_suspend_lowlevel(void)
}
if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
&header->pmode_misc_en_low,
- &header->pmode_misc_en_high))
+ &header->pmode_misc_en_high) &&
+ !wrmsr_safe(MSR_IA32_MISC_ENABLE,
+ header->pmode_misc_en_low,
+ header->pmode_misc_en_high))
header->pmode_behavior |=
(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
header->realmode_flags = acpi_realmode_flags;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 99663b59123a..eca89c53a7f5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -58,7 +58,7 @@
unsigned int num_processors;
-unsigned disabled_cpus __cpuinitdata;
+unsigned disabled_cpus;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
@@ -544,7 +544,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
* Setup the local APIC timer for this CPU. Copy the initialized values
* of the boot CPU and register the clock event in the framework.
*/
-static void __cpuinit setup_APIC_timer(void)
+static void setup_APIC_timer(void)
{
struct clock_event_device *levt = &__get_cpu_var(lapic_events);
@@ -866,7 +866,7 @@ void __init setup_boot_APIC_clock(void)
setup_APIC_timer();
}
-void __cpuinit setup_secondary_APIC_clock(void)
+void setup_secondary_APIC_clock(void)
{
setup_APIC_timer();
}
@@ -1229,7 +1229,7 @@ void __init init_bsp_APIC(void)
apic_write(APIC_LVT1, value);
}
-static void __cpuinit lapic_setup_esr(void)
+static void lapic_setup_esr(void)
{
unsigned int oldvalue, value, maxlvt;
@@ -1276,7 +1276,7 @@ static void __cpuinit lapic_setup_esr(void)
 * Used to setup local APIC while initializing BSP or bringing up APs.
* Always called with preemption disabled.
*/
-void __cpuinit setup_local_APIC(void)
+void setup_local_APIC(void)
{
int cpu = smp_processor_id();
unsigned int value, queued;
@@ -1471,7 +1471,7 @@ void __cpuinit setup_local_APIC(void)
#endif
}
-void __cpuinit end_local_APIC_setup(void)
+void end_local_APIC_setup(void)
{
lapic_setup_esr();
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
apic_write(APIC_LVT1, value);
}
-void __cpuinit generic_processor_info(int apicid, int version)
+void generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2377,7 +2377,7 @@ static struct syscore_ops lapic_syscore_ops = {
.suspend = lapic_suspend,
};
-static void __cpuinit apic_pm_activate(void)
+static void apic_pm_activate(void)
{
apic_pm_state.active = 1;
}
@@ -2402,7 +2402,7 @@ static void apic_pm_activate(void) { }
#ifdef CONFIG_X86_64
-static int __cpuinit apic_cluster_num(void)
+static int apic_cluster_num(void)
{
int i, clusters, zeros;
unsigned id;
@@ -2447,10 +2447,10 @@ static int __cpuinit apic_cluster_num(void)
return clusters;
}
-static int __cpuinitdata multi_checked;
-static int __cpuinitdata multi;
+static int multi_checked;
+static int multi;
-static int __cpuinit set_multi(const struct dmi_system_id *d)
+static int set_multi(const struct dmi_system_id *d)
{
if (multi)
return 0;
@@ -2459,7 +2459,7 @@ static int __cpuinit set_multi(const struct dmi_system_id *d)
return 0;
}
-static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
+static const struct dmi_system_id multi_dmi_table[] = {
{
.callback = set_multi,
.ident = "IBM System Summit2",
@@ -2471,7 +2471,7 @@ static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
{}
};
-static void __cpuinit dmi_check_multi(void)
+static void dmi_check_multi(void)
{
if (multi_checked)
return;
@@ -2488,7 +2488,7 @@ static void __cpuinit dmi_check_multi(void)
* multi-chassis.
* Use DMI to check them
*/
-__cpuinit int apic_is_clustered_box(void)
+int apic_is_clustered_box(void)
{
dmi_check_multi();
if (multi)
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9a9110918ca7..3e67f9e3d7ef 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -74,7 +74,7 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
return initial_apic_id >> index_msb;
}
-static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
union numachip_csr_g3_ext_irq_gen int_gen;
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 0874799a98c6..c55224731b2d 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -130,7 +130,7 @@ int es7000_plat;
*/
-static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
{
unsigned long vect = 0, psaival = 0;
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d661ee95cabf..1e42e8f305ee 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -105,7 +105,7 @@ static void __init smp_dump_qct(void)
}
}
-void __cpuinit numaq_tsc_disable(void)
+void numaq_tsc_disable(void)
{
if (!found_numaq)
return;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index c88baa4ff0e5..140e29db478d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,7 +148,7 @@ static void init_x2apic_ldr(void)
/*
* At CPU state changes, update the x2apic cluster sibling info.
*/
-static int __cpuinit
+static int
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int this_cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 63092afb142e..1191ac1c9d25 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
-static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
unsigned long val;
@@ -416,7 +416,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};
-static __cpuinit void set_x2apic_extra_bits(int pnode)
+static void set_x2apic_extra_bits(int pnode)
{
__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}
@@ -735,7 +735,7 @@ static void uv_heartbeat(unsigned long ignored)
mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
-static void __cpuinit uv_heartbeat_enable(int cpu)
+static void uv_heartbeat_enable(int cpu)
{
while (!uv_cpu_hub_info(cpu)->scir.enabled) {
struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
@@ -752,7 +752,7 @@ static void __cpuinit uv_heartbeat_enable(int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
-static void __cpuinit uv_heartbeat_disable(int cpu)
+static void uv_heartbeat_disable(int cpu)
{
if (uv_cpu_hub_info(cpu)->scir.enabled) {
uv_cpu_hub_info(cpu)->scir.enabled = 0;
@@ -764,8 +764,8 @@ static void __cpuinit uv_heartbeat_disable(int cpu)
/*
* cpu hotplug notifier
*/
-static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
long cpu = (long)hcpu;
@@ -835,7 +835,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
* Called on each cpu to initialize the per_cpu UV data area.
* FIXME: hotplug not supported yet
*/
-void __cpuinit uv_cpu_init(void)
+void uv_cpu_init(void)
{
 /* CPU 0 initialization will be done via uv_system_init. */
if (!uv_blade_info)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c587a8757227..f654ecefea5b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -69,7 +69,7 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
extern void vide(void);
__asm__(".align 4\nvide: ret");
-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
+static void init_amd_k5(struct cpuinfo_x86 *c)
{
/*
* General Systems BIOSen alias the cpu frequency registers
@@ -87,7 +87,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
}
-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+static void init_amd_k6(struct cpuinfo_x86 *c)
{
u32 l, h;
int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
@@ -179,7 +179,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void amd_k7_smp_check(struct cpuinfo_x86 *c)
{
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
@@ -222,7 +222,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
}
-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
{
u32 l, h;
@@ -267,7 +267,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
* To workaround broken NUMA config. Read the comment in
* srat_detect_node().
*/
-static int __cpuinit nearby_node(int apicid)
+static int nearby_node(int apicid)
{
int i, node;
@@ -292,7 +292,7 @@ static int __cpuinit nearby_node(int apicid)
* (2) AMD processors supporting compute units
*/
#ifdef CONFIG_X86_HT
-static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
+static void amd_get_topology(struct cpuinfo_x86 *c)
{
u32 nodes, cores_per_cu = 1;
u8 node_id;
@@ -342,7 +342,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
* Assumes number of cores is a power of two.
*/
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
unsigned bits;
@@ -369,7 +369,7 @@ u16 amd_get_nb_id(int cpu)
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
int cpu = smp_processor_id();
@@ -421,7 +421,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
unsigned bits, ecx;
@@ -447,7 +447,7 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
#endif
}
-static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
+static void bsp_init_amd(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -475,7 +475,7 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void early_init_amd(struct cpuinfo_x86 *c)
{
early_init_amd_mc(c);
@@ -514,7 +514,7 @@ static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(const int *erratum);
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+static void init_amd(struct cpuinfo_x86 *c)
{
u32 dummy;
unsigned long long value;
@@ -740,8 +740,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
#ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
- unsigned int size)
+static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
if ((c->x86 == 6)) {
@@ -757,7 +756,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
}
#endif
-static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
+static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
{
tlb_flushall_shift = 5;
@@ -765,7 +764,7 @@ static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
tlb_flushall_shift = 4;
}
-static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
+static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
u32 ebx, eax, ecx, edx;
u16 mask = 0xfff;
@@ -820,7 +819,7 @@ static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
cpu_set_tlb_flushall_shift(c);
}
-static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
+static const struct cpu_dev amd_cpu_dev = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 159103c0b1f4..fbf6c3bc2400 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -11,7 +11,7 @@
#ifdef CONFIG_X86_OOSTORE
-static u32 __cpuinit power2(u32 x)
+static u32 power2(u32 x)
{
u32 s = 1;
@@ -25,7 +25,7 @@ static u32 __cpuinit power2(u32 x)
/*
* Set up an actual MCR
*/
-static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
+static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
{
u32 lo, hi;
@@ -42,7 +42,7 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
*
* Shortcut: We know you can't put 4Gig of RAM on a winchip
*/
-static u32 __cpuinit ramtop(void)
+static u32 ramtop(void)
{
u32 clip = 0xFFFFFFFFUL;
u32 top = 0;
@@ -91,7 +91,7 @@ static u32 __cpuinit ramtop(void)
/*
* Compute a set of MCR's to give maximum coverage
*/
-static int __cpuinit centaur_mcr_compute(int nr, int key)
+static int centaur_mcr_compute(int nr, int key)
{
u32 mem = ramtop();
u32 root = power2(mem);
@@ -157,7 +157,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
return ct;
}
-static void __cpuinit centaur_create_optimal_mcr(void)
+static void centaur_create_optimal_mcr(void)
{
int used;
int i;
@@ -181,7 +181,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
wrmsr(MSR_IDT_MCR0+i, 0, 0);
}
-static void __cpuinit winchip2_create_optimal_mcr(void)
+static void winchip2_create_optimal_mcr(void)
{
u32 lo, hi;
int used;
@@ -217,7 +217,7 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
/*
* Handle the MCR key on the Winchip 2.
*/
-static void __cpuinit winchip2_unprotect_mcr(void)
+static void winchip2_unprotect_mcr(void)
{
u32 lo, hi;
u32 key;
@@ -229,7 +229,7 @@ static void __cpuinit winchip2_unprotect_mcr(void)
wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}
-static void __cpuinit winchip2_protect_mcr(void)
+static void winchip2_protect_mcr(void)
{
u32 lo, hi;
@@ -247,7 +247,7 @@ static void __cpuinit winchip2_protect_mcr(void)
#define RNG_ENABLED (1 << 3)
#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
-static void __cpuinit init_c3(struct cpuinfo_x86 *c)
+static void init_c3(struct cpuinfo_x86 *c)
{
u32 lo, hi;
@@ -318,7 +318,7 @@ enum {
EAMD3D = 1<<20,
};
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+static void early_init_centaur(struct cpuinfo_x86 *c)
{
switch (c->x86) {
#ifdef CONFIG_X86_32
@@ -337,7 +337,7 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
#endif
}
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+static void init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
char *name;
@@ -468,7 +468,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
#endif
}
-static unsigned int __cpuinit
+static unsigned int
centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
#ifdef CONFIG_X86_32
@@ -488,7 +488,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
return size;
}
-static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
+static const struct cpu_dev centaur_cpu_dev = {
.c_vendor = "Centaur",
.c_ident = { "CentaurHauls" },
.c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 548bd039784e..25eb2747b063 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -63,7 +63,7 @@ void __init setup_cpu_local_masks(void)
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
+static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
cpu_detect_cache_sizes(c);
@@ -80,13 +80,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
#endif
}
-static const struct cpu_dev __cpuinitconst default_cpu = {
+static const struct cpu_dev default_cpu = {
.c_init = default_init,
.c_vendor = "Unknown",
.c_x86_vendor = X86_VENDOR_UNKNOWN,
};
-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
@@ -160,8 +160,8 @@ static int __init x86_xsaveopt_setup(char *s)
__setup("noxsaveopt", x86_xsaveopt_setup);
#ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
+static int cachesize_override = -1;
+static int disable_x86_serial_nr = 1;
static int __init cachesize_setup(char *str)
{
@@ -215,12 +215,12 @@ static inline int flag_is_changeable_p(u32 flag)
}
/* Probe for the CPUID instruction */
-int __cpuinit have_cpuid_p(void)
+int have_cpuid_p(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
unsigned long lo, hi;
@@ -298,7 +298,7 @@ struct cpuid_dependent_feature {
u32 level;
};
-static const struct cpuid_dependent_feature __cpuinitconst
+static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
{ X86_FEATURE_MWAIT, 0x00000005 },
{ X86_FEATURE_DCA, 0x00000009 },
@@ -306,7 +306,7 @@ cpuid_dependent_features[] = {
{ 0, 0 }
};
-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
const struct cpuid_dependent_feature *df;
@@ -344,7 +344,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
*/
/* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
const struct cpu_model_info *info;
@@ -364,8 +364,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
return NULL; /* Not found */
}
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS];
+__u32 cpu_caps_set[NCAPINTS];
void load_percpu_segment(int cpu)
{
@@ -394,9 +394,9 @@ void switch_to_new_gdt(int cpu)
load_percpu_segment(cpu);
}
-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
+static void get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
char *p, *q;
@@ -425,7 +425,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
}
}
-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
+void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
unsigned int n, dummy, ebx, ecx, edx, l2size;
@@ -479,7 +479,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO];
*/
s8 __read_mostly tlb_flushall_shift = -1;
-void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
if (this_cpu->c_detect_tlb)
this_cpu->c_detect_tlb(c);
@@ -493,7 +493,7 @@ void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
tlb_flushall_shift);
}
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
u32 eax, ebx, ecx, edx;
@@ -544,7 +544,7 @@ out:
#endif
}
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
char *v = c->x86_vendor_id;
int i;
@@ -571,7 +571,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
this_cpu = &default_cpu;
}
-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+void cpu_detect(struct cpuinfo_x86 *c)
{
/* Get vendor name */
cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -601,7 +601,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
}
}
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
u32 ebx;
@@ -652,7 +652,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
init_scattered_cpuid_features(c);
}
-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
int i;
@@ -769,7 +769,7 @@ void __init early_cpu_init(void)
* unless we can find a reliable way to detect all the broken cases.
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
*/
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
clear_cpu_cap(c, X86_FEATURE_NOPL);
@@ -778,7 +778,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
#endif
}
-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+static void generic_identify(struct cpuinfo_x86 *c)
{
c->extended_cpuid_level = 0;
@@ -815,7 +815,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+static void identify_cpu(struct cpuinfo_x86 *c)
{
int i;
@@ -960,7 +960,7 @@ void __init identify_boot_cpu(void)
cpu_detect_tlb(&boot_cpu_data);
}
-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
BUG_ON(c == &boot_cpu_data);
identify_cpu(c);
@@ -975,14 +975,14 @@ struct msr_range {
unsigned max;
};
-static const struct msr_range msr_range_array[] __cpuinitconst = {
+static const struct msr_range msr_range_array[] = {
{ 0x00000000, 0x00000418},
{ 0xc0000000, 0xc000040b},
{ 0xc0010000, 0xc0010142},
{ 0xc0011000, 0xc001103b},
};
-static void __cpuinit __print_cpu_msr(void)
+static void __print_cpu_msr(void)
{
unsigned index_min, index_max;
unsigned index;
@@ -1001,7 +1001,7 @@ static void __cpuinit __print_cpu_msr(void)
}
}
-static int show_msr __cpuinitdata;
+static int show_msr;
static __init int setup_show_msr(char *arg)
{
@@ -1022,7 +1022,7 @@ static __init int setup_noclflush(char *arg)
}
__setup("noclflush", setup_noclflush);
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void print_cpu_info(struct cpuinfo_x86 *c)
{
const char *vendor = NULL;
@@ -1051,7 +1051,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
print_cpu_msr(c);
}
-void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
+void print_cpu_msr(struct cpuinfo_x86 *c)
{
if (c->cpu_index < show_msr)
__print_cpu_msr();
@@ -1216,7 +1216,7 @@ static void dbg_restore_debug_regs(void)
*/
#ifdef CONFIG_X86_64
-void __cpuinit cpu_init(void)
+void cpu_init(void)
{
struct orig_ist *oist;
struct task_struct *me;
@@ -1315,7 +1315,7 @@ void __cpuinit cpu_init(void)
#else
-void __cpuinit cpu_init(void)
+void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
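The cpu_detect() and get_cpu_vendor() changes above operate on the vendor identifier that CPUID leaf 0 returns. For readers who want to poke at the same data outside the kernel, here is a minimal user-space sketch using GCC's <cpuid.h> helper; it is illustrative only and not part of this patch:

    #include <cpuid.h>   /* __get_cpuid(), GCC/Clang */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;

        /* The 12-byte vendor string comes back in EBX, EDX, ECX order. */
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';

        printf("max basic leaf %u, vendor \"%s\"\n", eax, vendor);
        return 0;
    }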
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 7582f475b163..d0969c75ab54 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,7 +15,7 @@
/*
* Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
*/
-static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
unsigned char ccr2, ccr3;
@@ -44,7 +44,7 @@ static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
}
}
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
unsigned long flags;
@@ -59,25 +59,25 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
* Actually since bugs.h doesn't even reference this perhaps someone should
* fix the documentation ???
*/
-static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
+static unsigned char Cx86_dir0_msb = 0;
-static const char __cpuinitconst Cx86_model[][9] = {
+static const char Cx86_model[][9] = {
"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
"M II ", "Unknown"
};
-static const char __cpuinitconst Cx486_name[][5] = {
+static const char Cx486_name[][5] = {
"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
"SRx2", "DRx2"
};
-static const char __cpuinitconst Cx486S_name[][4] = {
+static const char Cx486S_name[][4] = {
"S", "S2", "Se", "S2e"
};
-static const char __cpuinitconst Cx486D_name[][4] = {
+static const char Cx486D_name[][4] = {
"DX", "DX2", "?", "?", "?", "DX4"
};
-static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
-static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
+static char Cx86_cb[] = "?.5x Core/Bus Clock";
+static const char cyrix_model_mult1[] = "12??43";
+static const char cyrix_model_mult2[] = "12233445";
/*
* Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -87,7 +87,7 @@ static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
* FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
*/
-static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
+static void check_cx686_slop(struct cpuinfo_x86 *c)
{
unsigned long flags;
@@ -112,7 +112,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
}
-static void __cpuinit set_cx86_reorder(void)
+static void set_cx86_reorder(void)
{
u8 ccr3;
@@ -127,7 +127,7 @@ static void __cpuinit set_cx86_reorder(void)
setCx86(CX86_CCR3, ccr3);
}
-static void __cpuinit set_cx86_memwb(void)
+static void set_cx86_memwb(void)
{
printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
@@ -143,7 +143,7 @@ static void __cpuinit set_cx86_memwb(void)
* Configure later MediaGX and/or Geode processor.
*/
-static void __cpuinit geode_configure(void)
+static void geode_configure(void)
{
unsigned long flags;
u8 ccr3;
@@ -166,7 +166,7 @@ static void __cpuinit geode_configure(void)
local_irq_restore(flags);
}
-static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+static void early_init_cyrix(struct cpuinfo_x86 *c)
{
unsigned char dir0, dir0_msn, dir1 = 0;
@@ -185,7 +185,7 @@ static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
+static void init_cyrix(struct cpuinfo_x86 *c)
{
unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
char *buf = c->x86_model_id;
@@ -356,7 +356,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
/*
* Handle National Semiconductor branded processors
*/
-static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
+static void init_nsc(struct cpuinfo_x86 *c)
{
/*
* There may be GX1 processors in the wild that are branded
@@ -405,7 +405,7 @@ static inline int test_cyrix_52div(void)
return (unsigned char) (test >> 8) == 0x02;
}
-static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
+static void cyrix_identify(struct cpuinfo_x86 *c)
{
/* Detect Cyrix with disabled CPUID */
if (c->x86 == 4 && test_cyrix_52div()) {
@@ -441,7 +441,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
}
}
-static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
+static const struct cpu_dev cyrix_cpu_dev = {
.c_vendor = "Cyrix",
.c_ident = { "CyrixInstead" },
.c_early_init = early_init_cyrix,
@@ -452,7 +452,7 @@ static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
cpu_dev_register(cyrix_cpu_dev);
-static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
+static const struct cpu_dev nsc_cpu_dev = {
.c_vendor = "NSC",
.c_ident = { "Geode by NSC" },
.c_init = init_nsc,
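The __cpuinitconst and __cpuinitdata removals in this file, like the __cpuinit removals everywhere else in this series, drop annotations that once steered CPU-bringup code and data into sections the kernel could discard when CPU hotplug was configured out; with hotplug enabled they expanded to nothing, so deleting them does not change generated code. Roughly — reconstructed from memory of older <linux/init.h>, not quoted from this tree — the family looked like this:

    /* Approximate historical shape; illustrative only. */
    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinit
    #define __cpuinitdata
    #define __cpuinitconst
    #else
    #define __cpuinit       __section(.cpuinit.text)
    #define __cpuinitdata   __section(.cpuinit.data)
    #define __cpuinitconst  __section(.cpuinit.rodata)
    #endif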
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 1e7e84a02eba..87279212d318 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -60,7 +60,7 @@ detect_hypervisor_vendor(void)
}
}
-void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
+void init_hypervisor(struct cpuinfo_x86 *c)
{
if (x86_hyper && x86_hyper->set_cpu_features)
x86_hyper->set_cpu_features(c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9b0c441c03f5..ec7299566f79 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -26,7 +26,7 @@
#include <asm/apic.h>
#endif
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -163,7 +163,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
* This is called before we do cpu ident work
*/
-int __cpuinit ppro_with_ram_bug(void)
+int ppro_with_ram_bug(void)
{
/* Uses data from early_cpu_detect now */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -176,7 +176,7 @@ int __cpuinit ppro_with_ram_bug(void)
return 0;
}
-static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+static void intel_smp_check(struct cpuinfo_x86 *c)
{
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
@@ -196,7 +196,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
{
unsigned long lo, hi;
@@ -275,12 +275,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
intel_smp_check(c);
}
#else
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
unsigned node;
@@ -300,7 +300,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
/*
* find out the number of processor cores on the die
*/
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
+static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
unsigned int eax, ebx, ecx, edx;
@@ -315,7 +315,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
return 1;
}
-static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
+static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
@@ -353,7 +353,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+static void init_intel(struct cpuinfo_x86 *c)
{
unsigned int l2 = 0;
@@ -472,7 +472,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
}
#ifdef CONFIG_X86_32
-static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/*
* Intel PIII Tualatin. This comes in two flavours.
@@ -506,7 +506,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
#define STLB_4K 0x41
-static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
+static const struct _tlb_table intel_tlb_table[] = {
{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
{ 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
{ 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
@@ -536,7 +536,7 @@ static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
{ 0x00, 0, 0 }
};
-static void __cpuinit intel_tlb_lookup(const unsigned char desc)
+static void intel_tlb_lookup(const unsigned char desc)
{
unsigned char k;
if (desc == 0)
@@ -605,7 +605,7 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc)
}
}
-static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
switch ((c->x86 << 8) + c->x86_model) {
case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
@@ -634,7 +634,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
+static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
int i, j, n;
unsigned int regs[4];
@@ -661,7 +661,7 @@ static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
intel_tlb_flushall_shift_set(c);
}
-static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
+static const struct cpu_dev intel_cpu_dev = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
#ifdef CONFIG_X86_32
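intel_detect_tlb() and intel_tlb_lookup() above walk the one-byte descriptors that CPUID leaf 2 packs into EAX..EDX and match them against intel_tlb_table[]. A stripped-down user-space sketch of the same walk (the kernel additionally repeats the query when the low byte of EAX asks for it; names and output here are illustrative):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int regs[4];
        int i, j;

        if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
            return 1;

        regs[0] &= 0xffffff00;  /* low byte of EAX is the repeat count, not a descriptor */

        for (i = 0; i < 4; i++) {
            if (regs[i] & (1u << 31))   /* bit 31 set: register holds no descriptors */
                continue;
            for (j = 0; j < 4; j++) {
                unsigned char desc = (regs[i] >> (8 * j)) & 0xff;
                if (desc)
                    printf("cache/TLB descriptor 0x%02x\n", desc);
            }
        }
        return 0;
    }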
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 8dc72dda66fe..1414c90feaba 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -37,7 +37,7 @@ struct _cache_table {
/* All the cache descriptor types we care about (no TLB or
trace cache entries) */
-static const struct _cache_table __cpuinitconst cache_table[] =
+static const struct _cache_table cache_table[] =
{
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -203,7 +203,7 @@ union l3_cache {
unsigned val;
};
-static const unsigned short __cpuinitconst assocs[] = {
+static const unsigned short assocs[] = {
[1] = 1,
[2] = 2,
[4] = 4,
@@ -217,10 +217,10 @@ static const unsigned short __cpuinitconst assocs[] = {
[0xf] = 0xffff /* fully associative - no way to show this currently */
};
-static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
-static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
+static const unsigned char levels[] = { 1, 1, 2, 3 };
+static const unsigned char types[] = { 1, 2, 3, 3 };
-static void __cpuinit
+static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
union _cpuid4_leaf_ecx *ecx)
@@ -302,7 +302,7 @@ struct _cache_attr {
/*
* L3 cache descriptors
*/
-static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
+static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
struct amd_l3_cache *l3 = &nb->l3_cache;
unsigned int sc0, sc1, sc2, sc3;
@@ -325,7 +325,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
int node;
@@ -528,8 +528,7 @@ static struct _cache_attr subcaches =
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
static int
-__cpuinit cpuid4_cache_lookup_regs(int index,
- struct _cpuid4_info_regs *this_leaf)
+cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
@@ -560,7 +559,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
return 0;
}
-static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
+static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
unsigned int eax, ebx, ecx, edx, op;
union _cpuid4_leaf_eax cache_eax;
@@ -580,7 +579,7 @@ static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
return i;
}
-void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
+void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
if (cpu_has_topoext) {
@@ -593,7 +592,7 @@ void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
}
}
-unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
/* Cache sizes */
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
@@ -744,7 +743,7 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#ifdef CONFIG_SMP
-static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
+static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf;
int i, sibling;
@@ -793,7 +792,7 @@ static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
return 1;
}
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
@@ -828,7 +827,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
}
}
}
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
int sibling;
@@ -841,16 +840,16 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
}
}
#else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
-static void __cpuinit free_cache_attributes(unsigned int cpu)
+static void free_cache_attributes(unsigned int cpu)
{
int i;
@@ -861,7 +860,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
per_cpu(ici_cpuid4_info, cpu) = NULL;
}
-static void __cpuinit get_cpu_leaves(void *_retval)
+static void get_cpu_leaves(void *_retval)
{
int j, *retval = _retval, cpu = smp_processor_id();
@@ -881,7 +880,7 @@ static void __cpuinit get_cpu_leaves(void *_retval)
}
}
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static int detect_cache_attributes(unsigned int cpu)
{
int retval;
@@ -1015,7 +1014,7 @@ static struct attribute *default_attrs[] = {
};
#ifdef CONFIG_AMD_NB
-static struct attribute ** __cpuinit amd_l3_attrs(void)
+static struct attribute **amd_l3_attrs(void)
{
static struct attribute **attrs;
int n;
@@ -1091,7 +1090,7 @@ static struct kobj_type ktype_percpu_entry = {
.sysfs_ops = &sysfs_ops,
};
-static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
kfree(per_cpu(ici_cache_kobject, cpu));
kfree(per_cpu(ici_index_kobject, cpu));
@@ -1100,7 +1099,7 @@ static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
free_cache_attributes(cpu);
}
-static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
+static int cpuid4_cache_sysfs_init(unsigned int cpu)
{
int err;
@@ -1132,7 +1131,7 @@ err_out:
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device *dev)
+static int cache_add_dev(struct device *dev)
{
unsigned int cpu = dev->id;
unsigned long i, j;
@@ -1183,7 +1182,7 @@ static int __cpuinit cache_add_dev(struct device *dev)
return 0;
}
-static void __cpuinit cache_remove_dev(struct device *dev)
+static void cache_remove_dev(struct device *dev)
{
unsigned int cpu = dev->id;
unsigned long i;
@@ -1200,8 +1199,8 @@ static void __cpuinit cache_remove_dev(struct device *dev)
cpuid4_cache_sysfs_exit(cpu);
}
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev;
@@ -1220,7 +1219,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
+static struct notifier_block cacheinfo_cpu_notifier = {
.notifier_call = cacheinfo_cpu_callback,
};
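cacheinfo_cpu_callback() and cacheinfo_cpu_notifier above are one instance of the CPU-hotplug notifier pattern that recurs through the rest of this diff (mce, therm_throt, the uncore PMUs, kvm, msr, cpuid, tboot, vsyscall). A minimal sketch of that pattern as it existed in this era; module and symbol names are invented for illustration:

    #include <linux/cpu.h>
    #include <linux/module.h>
    #include <linux/notifier.h>

    static int demo_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
            pr_info("demo: cpu %u came online\n", cpu);
            break;
        case CPU_DEAD:
            pr_info("demo: cpu %u went away\n", cpu);
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block demo_cpu_notifier = {
        .notifier_call = demo_cpu_callback,
    };

    static int __init demo_init(void)
    {
        register_hotcpu_notifier(&demo_cpu_notifier);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        unregister_hotcpu_notifier(&demo_cpu_notifier);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");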
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index e2703520d120..c370e1c4468b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -111,8 +111,8 @@ static struct severity {
#ifdef CONFIG_MEMORY_FAILURE
MCESEV(
KEEP, "Action required but unaffected thread is continuable",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR),
- MCGMASK(MCG_STATUS_RIPV, MCG_STATUS_RIPV)
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
+ MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
),
MCESEV(
AR, "Action required: data load error in a user process",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bf49cdbb010f..87a65c939bcd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1363,7 +1363,7 @@ int mce_notify_irq(void)
}
EXPORT_SYMBOL_GPL(mce_notify_irq);
-static int __cpuinit __mcheck_cpu_mce_banks_init(void)
+static int __mcheck_cpu_mce_banks_init(void)
{
int i;
u8 num_banks = mca_cfg.banks;
@@ -1384,7 +1384,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void)
/*
* Initialize Machine Checks for a CPU.
*/
-static int __cpuinit __mcheck_cpu_cap_init(void)
+static int __mcheck_cpu_cap_init(void)
{
unsigned b;
u64 cap;
@@ -1483,7 +1483,7 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
}
/* Add per CPU specific workarounds here */
-static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
struct mca_config *cfg = &mca_cfg;
@@ -1593,7 +1593,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
return 0;
}
-static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
if (c->x86 != 5)
return 0;
@@ -1664,7 +1664,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
* Called for each booted CPU to set up machine checks.
* Must be called with preempt off:
*/
-void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
if (mca_cfg.disabled)
return;
@@ -2082,7 +2082,6 @@ static struct bus_type mce_subsys = {
DEFINE_PER_CPU(struct device *, mce_device);
-__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
@@ -2228,7 +2227,7 @@ static void mce_device_release(struct device *dev)
}
/* Per cpu device init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_device_create(unsigned int cpu)
+static int mce_device_create(unsigned int cpu)
{
struct device *dev;
int err;
@@ -2274,7 +2273,7 @@ error:
return err;
}
-static __cpuinit void mce_device_remove(unsigned int cpu)
+static void mce_device_remove(unsigned int cpu)
{
struct device *dev = per_cpu(mce_device, cpu);
int i;
@@ -2294,7 +2293,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
}
/* Make sure there are no machine checks on offlined CPUs. */
-static void __cpuinit mce_disable_cpu(void *h)
+static void mce_disable_cpu(void *h)
{
unsigned long action = *(unsigned long *)h;
int i;
@@ -2312,7 +2311,7 @@ static void __cpuinit mce_disable_cpu(void *h)
}
}
-static void __cpuinit mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void *h)
{
unsigned long action = *(unsigned long *)h;
int i;
@@ -2331,7 +2330,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int __cpuinit
+static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -2367,7 +2366,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
+static struct notifier_block mce_cpu_notifier = {
.notifier_call = mce_cpu_callback,
};
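mce_device above is declared with DEFINE_PER_CPU, and the threshold-bank code in the next file keeps its state per CPU the same way. A minimal sketch of that facility; names are invented and the snippet is not from this patch:

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, demo_counter);

    /* Bump the copy belonging to the CPU we are currently running on. */
    static void demo_tick(void)
    {
        this_cpu_inc(demo_counter);
    }

    /* Read another CPU's copy, e.g. from a hotplug callback. */
    static unsigned long demo_read(unsigned int cpu)
    {
        return per_cpu(demo_counter, cpu);
    }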
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9cb52767999a..603df4f74640 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -458,10 +458,8 @@ static struct kobj_type threshold_ktype = {
.default_attrs = default_attrs,
};
-static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
- unsigned int bank,
- unsigned int block,
- u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+ unsigned int block, u32 address)
{
struct threshold_block *b = NULL;
u32 low, high;
@@ -543,7 +541,7 @@ out_free:
return err;
}
-static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
+static int __threshold_add_blocks(struct threshold_bank *b)
{
struct list_head *head = &b->blocks->miscj;
struct threshold_block *pos = NULL;
@@ -567,7 +565,7 @@ static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
return err;
}
-static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
struct device *dev = per_cpu(mce_device, cpu);
struct amd_northbridge *nb = NULL;
@@ -632,7 +630,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
}
/* create dir/files for all valid threshold banks */
-static __cpuinit int threshold_create_device(unsigned int cpu)
+static int threshold_create_device(unsigned int cpu)
{
unsigned int bank;
struct threshold_bank **bp;
@@ -736,7 +734,7 @@ static void threshold_remove_device(unsigned int cpu)
}
/* get notified when a cpu comes on/off */
-static void __cpuinit
+static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
switch (action) {
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 41e8e00a6637..3eec7de76efb 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -240,8 +240,7 @@ __setup("int_pln_enable", int_pln_enable_setup);
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct device *dev,
- unsigned int cpu)
+static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
{
int err;
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -267,7 +266,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev,
return err;
}
-static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
+static void thermal_throttle_remove_dev(struct device *dev)
{
sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}
@@ -276,7 +275,7 @@ static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
static DEFINE_MUTEX(therm_cpu_lock);
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int
+static int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
@@ -307,7 +306,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
return notifier_from_errno(err);
}
-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
+static struct notifier_block thermal_throttle_cpu_notifier =
{
.notifier_call = thermal_throttle_cpu_callback,
};
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9e581c5cf6d0..a7c7305030cc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1295,7 +1295,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
struct event_constraint emptyconstraint;
struct event_constraint unconstrained;
-static int __cpuinit
+static int
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 5f0581e713c2..e09f0bfb7b8f 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -851,7 +851,7 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
-static int __cpuinit
+static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index c0c661adf03e..754291adec33 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -288,13 +288,13 @@ static struct pmu amd_l2_pmu = {
.read = amd_uncore_read,
};
-static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
+static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
cpu_to_node(cpu));
}
-static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
+static void amd_uncore_cpu_up_prepare(unsigned int cpu)
{
struct amd_uncore *uncore;
@@ -322,8 +322,8 @@ static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
}
static struct amd_uncore *
-__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
- struct amd_uncore * __percpu *uncores)
+amd_uncore_find_online_sibling(struct amd_uncore *this,
+ struct amd_uncore * __percpu *uncores)
{
unsigned int cpu;
struct amd_uncore *that;
@@ -348,7 +348,7 @@ __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
return this;
}
-static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
+static void amd_uncore_cpu_starting(unsigned int cpu)
{
unsigned int eax, ebx, ecx, edx;
struct amd_uncore *uncore;
@@ -376,8 +376,8 @@ static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
}
}
-static void __cpuinit uncore_online(unsigned int cpu,
- struct amd_uncore * __percpu *uncores)
+static void uncore_online(unsigned int cpu,
+ struct amd_uncore * __percpu *uncores)
{
struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
@@ -388,7 +388,7 @@ static void __cpuinit uncore_online(unsigned int cpu,
cpumask_set_cpu(cpu, uncore->active_mask);
}
-static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
+static void amd_uncore_cpu_online(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_online(cpu, amd_uncore_nb);
@@ -397,8 +397,8 @@ static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
uncore_online(cpu, amd_uncore_l2);
}
-static void __cpuinit uncore_down_prepare(unsigned int cpu,
- struct amd_uncore * __percpu *uncores)
+static void uncore_down_prepare(unsigned int cpu,
+ struct amd_uncore * __percpu *uncores)
{
unsigned int i;
struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
@@ -423,7 +423,7 @@ static void __cpuinit uncore_down_prepare(unsigned int cpu,
}
}
-static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
+static void amd_uncore_cpu_down_prepare(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_down_prepare(cpu, amd_uncore_nb);
@@ -432,8 +432,7 @@ static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
uncore_down_prepare(cpu, amd_uncore_l2);
}
-static void __cpuinit uncore_dead(unsigned int cpu,
- struct amd_uncore * __percpu *uncores)
+static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
@@ -445,7 +444,7 @@ static void __cpuinit uncore_dead(unsigned int cpu,
*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
}
-static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
+static void amd_uncore_cpu_dead(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_dead(cpu, amd_uncore_nb);
@@ -454,7 +453,7 @@ static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
uncore_dead(cpu, amd_uncore_l2);
}
-static int __cpuinit
+static int
amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
void *hcpu)
{
@@ -489,7 +488,7 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
return NOTIFY_OK;
}
-static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
+static struct notifier_block amd_uncore_cpu_notifier_block = {
.notifier_call = amd_uncore_cpu_notifier,
.priority = CPU_PRI_PERF + 1,
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 9dd99751ccf9..cad791dbde95 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3297,7 +3297,7 @@ static void __init uncore_pci_exit(void)
/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);
-static void __cpuinit uncore_kfree_boxes(void)
+static void uncore_kfree_boxes(void)
{
struct intel_uncore_box *box;
@@ -3309,7 +3309,7 @@ static void __cpuinit uncore_kfree_boxes(void)
}
}
-static void __cpuinit uncore_cpu_dying(int cpu)
+static void uncore_cpu_dying(int cpu)
{
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
@@ -3328,7 +3328,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
}
}
-static int __cpuinit uncore_cpu_starting(int cpu)
+static int uncore_cpu_starting(int cpu)
{
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
@@ -3371,7 +3371,7 @@ static int __cpuinit uncore_cpu_starting(int cpu)
return 0;
}
-static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+static int uncore_cpu_prepare(int cpu, int phys_id)
{
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
@@ -3397,7 +3397,7 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
return 0;
}
-static void __cpuinit
+static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
struct intel_uncore_type *type;
@@ -3435,7 +3435,7 @@ uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_c
}
}
-static void __cpuinit uncore_event_exit_cpu(int cpu)
+static void uncore_event_exit_cpu(int cpu)
{
int i, phys_id, target;
@@ -3463,7 +3463,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu)
uncore_change_context(pci_uncores, cpu, target);
}
-static void __cpuinit uncore_event_init_cpu(int cpu)
+static void uncore_event_init_cpu(int cpu)
{
int i, phys_id;
@@ -3479,8 +3479,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
uncore_change_context(pci_uncores, -1, cpu);
}
-static int
- __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int uncore_cpu_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
@@ -3520,7 +3520,7 @@ static int
return NOTIFY_OK;
}
-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+static struct notifier_block uncore_cpu_nb = {
.notifier_call = uncore_cpu_notifier,
/*
* to migrate uncore events, our notifier should be executed
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index feca286c2bb4..88db010845cb 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -52,7 +52,7 @@ static inline int rdrand_long(unsigned long *v)
*/
#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
-void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
+void x86_init_rdrand(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_RANDOM
unsigned long tmp;
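x86_init_rdrand() above sanity-checks the RDRAND feature during CPU bring-up. The same instruction is reachable from user space through a compiler intrinsic; a small sketch, built with -mrdrnd, with the retry loop Intel recommends because RDRAND can transiently report no data:

    #include <immintrin.h>
    #include <stdio.h>

    /* Returns 0 on success, -1 if the hardware kept reporting "not ready". */
    static int get_hw_random(unsigned long long *out)
    {
        int i;

        for (i = 0; i < 10; i++)
            if (_rdrand64_step(out))
                return 0;
        return -1;
    }

    int main(void)
    {
        unsigned long long v;

        if (get_hw_random(&v) == 0)
            printf("rdrand: %llu\n", v);
        else
            printf("rdrand: no entropy available\n");
        return 0;
    }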
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d92b5dad15dd..f2cc63e9cf08 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -24,13 +24,13 @@ enum cpuid_regs {
CR_EBX
};
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
u32 max_level;
u32 regs[4];
const struct cpuid_bit *cb;
- static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 4397e987a1cf..4c60eaf0571c 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -26,7 +26,7 @@
* exists, use it for populating initial_apicid and cpu topology
* detection.
*/
-void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
+void detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned int eax, ebx, ecx, edx, sub_index;
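detect_extended_topology() parses CPUID leaf 0xB, which reports one (APIC-ID shift, level type) pair per sub-leaf until an invalid level is returned. A compact user-space sketch of the same walk; the printed labels are mine:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, sub;

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 0xb)
            return 1;   /* leaf 0xb not available */

        for (sub = 0; ; sub++) {
            unsigned int level_type;

            __cpuid_count(0xb, sub, eax, ebx, ecx, edx);
            level_type = (ecx >> 8) & 0xff; /* 1 = SMT, 2 = core */
            if (!level_type)
                break;                      /* no more topology levels */

            printf("level %u: type %u, x2APIC shift %u, %u logical CPUs\n",
                   sub, level_type, eax & 0x1f, ebx & 0xffff);
        }
        return 0;
    }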
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 28000743bbb0..aa0430d69b90 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -5,7 +5,7 @@
#include <asm/msr.h>
#include "cpu.h"
-static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
+static void early_init_transmeta(struct cpuinfo_x86 *c)
{
u32 xlvl;
@@ -17,7 +17,7 @@ static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
}
}
-static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
+static void init_transmeta(struct cpuinfo_x86 *c)
{
unsigned int cap_mask, uk, max, dummy;
unsigned int cms_rev1, cms_rev2;
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
#endif
}
-static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
+static const struct cpu_dev transmeta_cpu_dev = {
.c_vendor = "Transmeta",
.c_ident = { "GenuineTMx86", "TransmetaCPU" },
.c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index fd2c37bf7acb..202759a14121 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
* so no special init takes place.
*/
-static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
+static const struct cpu_dev umc_cpu_dev = {
.c_vendor = "UMC",
.c_ident = { "UMC UMC UMC" },
.c_models = {
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 03a36321ec54..7076878404ec 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -122,7 +122,7 @@ static bool __init vmware_platform(void)
* so that the kernel could just trust the hypervisor with providing a
* reliable virtual TSC that is suitable for timekeeping.
*/
-static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
+static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1e4dbcfe6d31..7d9481c743f8 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -137,7 +137,7 @@ static const struct file_operations cpuid_fops = {
.open = cpuid_open,
};
-static __cpuinit int cpuid_device_create(int cpu)
+static int cpuid_device_create(int cpu)
{
struct device *dev;
@@ -151,9 +151,8 @@ static void cpuid_device_destroy(int cpu)
device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
}
-static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int cpuid_class_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
int err = 0;
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 4934890e4db2..69eb2fa25494 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -133,7 +133,7 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
{
}
-void __cpuinit x86_of_pci_init(void)
+void x86_of_pci_init(void)
{
pcibios_enable_irq = x86_of_pci_irq_enable;
pcibios_disable_irq = x86_of_pci_irq_disable;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e65ddc62e113..5dd87a89f011 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,7 +292,6 @@ ENDPROC(start_cpu0)
* If cpu hotplug is not supported then this code can go in init section
* which will be freed later
*/
-__CPUINIT
ENTRY(startup_32_smp)
cld
movl $(__BOOT_DS),%eax
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 5e4d8a8a5c40..e1aabdb314c8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -512,21 +512,6 @@ ENTRY(phys_base)
#include "../../x86/xen/xen-head.S"
- .section .bss, "aw", @nobits
- .align L1_CACHE_BYTES
-ENTRY(idt_table)
- .skip IDT_ENTRIES * 16
-
- .align L1_CACHE_BYTES
-ENTRY(debug_idt_table)
- .skip IDT_ENTRIES * 16
-
-#ifdef CONFIG_TRACING
- .align L1_CACHE_BYTES
-ENTRY(trace_idt_table)
- .skip IDT_ENTRIES * 16
-#endif
-
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b627746f6b1a..202d24f0f7e7 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(unlazy_fpu);
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
-static struct i387_fxsave_struct fx_scratch __cpuinitdata;
+static struct i387_fxsave_struct fx_scratch;
-static void __cpuinit mxcsr_feature_mask_init(void)
+static void mxcsr_feature_mask_init(void)
{
unsigned long mask = 0;
@@ -124,7 +124,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
mxcsr_feature_mask &= mask;
}
-static void __cpuinit init_thread_xstate(void)
+static void init_thread_xstate(void)
{
/*
* Note that xstate_size might be overwritten later during
@@ -153,7 +153,7 @@ static void __cpuinit init_thread_xstate(void)
* into all processes.
*/
-void __cpuinit fpu_init(void)
+void fpu_init(void)
{
unsigned long cr0;
unsigned long cr4_mask = 0;
@@ -608,7 +608,7 @@ static int __init no_387(char *s)
__setup("no387", no_387);
-void __cpuinit fpu_detect(struct cpuinfo_x86 *c)
+void fpu_detect(struct cpuinfo_x86 *c)
{
unsigned long cr0;
u16 fsw, fcw;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 344faf8d0d62..4186755f1d7c 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -119,7 +119,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
/*
* allocate per-cpu stacks for hardirq and for softirq processing
*/
-void __cpuinit irq_ctx_init(int cpu)
+void irq_ctx_init(int cpu)
{
union irq_ctx *irqctx;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index cd6d9a5a42f6..a96d32cc55b8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -320,7 +320,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
apic_write(APIC_EOI, APIC_EOI_ACK);
}
-void __cpuinit kvm_guest_cpu_init(void)
+void kvm_guest_cpu_init(void)
{
if (!kvm_para_available())
return;
@@ -421,7 +421,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
native_smp_prepare_boot_cpu();
}
-static void __cpuinit kvm_guest_cpu_online(void *dummy)
+static void kvm_guest_cpu_online(void *dummy)
{
kvm_guest_cpu_init();
}
@@ -435,8 +435,8 @@ static void kvm_guest_cpu_offline(void *dummy)
apf_task_wake_all();
}
-static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
int cpu = (unsigned long)hcpu;
switch (action) {
@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
+static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_notify,
};
#endif
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1f354f4b602b..1570e0741344 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -182,7 +182,7 @@ static void kvm_restore_sched_clock_state(void)
}
#ifdef CONFIG_X86_LOCAL_APIC
-static void __cpuinit kvm_setup_secondary_clock(void)
+static void kvm_setup_secondary_clock(void)
{
/*
* Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1ac6e9aee766..1d14ffee5749 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -82,7 +82,7 @@ static struct cpio_data __init find_ucode_in_initrd(void)
* load_microcode_amd() to save equivalent cpu table and microcode patches in
* kernel heap memory.
*/
-static void __cpuinit apply_ucode_in_initrd(void *ucode, size_t size)
+static void apply_ucode_in_initrd(void *ucode, size_t size)
{
struct equiv_cpu_entry *eq;
u32 *header;
@@ -206,7 +206,7 @@ u8 amd_bsp_mpb[MPB_MAX_SIZE];
* save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
* is used upon resume from suspend.
*/
-void __cpuinit load_ucode_amd_ap(void)
+void load_ucode_amd_ap(void)
{
struct microcode_amd *mc;
unsigned long *initrd;
@@ -238,7 +238,7 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
#else
-static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
+static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
struct ucode_cpu_info *uci)
{
u32 rev, eax;
@@ -252,7 +252,7 @@ static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
}
-void __cpuinit load_ucode_amd_ap(void)
+void load_ucode_amd_ap(void)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 22db92bbdf1a..15c987698b0f 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -468,7 +468,7 @@ static struct syscore_ops mc_syscore_ops = {
.resume = mc_bp_resume,
};
-static __cpuinit int
+static int
mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 86119f63db0c..be7f8514f577 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -41,7 +41,7 @@
*
* x86_vendor() gets vendor information directly through cpuid.
*/
-static int __cpuinit x86_vendor(void)
+static int x86_vendor(void)
{
u32 eax = 0x00000000;
u32 ebx, ecx = 0, edx;
@@ -57,7 +57,7 @@ static int __cpuinit x86_vendor(void)
return X86_VENDOR_UNKNOWN;
}
-static int __cpuinit x86_family(void)
+static int x86_family(void)
{
u32 eax = 0x00000001;
u32 ebx, ecx = 0, edx;
@@ -96,7 +96,7 @@ void __init load_ucode_bsp(void)
}
}
-void __cpuinit load_ucode_ap(void)
+void load_ucode_ap(void)
{
int vendor, x86;
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index dabef95506f3..1575deb2e636 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -34,7 +34,7 @@ struct mc_saved_data {
struct microcode_intel **mc_saved;
} mc_saved_data;
-static enum ucode_state __cpuinit
+static enum ucode_state
generic_load_microcode_early(struct microcode_intel **mc_saved_p,
unsigned int mc_saved_count,
struct ucode_cpu_info *uci)
@@ -69,7 +69,7 @@ out:
return state;
}
-static void __cpuinit
+static void
microcode_pointer(struct microcode_intel **mc_saved,
unsigned long *mc_saved_in_initrd,
unsigned long initrd_start, int mc_saved_count)
@@ -82,7 +82,7 @@ microcode_pointer(struct microcode_intel **mc_saved,
}
#ifdef CONFIG_X86_32
-static void __cpuinit
+static void
microcode_phys(struct microcode_intel **mc_saved_tmp,
struct mc_saved_data *mc_saved_data)
{
@@ -101,7 +101,7 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
}
#endif
-static enum ucode_state __cpuinit
+static enum ucode_state
load_microcode(struct mc_saved_data *mc_saved_data,
unsigned long *mc_saved_in_initrd,
unsigned long initrd_start,
@@ -375,7 +375,7 @@ do { \
#define native_wrmsr(msr, low, high) \
native_write_msr(msr, low, high);
-static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci)
+static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
u8 x86, x86_model;
@@ -584,7 +584,7 @@ scan_microcode(unsigned long start, unsigned long end,
/*
* Print ucode update info.
*/
-static void __cpuinit
+static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
int cpu = smp_processor_id();
@@ -605,7 +605,7 @@ static int current_mc_date;
/*
* Print early updated ucode info after printk works. This is a delayed info dump.
*/
-void __cpuinit show_ucode_info_early(void)
+void show_ucode_info_early(void)
{
struct ucode_cpu_info uci;
@@ -621,7 +621,7 @@ void __cpuinit show_ucode_info_early(void)
* mc_saved_data.mc_saved and delay printing microcode info in
* show_ucode_info_early() until printk() works.
*/
-static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
+static void print_ucode(struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_intel;
int *delay_ucode_info_p;
@@ -643,12 +643,12 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
* Flush global tlb. We only do this in x86_64 where paging has been enabled
* already and PGE should be enabled as well.
*/
-static inline void __cpuinit flush_tlb_early(void)
+static inline void flush_tlb_early(void)
{
__native_flush_tlb_global_irq_disabled();
}
-static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
+static inline void print_ucode(struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_intel;
@@ -660,8 +660,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
}
#endif
-static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
- struct ucode_cpu_info *uci)
+static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
+ struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_intel;
unsigned int val[2];
@@ -763,7 +763,7 @@ load_ucode_intel_bsp(void)
#endif
}
-void __cpuinit load_ucode_intel_ap(void)
+void load_ucode_intel_ap(void)
{
struct mc_saved_data *mc_saved_data_p;
struct ucode_cpu_info uci;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index ac861b8348e2..f4c886d9165c 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -24,14 +24,14 @@ struct pci_hostbridge_probe {
u32 device;
};
-static u64 __cpuinitdata fam10h_pci_mmconf_base;
+static u64 fam10h_pci_mmconf_base;
-static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
+static struct pci_hostbridge_probe pci_probes[] = {
{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
};
-static int __cpuinit cmp_range(const void *x1, const void *x2)
+static int cmp_range(const void *x1, const void *x2)
{
const struct range *r1 = x1;
const struct range *r2 = x2;
@@ -49,7 +49,7 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
-static void __cpuinit get_fam10h_pci_mmconf_base(void)
+static void get_fam10h_pci_mmconf_base(void)
{
int i;
unsigned bus;
@@ -166,7 +166,7 @@ out:
fam10h_pci_mmconf_base = base;
}
-void __cpuinit fam10h_check_enable_mmcfg(void)
+void fam10h_check_enable_mmcfg(void)
{
u64 val;
u32 address;
@@ -230,7 +230,7 @@ static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
{}
};
-/* Called from a __cpuinit function, but only on the BSP. */
+/* Called from a non __init function, but only on the BSP. */
void __ref check_enable_amd_mmconf_dmi(void)
{
dmi_check_system(mmconf_dmi_table);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ce130493b802..88458faea2f8 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -200,7 +200,7 @@ static const struct file_operations msr_fops = {
.compat_ioctl = msr_ioctl,
};
-static int __cpuinit msr_device_create(int cpu)
+static int msr_device_create(int cpu)
{
struct device *dev;
@@ -214,8 +214,8 @@ static void msr_device_destroy(int cpu)
device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
}
-static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int msr_class_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
int err = 0;
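msr_device_create() above backs the /dev/cpu/<n>/msr character devices, where the file offset selects the MSR index and every read or write is eight bytes. A user-space sketch that reads IA32_TIME_STAMP_COUNTER (MSR 0x10) through that interface; it needs the msr module loaded and root privileges:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t value;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/cpu/0/msr");
            return 1;
        }

        /* The MSR index is the file offset; reads are 8 bytes wide. */
        if (pread(fd, &value, sizeof(value), 0x10) != sizeof(value)) {
            perror("pread");
            close(fd);
            return 1;
        }

        printf("IA32_TSC (MSR 0x10) = %llu\n", (unsigned long long)value);
        close(fd);
        return 0;
    }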
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 81a5f5e8f142..83369e5a1d27 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -398,7 +398,7 @@ static void amd_e400_idle(void)
default_idle();
}
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e68709da8251..f8ec57815c05 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -170,7 +170,7 @@ static struct resource bss_resource = {
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
+struct cpuinfo_x86 new_cpu_data = {
.wp_works_ok = -1,
};
/* common cpu data for all cpus */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bfd348e99369..aecc98a93d1b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -130,7 +130,7 @@ atomic_t init_deasserted;
* Report back to the Boot Processor during boot time or to the caller processor
* during CPU online.
*/
-static void __cpuinit smp_callin(void)
+static void smp_callin(void)
{
int cpuid, phys_id;
unsigned long timeout;
@@ -237,7 +237,7 @@ static int enable_start_cpu0;
/*
* Activate a secondary processor.
*/
-notrace static void __cpuinit start_secondary(void *unused)
+static void notrace start_secondary(void *unused)
{
/*
* Don't put *anything* before cpu_init(), SMP booting is too
@@ -300,7 +300,7 @@ void __init smp_store_boot_cpu_info(void)
* The bootstrap kernel entry code has set these up. Save them for
* a given CPU
*/
-void __cpuinit smp_store_cpu_info(int id)
+void smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = &cpu_data(id);
@@ -313,7 +313,7 @@ void __cpuinit smp_store_cpu_info(int id)
identify_secondary_cpu(c);
}
-static bool __cpuinit
+static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -330,7 +330,7 @@ do { \
cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \
} while (0)
-static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
if (cpu_has_topoext) {
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -348,7 +348,7 @@ static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
-static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -359,7 +359,7 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
-static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
if (c->phys_proc_id == o->phys_proc_id) {
if (cpu_has(c, X86_FEATURE_AMD_DCM))
@@ -370,7 +370,7 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
-void __cpuinit set_cpu_sibling_map(int cpu)
+void set_cpu_sibling_map(int cpu)
{
bool has_smt = smp_num_siblings > 1;
bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
@@ -499,7 +499,7 @@ void __inquire_remote_apic(int apicid)
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
* won't ... remember to clear down the APIC, etc later.
*/
-int __cpuinit
+int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
@@ -533,7 +533,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
return (send_status | accept_status);
}
-static int __cpuinit
+static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
@@ -649,7 +649,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
}
/* reduce the number of lines printed when booting a large cpu count system */
-static void __cpuinit announce_cpu(int cpu, int apicid)
+static void announce_cpu(int cpu, int apicid)
{
static int current_node = -1;
int node = early_cpu_to_node(cpu);
@@ -691,7 +691,7 @@ static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
* We'll change this code in the future to wake up hard offlined CPU0 if
* real platform and request are available.
*/
-static int __cpuinit
+static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
int *cpu0_nmi_registered)
{
@@ -731,7 +731,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
* Returns zero if CPU booted OK, else error code from
* ->wakeup_secondary_cpu.
*/
-static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
volatile u32 *trampoline_status =
(volatile u32 *) __va(real_mode_header->trampoline_status);
@@ -872,7 +872,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
return boot_error;
}
-int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int apicid = apic->cpu_present_to_apicid(cpu);
unsigned long flags;
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 3ff42d2f046d..addf7b58f4e8 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -320,8 +320,8 @@ static int tboot_wait_for_aps(int num_aps)
return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
}
-static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
{
switch (action) {
case CPU_DYING:
@@ -334,7 +334,7 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
+static struct notifier_block tboot_cpu_notifier =
{
.notifier_call = tboot_cpu_callback,
};
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 4e584a8d6edd..1c113db9ed57 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -12,10 +12,8 @@ atomic_t trace_idt_ctr = ATOMIC_INIT(0);
struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
(unsigned long) trace_idt_table };
-#ifndef CONFIG_X86_64
-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_data
- = { { { { 0, 0 } } }, };
-#endif
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
static int trace_irq_vector_refcount;
static DEFINE_MUTEX(irq_vector_mutex);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b0865e88d3cc..1b23a1c92746 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -63,19 +63,19 @@
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
+
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
asmlinkage int system_call(void);
-
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.
- */
-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif
+/* Must be page-aligned because the real IDT is used in a fixmap. */
+gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
+
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 098b3cfda72e..6ff49247edf8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -824,7 +824,7 @@ static void __init check_system_tsc_reliable(void)
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
*/
-__cpuinit int unsynchronized_tsc(void)
+int unsynchronized_tsc(void)
{
if (!cpu_has_tsc || tsc_unstable)
return 1;
@@ -1020,7 +1020,7 @@ void __init tsc_init(void)
* been calibrated. This assumes that CONSTANT_TSC applies to all
* cpus in the socket - this should be a safe assumption.
*/
-unsigned long __cpuinit calibrate_delay_is_known(void)
+unsigned long calibrate_delay_is_known(void)
{
int i, cpu = smp_processor_id();
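The comment above calibrate_delay_is_known() explains the shortcut: when the TSC is constant and synchronized across the socket, a freshly booted CPU can reuse loops_per_jiffy from an already-calibrated CPU instead of recalibrating. A simplified sketch of that idea (the helper name is illustrative; the real function also checks tsc_disabled, the CONSTANT_TSC feature bit, and the sibling mask):

#include <linux/cpumask.h>
#include <asm/processor.h>	/* cpu_data() */

static unsigned long reuse_lpj_from_sibling(int cpu)
{
	int i;

	/* Borrow the value from any CPU that has already calibrated. */
	for_each_online_cpu(i)
		if (i != cpu && cpu_data(i).loops_per_jiffy)
			return cpu_data(i).loops_per_jiffy;

	return 0;	/* 0 means "not known": fall back to full calibration */
}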
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index fc25e60a5884..adfdf56a3714 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -25,24 +25,24 @@
* Entry/exit counters that make sure that both CPUs
* run the measurement code at once:
*/
-static __cpuinitdata atomic_t start_count;
-static __cpuinitdata atomic_t stop_count;
+static atomic_t start_count;
+static atomic_t stop_count;
/*
* We use a raw spinlock in this exceptional case, because
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
-static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-static __cpuinitdata cycles_t last_tsc;
-static __cpuinitdata cycles_t max_warp;
-static __cpuinitdata int nr_warps;
+static cycles_t last_tsc;
+static cycles_t max_warp;
+static int nr_warps;
/*
* TSC-warp measurement loop running on both CPUs:
*/
-static __cpuinit void check_tsc_warp(unsigned int timeout)
+static void check_tsc_warp(unsigned int timeout)
{
cycles_t start, now, prev, end;
int i;
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu)
* Source CPU calls into this - it waits for the freshly booted
* target CPU to arrive and then starts the measurement:
*/
-void __cpuinit check_tsc_sync_source(int cpu)
+void check_tsc_sync_source(int cpu)
{
int cpus = 2;
@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
/*
* Freshly booted CPUs call into this:
*/
-void __cpuinit check_tsc_sync_target(void)
+void check_tsc_sync_target(void)
{
int cpus = 2;
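The tsc_sync.c comments above describe the measurement these formerly __cpuinit symbols implement: both CPUs spin in check_tsc_warp(), each taking sync_lock, publishing the TSC value it just read, and flagging a warp whenever it observes a value older than the one last recorded. A stripped-down sketch of one iteration, using the file-local variables shown in the hunk (timeouts, barriers and the random-delay heuristics of the real loop are omitted):

static void tsc_warp_check_once(void)
{
	cycles_t prev, now;

	arch_spin_lock(&sync_lock);

	prev = last_tsc;	/* most recent TSC value seen by either CPU */
	now = get_cycles();
	last_tsc = now;

	/* Reading an older value than the other CPU means the TSCs warp. */
	if (unlikely(prev > now)) {
		nr_warps++;
		if (prev - now > max_warp)
			max_warp = prev - now;
	}

	arch_spin_unlock(&sync_lock);
}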
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9a907a67be8f..1f96f9347ed9 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -331,7 +331,7 @@ sigsegv:
* Assume __initcall executes before all user space. Hopefully kmod
* doesn't violate that. We'll find out if it does.
*/
-static void __cpuinit vsyscall_set_cpu(int cpu)
+static void vsyscall_set_cpu(int cpu)
{
unsigned long d;
unsigned long node = 0;
@@ -353,13 +353,13 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
-static void __cpuinit cpu_vsyscall_init(void *arg)
+static void cpu_vsyscall_init(void *arg)
{
/* preemption should be already off */
vsyscall_set_cpu(raw_smp_processor_id());
}
-static int __cpuinit
+static int
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
long cpu = (long)arg;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14dbbddaf..5f24c71accaa 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -25,7 +25,7 @@
#include <asm/iommu.h>
#include <asm/mach_traps.h>
-void __cpuinit x86_init_noop(void) { }
+void x86_init_noop(void) { }
void __init x86_init_uint_noop(unsigned int unused) { }
int __init iommu_init_noop(void) { return 0; }
void iommu_shutdown_noop(void) { }
@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
},
};
-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
+struct x86_cpuinit_ops x86_cpuinit = {
.early_percpu_clock_init = x86_init_noop,
.setup_percpu_clockev = setup_secondary_APIC_clock,
};
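x86_cpuinit above is one instance of the kernel's overridable-ops pattern: a structure of function pointers pre-filled with safe defaults (here x86_init_noop) that platform code may replace before the hooks are called; dropping __cpuinitdata simply moves the table into ordinary .data. A generic sketch of the pattern, with illustrative names:

struct my_platform_ops {
	void (*setup_clock)(void);	/* per-platform hook */
	void (*setup_irqs)(void);
};

static void my_noop(void) { }		/* harmless default */

static struct my_platform_ops my_ops = {
	.setup_clock = my_noop,
	.setup_irqs  = my_noop,
};

/* A platform overrides a hook early in boot:
 *	my_ops.setup_clock = vendor_setup_clock;
 * and common code simply calls my_ops.setup_clock() unconditionally. */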
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index d6c28acdf99c..422fd8223470 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -573,7 +573,7 @@ static void __init xstate_enable_boot_cpu(void)
* This is somewhat obfuscated due to the lack of powerful enough
* overrides for the section checks.
*/
-void __cpuinit xsave_init(void)
+void xsave_init(void)
{
static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
void (*this_func)(void);
@@ -594,7 +594,7 @@ static inline void __init eager_fpu_init_bp(void)
setup_init_fpu_buf();
}
-void __cpuinit eager_fpu_init(void)
+void eager_fpu_init(void)
{
static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0d094da49541..9e9285ae9b94 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2811,6 +2811,13 @@ exit:
static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code)
{
/*
+ * Do not fix the mmio spte with invalid generation number which
+ * need to be updated by slow page fault path.
+ */
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return false;
+
+ /*
* #PF can be fast only if the shadow page table is present and it
* is caused by write-protect, that means we just need change the
* W bit of the spte which can be done out of mmu-lock.
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index dc0b727742f4..0057a7accfb1 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -410,9 +410,7 @@ out:
pr_warning("multiple CPUs still online, may miss events.\n");
}
-/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
- but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
-static void __ref leave_uniprocessor(void)
+static void leave_uniprocessor(void)
{
int cpu;
int err;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a71c4e207679..8bf93bae1f13 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -60,7 +60,7 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] = {
[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
-int __cpuinit numa_cpu_node(int cpu)
+int numa_cpu_node(int cpu)
{
int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
@@ -691,12 +691,12 @@ void __init init_cpu_to_node(void)
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
# ifndef CONFIG_NUMA_EMU
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
{
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
{
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
@@ -763,17 +763,17 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable)
}
# ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(int cpu, bool enable)
{
debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
{
numa_set_cpumask(cpu, true);
}
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
{
numa_set_cpumask(cpu, false);
}
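In both variants above, numa_add_cpu()/numa_remove_cpu() do nothing more than maintain a per-node CPU mask through the cpumask API. A minimal sketch of that bookkeeping (my_node_mask and my_track_cpu are illustrative names):

#include <linux/cpumask.h>

static struct cpumask my_node_mask;

static void my_track_cpu(int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, &my_node_mask);	/* CPU joins the node */
	else
		cpumask_clear_cpu(cpu, &my_node_mask);	/* CPU leaves the node */
}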
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index dbbbb47260cc..a8f90ce3dedf 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -10,7 +10,7 @@
#include "numa_internal.h"
-static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
+static int emu_nid_to_phys[MAX_NUMNODES];
static char *emu_cmdline __initdata;
void __init numa_emu_cmdline(char *str)
@@ -444,7 +444,7 @@ no_emu:
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
{
int physnid, nid;
@@ -462,7 +462,7 @@ void __cpuinit numa_add_cpu(int cpu)
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
{
int i;
@@ -470,7 +470,7 @@ void __cpuinit numa_remove_cpu(int cpu)
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(int cpu, bool enable)
{
int nid, physnid;
@@ -490,12 +490,12 @@ static void __cpuinit numa_set_cpumask(int cpu, bool enable)
}
}
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
{
numa_set_cpumask(cpu, true);
}
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
{
numa_set_cpumask(cpu, false);
}
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 410531d3c292..90555bf60aa4 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -5,7 +5,7 @@
#include <asm/pgtable.h>
#include <asm/proto.h>
-static int disable_nx __cpuinitdata;
+static int disable_nx;
/*
* noexec = on|off
@@ -29,7 +29,7 @@ static int __init noexec_setup(char *str)
}
early_param("noexec", noexec_setup);
-void __cpuinit x86_configure_nx(void)
+void x86_configure_nx(void)
{
if (cpu_has_nx && !disable_nx)
__supported_pte_mask |= _PAGE_NX;
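setup_nx.c above pairs a plain static flag (disable_nx) with early_param("noexec", noexec_setup), a handler that runs while the kernel command line is parsed, before ordinary __setup() parameters. A minimal sketch of the mechanism (the "myopt" name and its semantics are illustrative):

#include <linux/init.h>
#include <linux/string.h>

static bool myopt_disabled;

static int __init myopt_setup(char *str)
{
	/* str is whatever follows "myopt=" on the command line */
	if (str && !strcmp(str, "off"))
		myopt_disabled = true;
	return 0;	/* 0 = parsed successfully */
}
early_param("myopt", myopt_setup);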
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index e9e6ed5cdf94..a48be98e9ded 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -312,7 +312,7 @@ static int __init early_fill_mp_bus_info(void)
#define ENABLE_CF8_EXT_CFG (1ULL << 46)
-static void __cpuinit enable_pci_io_ecs(void *unused)
+static void enable_pci_io_ecs(void *unused)
{
u64 reg;
rdmsrl(MSR_AMD64_NB_CFG, reg);
@@ -322,8 +322,8 @@ static void __cpuinit enable_pci_io_ecs(void *unused)
}
}
-static int __cpuinit amd_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int amd_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
int cpu = (long)hcpu;
switch (action) {
@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
+static struct notifier_block amd_cpu_notifier = {
.notifier_call = amd_cpu_notify,
};
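enable_pci_io_ecs() above is a read-modify-write of MSR_AMD64_NB_CFG that must run on the CPU being brought up, which is why it hangs off the CPU notifier shown in the same hunk. The general shape of such an MSR update (the wrapper name and the smp_call_function_single() usage note are illustrative rather than quoted from the patch; ENABLE_CF8_EXT_CFG is the file-local define shown above):

#include <asm/msr.h>

static void set_nb_cfg_bit(void *unused)
{
	u64 reg;

	rdmsrl(MSR_AMD64_NB_CFG, reg);		/* read the MSR on this CPU */
	if (!(reg & ENABLE_CF8_EXT_CFG)) {
		reg |= ENABLE_CF8_EXT_CFG;	/* set the bit only if needed */
		wrmsrl(MSR_AMD64_NB_CFG, reg);
	}
}

/* Run on a specific CPU, e.g. from the hotplug callback:
 *	smp_call_function_single(cpu, set_nb_cfg_bit, NULL, 1);
 */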
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index f8ab4945892e..8244f5ec2f4c 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -12,8 +12,10 @@
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/reboot.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
+#include <linux/reboot.h>
#include <asm/ce4100.h>
#include <asm/prom.h>
@@ -134,7 +136,7 @@ static void __init sdv_arch_setup(void)
}
#ifdef CONFIG_X86_IO_APIC
-static void __cpuinit sdv_pci_init(void)
+static void sdv_pci_init(void)
{
x86_of_pci_init();
/* We can't set this earlier, because we need to calibrate the timer */
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index c8d5577044bb..90f6ed127096 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -931,13 +931,6 @@ void __init efi_enter_virtual_mode(void)
va = efi_ioremap(md->phys_addr, size,
md->type, md->attribute);
- if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
- if (!va)
- pr_err("ioremap of 0x%llX failed!\n",
- (unsigned long long)md->phys_addr);
- continue;
- }
-
md->virt_addr = (u64) (unsigned long) va;
if (!va) {
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index a0a0a4389bbd..47fe66fe61f1 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -65,7 +65,7 @@
* lapic (always-on,ARAT) ------ 150
*/
-__cpuinitdata enum mrst_timer_options mrst_timer_options;
+enum mrst_timer_options mrst_timer_options;
static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
@@ -248,7 +248,7 @@ static void __init mrst_time_init(void)
apbt_time_init();
}
-static void __cpuinit mrst_arch_setup(void)
+static void mrst_arch_setup(void)
{
if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index ae7319db18ee..5e04a1c899fa 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -508,7 +508,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
{
struct rt_sigframe __user *frame;
int err = 0;
- struct task_struct *me = current;
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2fa02bc50034..193097ef3d7d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1681,8 +1681,8 @@ static void __init init_hvm_pv_info(void)
xen_domain_type = XEN_HVM_DOMAIN;
}
-static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
int cpu = (long)hcpu;
switch (action) {
@@ -1700,7 +1700,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
+static struct notifier_block xen_hvm_cpu_notifier = {
.notifier_call = xen_hvm_cpu_notify,
};
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 94eac5c85cdc..056d11faef21 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -475,7 +475,7 @@ static void __init fiddle_vdso(void)
#endif
}
-static int __cpuinit register_callback(unsigned type, const void *func)
+static int register_callback(unsigned type, const void *func)
{
struct callback_register callback = {
.type = type,
@@ -486,7 +486,7 @@ static int __cpuinit register_callback(unsigned type, const void *func)
return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
-void __cpuinit xen_enable_sysenter(void)
+void xen_enable_sysenter(void)
{
int ret;
unsigned sysenter_feature;
@@ -505,7 +505,7 @@ void __cpuinit xen_enable_sysenter(void)
setup_clear_cpu_cap(sysenter_feature);
}
-void __cpuinit xen_enable_syscall(void)
+void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
int ret;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c1367b29c3b1..ca92754eb846 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -65,7 +65,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __cpuinit cpu_bringup(void)
+static void cpu_bringup(void)
{
int cpu;
@@ -97,7 +97,7 @@ static void __cpuinit cpu_bringup(void)
wmb(); /* make sure everything is out */
}
-static void __cpuinit cpu_bringup_and_idle(void)
+static void cpu_bringup_and_idle(void)
{
cpu_bringup();
cpu_startup_entry(CPUHP_ONLINE);
@@ -326,7 +326,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
set_cpu_present(cpu, true);
}
-static int __cpuinit
+static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
struct vcpu_guest_context *ctxt;
@@ -397,7 +397,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
return 0;
}
-static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
+static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
int rc;
@@ -470,7 +470,7 @@ static void xen_cpu_die(unsigned int cpu)
xen_teardown_timer(cpu);
}
-static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
+static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
@@ -691,7 +691,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
xen_init_lock_cpu(0);
}
-static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
+static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
rc = native_cpu_up(cpu, tidle);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index a40f8508e760..cf3caee356b3 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -361,7 +361,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void __cpuinit xen_init_lock_cpu(int cpu)
+void xen_init_lock_cpu(int cpu)
{
int irq;
char *name;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a95b41744ad0..86782c5d7e2a 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -73,7 +73,7 @@ static inline void xen_hvm_smp_init(void) {}
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init xen_init_spinlocks(void);
-void __cpuinit xen_init_lock_cpu(int cpu);
+void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);
#else
static inline void xen_init_spinlocks(void)
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index bdbb17312526..24bb0c1776ba 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -162,7 +162,7 @@ irqreturn_t timer_interrupt (int irq, void *dev_id)
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
loops_per_jiffy = CCOUNT_PER_JIFFY;
printk("Calibrating delay loop (skipped)... "